diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd index 8e2c2c405db22d411b7f86f8c0af634b09a3f36a..3becc9a82bdf6fbb28012e84763811dd6c091450 100644 --- a/Documentation/ABI/stable/sysfs-driver-dma-idxd +++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd @@ -22,6 +22,7 @@ Date: Oct 25, 2019 KernelVersion: 5.6.0 Contact: dmaengine@vger.kernel.org Description: The largest number of work descriptors in a batch. + It's not visible when the device does not support batch. What: /sys/bus/dsa/devices/dsa/max_work_queues_size Date: Oct 25, 2019 @@ -49,6 +50,8 @@ Description: The total number of read buffers supported by this device. The read buffers represent resources within the DSA implementation, and these resources are allocated by engines to support operations. See DSA spec v1.2 9.2.4 Total Read Buffers. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/dsa/max_transfer_size Date: Oct 25, 2019 @@ -122,6 +125,8 @@ Contact: dmaengine@vger.kernel.org Description: The maximum number of read buffers that may be in use at one time by operations that access low bandwidth memory in the device. See DSA spec v1.2 9.2.8 GENCFG on Global Read Buffer Limit. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/dsa/cmd_status Date: Aug 28, 2020 @@ -205,6 +210,7 @@ KernelVersion: 5.10.0 Contact: dmaengine@vger.kernel.org Description: The max batch size for this workqueue. Cannot exceed device max batch size. Configurable parameter. + It's not visible when the device does not support batch. What: /sys/bus/dsa/devices/wq./ats_disable Date: Nov 13, 2020 @@ -250,6 +256,8 @@ KernelVersion: 5.17.0 Contact: dmaengine@vger.kernel.org Description: Enable the use of global read buffer limit for the group. See DSA spec v1.2 9.2.18 GRPCFG Use Global Read Buffer Limit. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/group./read_buffers_allowed Date: Dec 10, 2021 @@ -258,6 +266,8 @@ Contact: dmaengine@vger.kernel.org Description: Indicates max number of read buffers that may be in use at one time by all engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers Allowed. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/group./read_buffers_reserved Date: Dec 10, 2021 @@ -266,6 +276,8 @@ Contact: dmaengine@vger.kernel.org Description: Indicates the number of Read Buffers reserved for the use of engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers Reserved. + It's not visible when the device does not support Read Buffer + allocation control. What: /sys/bus/dsa/devices/group./desc_progress_limit Date: Sept 14, 2022 diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor index d76cd3946434d00c805db4fb15c253f32a843128..e9ef69aef20b173d46f3ae743fb8da771a16d67a 100644 --- a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor +++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor @@ -5,6 +5,9 @@ Contact: linux-mtd@lists.infradead.org Description: (RO) The JEDEC ID of the SPI NOR flash as reported by the flash device. + The attribute is not present if the flash doesn't support + the "Read JEDEC ID" command (9Fh). This is the case for + non-JEDEC compliant flashes. 
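		Because these attributes are only present when the underlying
		feature exists, a consumer should treat their absence as
		"not supported" rather than as an error. A minimal userspace
		sketch of that pattern (the spi0.0 device path below is purely
		hypothetical)::

		    #include <stdio.h>
		    #include <unistd.h>

		    int main(void)
		    {
		        /* Illustrative path only; real devices differ. */
		        const char *attr =
		            "/sys/bus/spi/devices/spi0.0/spi-nor/jedec_id";
		        char id[32];
		        FILE *f;

		        if (access(attr, R_OK) != 0) {
		            /* Attribute absent: flash is not JEDEC compliant. */
		            fprintf(stderr, "no JEDEC ID reported\n");
		            return 0;
		        }
		        f = fopen(attr, "r");
		        if (f && fgets(id, sizeof(id), f))
		            printf("JEDEC ID: %s", id);
		        if (f)
		            fclose(f);
		        return 0;
		    }
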
What: /sys/bus/spi/devices/.../spi-nor/manufacturer Date: April 2021 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 42af9ca0127e522beaa8113a02d2568cd1c56a6a..6b838869554b144c3a5e23e74f3d13f715ee90df 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2300,7 +2300,13 @@ Provide an override to the IOAPIC-ID<->DEVICE-ID mapping provided in the IVRS ACPI table. By default, PCI segment is 0, and can be omitted. - For example: + + For example, to map IOAPIC-ID decimal 10 to + PCI segment 0x1 and PCI device 00:14.0, + write the parameter as: + ivrs_ioapic=10@0001:00:14.0 + + Deprecated formats: * To map IOAPIC-ID decimal 10 to PCI device 00:14.0 write the parameter as: ivrs_ioapic[10]=00:14.0 @@ -2312,7 +2318,13 @@ Provide an override to the HPET-ID<->DEVICE-ID mapping provided in the IVRS ACPI table. By default, PCI segment is 0, and can be omitted. - For example: + + For example, to map HPET-ID decimal 10 to + PCI segment 0x1 and PCI device 00:14.0, + write the parameter as: + ivrs_hpet=10@0001:00:14.0 + + Deprecated formats: * To map HPET-ID decimal 0 to PCI device 00:14.0 write the parameter as: ivrs_hpet[0]=00:14.0 @@ -2323,15 +2335,20 @@ ivrs_acpihid [HW,X86-64] Provide an override to the ACPI-HID:UID<->DEVICE-ID mapping provided in the IVRS ACPI table. + By default, PCI segment is 0, and can be omitted. For example, to map UART-HID:UID AMD0020:0 to PCI segment 0x1 and PCI device ID 00:14.5, write the parameter as: - ivrs_acpihid[0001:00:14.5]=AMD0020:0 + ivrs_acpihid=AMD0020:0@0001:00:14.5 - By default, PCI segment is 0, and can be omitted. - For example, PCI device 00:14.5 write the parameter as: + Deprecated formats: + * To map UART-HID:UID AMD0020:0 to PCI segment is 0, + PCI device ID 00:14.5, write the parameter as: ivrs_acpihid[00:14.5]=AMD0020:0 + * To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and + PCI device ID 00:14.5, write the parameter as: + ivrs_acpihid[0001:00:14.5]=AMD0020:0 js= [HW,JOY] Analog joystick See Documentation/input/joydev/joystick.rst. diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 98d1b198b2b4c1040172761c88c7ee956c1edb12..c2c64c1b706ff6be4859cf1be24bbb6cc0364a22 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1314,6 +1314,29 @@ watchdog work to be queued by the watchdog timer function, otherwise the NMI watchdog — if enabled — can detect a hard lockup condition. +split_lock_mitigate (x86 only) +============================== + +On x86, each "split lock" imposes a system-wide performance penalty. On larger +systems, large numbers of split locks from unprivileged users can result in +denials of service to well-behaved and potentially more important users. + +The kernel mitigates these bad users by detecting split locks and imposing +penalties: forcing them to wait and only allowing one core to execute split +locks at a time. + +These mitigations can make those bad applications unbearably slow. Setting +split_lock_mitigate=0 may restore some application performance, but will also +increase system exposure to denial of service attacks from split lock users. + += =================================================================== +0 Disable the mitigation mode - just warns the split lock on kernel log + and exposes the system to denials of service from the split lockers. 
+1 Enable the mitigation mode (this is the default) - penalizes the split + lockers with intentional performance degradation. += =================================================================== + + stack_erasing ============= diff --git a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml index 02e605fac408d2637c9d9064f2ba6c8c633d727b..9ddba7f2e7aa619758c1eb0483cc284ebfd4e096 100644 --- a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml +++ b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml @@ -473,9 +473,6 @@ patternProperties: Specifies whether the event is to be interpreted as a key (1) or a switch (5). - required: - - linux,code - additionalProperties: false dependencies: @@ -501,7 +498,7 @@ patternProperties: azoteq,slider-size: $ref: /schemas/types.yaml#/definitions/uint32 - minimum: 0 + minimum: 1 maximum: 65535 description: Specifies the slider's one-dimensional resolution, equal to the @@ -575,9 +572,9 @@ patternProperties: linux,code: true azoteq,gesture-max-ms: - multipleOf: 4 + multipleOf: 16 minimum: 0 - maximum: 1020 + maximum: 4080 description: Specifies the length of time (in ms) within which a tap, swipe or flick gesture must be completed in order to be acknowledged @@ -585,9 +582,9 @@ patternProperties: gesture applies to all remaining swipe or flick gestures. azoteq,gesture-min-ms: - multipleOf: 4 + multipleOf: 16 minimum: 0 - maximum: 124 + maximum: 496 description: Specifies the length of time (in ms) for which a tap gesture must be held in order to be acknowledged by the device. @@ -620,9 +617,6 @@ patternProperties: GPIO, they must all be of the same type (proximity, touch or slider gesture). - required: - - linux,code - additionalProperties: false required: @@ -693,6 +687,7 @@ allOf: properties: azoteq,slider-size: multipleOf: 16 + minimum: 16 maximum: 4080 azoteq,top-speed: @@ -935,14 +930,14 @@ examples: event-tap { linux,code = ; - azoteq,gesture-max-ms = <600>; - azoteq,gesture-min-ms = <24>; + azoteq,gesture-max-ms = <400>; + azoteq,gesture-min-ms = <32>; }; event-flick-pos { linux,code = ; - azoteq,gesture-max-ms = <600>; - azoteq,gesture-dist = <816>; + azoteq,gesture-max-ms = <800>; + azoteq,gesture-dist = <800>; }; event-flick-neg { diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml index 6a3e3ede1ede7508943f34cd9a209cd048125076..777f2da52f1edcb9b5d0ade70ede0995b9a43cb8 100644 --- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml @@ -98,6 +98,10 @@ properties: type: object $ref: /schemas/regulator/qcom,spmi-regulator.yaml# + pwm: + type: object + $ref: /schemas/leds/leds-qcom-lpg.yaml# + patternProperties: "^adc@[0-9a-f]+$": type: object @@ -123,10 +127,6 @@ patternProperties: type: object $ref: /schemas/power/reset/qcom,pon.yaml# - "pwm@[0-9a-f]+$": - type: object - $ref: /schemas/leds/leds-qcom-lpg.yaml# - "^rtc@[0-9a-f]+$": type: object $ref: /schemas/rtc/qcom-pm8xxx-rtc.yaml# diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml index 376e739bcad40a89083067f18d7837a5b64af43e..49b4f7a32e71e4eba5267f8559914f4e36f22790 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml @@ -14,9 +14,6 @@ description: |+ This PCIe host controller is based on the Synopsys 
DesignWare PCIe IP and thus inherits all the common properties defined in snps,dw-pcie.yaml. -allOf: - - $ref: /schemas/pci/snps,dw-pcie.yaml# - properties: compatible: enum: @@ -61,7 +58,7 @@ properties: - const: pcie - const: pcie_bus - const: pcie_phy - - const: pcie_inbound_axi for imx6sx-pcie, pcie_aux for imx8mq-pcie + - enum: [ pcie_inbound_axi, pcie_aux ] num-lanes: const: 1 @@ -175,6 +172,47 @@ required: - clocks - clock-names +allOf: + - $ref: /schemas/pci/snps,dw-pcie.yaml# + - if: + properties: + compatible: + contains: + const: fsl,imx6sx-pcie + then: + properties: + clock-names: + items: + - {} + - {} + - {} + - const: pcie_inbound_axi + - if: + properties: + compatible: + contains: + const: fsl,imx8mq-pcie + then: + properties: + clock-names: + items: + - {} + - {} + - {} + - const: pcie_aux + - if: + properties: + compatible: + not: + contains: + enum: + - fsl,imx6sx-pcie + - fsl,imx8mq-pcie + then: + properties: + clock-names: + maxItems: 3 + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml index 48ed227fc5b9e888781a9df39d52b90b1cf5f6b7..53da2edd7c9abef7d13c588af2fbb63a17d509eb 100644 --- a/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml +++ b/Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml @@ -36,7 +36,7 @@ properties: - const: mpu interrupts: - maxItems: 1 + maxItems: 2 clocks: items: @@ -94,8 +94,9 @@ examples: #interrupt-cells = <1>; ranges = <0x81000000 0 0x40000000 0 0x40000000 0 0x00010000>, <0x82000000 0 0x50000000 0 0x50000000 0 0x20000000>; - interrupts = ; - interrupt-names = "intr"; + interrupts = , + ; + interrupt-names = "msi", "intr"; interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &gic GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml index 89b8f3dd67a19bfc5b9d8f2c047a4565c3dfb590..3342847dcb19a6c4b949a3a0813cb143fc7ea0f9 100644 --- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml @@ -87,6 +87,8 @@ patternProperties: "wifi_led" "led" 1, 2 "i2c" "i2c" 3, 4 "uart1_0" "uart" 7, 8, 9, 10 + "uart1_rx_tx" "uart" 42, 43 + "uart1_cts_rts" "uart" 44, 45 "pcie_clk" "pcie" 9 "pcie_wake" "pcie" 10 "spi1_0" "spi" 11, 12, 13, 14 @@ -98,9 +100,11 @@ patternProperties: "emmc_45" "emmc" 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 "spi1_1" "spi" 23, 24, 25, 26 - "uart1_2" "uart" 29, 30, 31, 32 + "uart1_2_rx_tx" "uart" 29, 30 + "uart1_2_cts_rts" "uart" 31, 32 "uart1_1" "uart" 23, 24, 25, 26 - "uart2_0" "uart" 29, 30, 31, 32 + "uart2_0_rx_tx" "uart" 29, 30 + "uart2_0_cts_rts" "uart" 31, 32 "spi0" "spi" 33, 34, 35, 36 "spi0_wp_hold" "spi" 37, 38 "uart1_3_rx_tx" "uart" 35, 36 @@ -157,7 +161,7 @@ patternProperties: then: properties: groups: - enum: [emmc, emmc_rst] + enum: [emmc_45, emmc_51] - if: properties: function: @@ -221,8 +225,12 @@ patternProperties: then: properties: groups: - enum: [uart1_0, uart1_1, uart1_2, uart1_3_rx_tx, - uart1_3_cts_rts, uart2_0, uart2_1, uart0, uart1, uart2] + items: + enum: [uart1_0, uart1_rx_tx, uart1_cts_rts, uart1_1, + uart1_2_rx_tx, uart1_2_cts_rts, uart1_3_rx_tx, + uart1_3_cts_rts, uart2_0_rx_tx, uart2_0_cts_rts, + uart2_1, uart0, uart1, uart2] + maxItems: 2 - if: properties: function: @@ -356,6 +364,27 @@ examples: interrupt-parent 
= <&gic>; #interrupt-cells = <2>; + pcie_pins: pcie-pins { + mux { + function = "pcie"; + groups = "pcie_clk", "pcie_wake", "pcie_pereset"; + }; + }; + + pwm_pins: pwm-pins { + mux { + function = "pwm"; + groups = "pwm0", "pwm1_0"; + }; + }; + + spi0_pins: spi0-pins { + mux { + function = "spi"; + groups = "spi0", "spi0_wp_hold"; + }; + }; + uart1_pins: uart1-pins { mux { function = "uart"; @@ -363,6 +392,13 @@ examples: }; }; + uart1_3_pins: uart1-3-pins { + mux { + function = "uart"; + groups = "uart1_3_rx_tx", "uart1_3_cts_rts"; + }; + }; + uart2_pins: uart2-pins { mux { function = "uart"; diff --git a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml index a7fae1772a81b3382ecbe400a494b90ec40fdb62..cd8e9a8907f84a6b278d0e5e6335a07510858dae 100644 --- a/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml +++ b/Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml @@ -30,7 +30,9 @@ properties: maxItems: 1 "#pwm-cells": - const: 2 + enum: [2, 3] + description: + The only flag supported by the controller is PWM_POLARITY_INVERTED. microchip,sync-update-mask: description: | diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt index 5d6ea66a863fe2e46c2f8ca292d941dcded61a65..1f75feec3dec683bb40d4de930cb8fef94e644e3 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt +++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt @@ -109,7 +109,7 @@ audio-codec@1{ reg = <1 0>; interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "intr2" - reset-gpios = <&msmgpio 64 0>; + reset-gpios = <&msmgpio 64 GPIO_ACTIVE_LOW>; slim-ifc-dev = <&wc9335_ifd>; clock-names = "mclk", "native"; clocks = <&rpmcc RPM_SMD_DIV_CLK1>, diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt index c5f2b8febceecb73e886b4f91d417917135e9f6c..6b87db68337c2fea81c68d9e4793ee8bcc68bdec 100644 --- a/Documentation/devicetree/bindings/sound/rt5682.txt +++ b/Documentation/devicetree/bindings/sound/rt5682.txt @@ -46,7 +46,7 @@ Optional properties: - realtek,dmic-clk-driving-high : Set the high driving of the DMIC clock out. -- #sound-dai-cells: Should be set to '<0>'. +- #sound-dai-cells: Should be set to '<1>'. Pins on the device (for linking into audio routes) for RT5682: diff --git a/Documentation/driver-api/spi.rst b/Documentation/driver-api/spi.rst index f64cb666498aabc7682714c235c45d9ac28093ad..f28887045049d4d3e6aba102715ece7befb58413 100644 --- a/Documentation/driver-api/spi.rst +++ b/Documentation/driver-api/spi.rst @@ -25,8 +25,8 @@ hardware, which may be as simple as a set of GPIO pins or as complex as a pair of FIFOs connected to dual DMA engines on the other side of the SPI shift register (maximizing throughput). Such drivers bridge between whatever bus they sit on (often the platform bus) and SPI, and expose -the SPI side of their device as a :c:type:`struct spi_master -`. SPI devices are children of that master, +the SPI side of their device as a :c:type:`struct spi_controller +`. 
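For illustration, a hedged sketch of a controller driver registering such a
:c:type:`struct spi_controller <spi_controller>` (all names are hypothetical;
the hardware access in ``transfer_one`` is stubbed out)::

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    /* Hypothetical controller driver: it registers a struct spi_controller
     * and the SPI core then instantiates the child spi_device nodes. */
    static int demo_transfer_one(struct spi_controller *ctlr,
                                 struct spi_device *spi,
                                 struct spi_transfer *xfer)
    {
            /* Push xfer->tx_buf / pull xfer->rx_buf through the hardware.
             * Returning 0 reports the transfer as already finished. */
            return 0;
    }

    static int demo_probe(struct platform_device *pdev)
    {
            struct spi_controller *ctlr;

            ctlr = devm_spi_alloc_master(&pdev->dev, 0);
            if (!ctlr)
                    return -ENOMEM;

            ctlr->bus_num = -1;            /* dynamic bus number */
            ctlr->num_chipselect = 1;
            ctlr->transfer_one = demo_transfer_one;

            return devm_spi_register_controller(&pdev->dev, ctlr);
    }

    static struct platform_driver demo_spi_driver = {
            .probe  = demo_probe,
            .driver = { .name = "demo-spi-controller" },
    };
    module_platform_driver(demo_spi_driver);
    MODULE_LICENSE("GPL");
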
SPI devices are children of that master, represented as a :c:type:`struct spi_device ` and manufactured from :c:type:`struct spi_board_info ` descriptors which are usually provided by diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst index 17779a2772e51ea51d14278dd07ed532a7796199..5f6454b9dbd4d9235e5ae19aec226bcfeedd18ed 100644 --- a/Documentation/fault-injection/fault-injection.rst +++ b/Documentation/fault-injection/fault-injection.rst @@ -83,9 +83,7 @@ configuration of fault-injection capabilities. - /sys/kernel/debug/fail*/times: specifies how many times failures may happen at most. A value of -1 - means "no limit". Note, though, that this file only accepts unsigned - values. So, if you want to specify -1, you better use 'printf' instead - of 'echo', e.g.: $ printf %#x -1 > times + means "no limit". - /sys/kernel/debug/fail*/space: @@ -284,7 +282,7 @@ Application Examples echo Y > /sys/kernel/debug/$FAILTYPE/task-filter echo 10 > /sys/kernel/debug/$FAILTYPE/probability echo 100 > /sys/kernel/debug/$FAILTYPE/interval - printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times + echo -1 > /sys/kernel/debug/$FAILTYPE/times echo 0 > /sys/kernel/debug/$FAILTYPE/space echo 2 > /sys/kernel/debug/$FAILTYPE/verbose echo Y > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait @@ -338,7 +336,7 @@ Application Examples echo N > /sys/kernel/debug/$FAILTYPE/task-filter echo 10 > /sys/kernel/debug/$FAILTYPE/probability echo 100 > /sys/kernel/debug/$FAILTYPE/interval - printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times + echo -1 > /sys/kernel/debug/$FAILTYPE/times echo 0 > /sys/kernel/debug/$FAILTYPE/space echo 2 > /sys/kernel/debug/$FAILTYPE/verbose echo Y > /sys/kernel/debug/$FAILTYPE/ignore-gfp-wait @@ -369,7 +367,7 @@ Application Examples echo N > /sys/kernel/debug/$FAILTYPE/task-filter echo 100 > /sys/kernel/debug/$FAILTYPE/probability echo 0 > /sys/kernel/debug/$FAILTYPE/interval - printf %#x -1 > /sys/kernel/debug/$FAILTYPE/times + echo -1 > /sys/kernel/debug/$FAILTYPE/times echo 0 > /sys/kernel/debug/$FAILTYPE/space echo 1 > /sys/kernel/debug/$FAILTYPE/verbose diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst index eb358a00be2795359e07606e50c3f73864422341..1d16787a00e95af45acbeacba8648ab79d814a1a 100644 --- a/Documentation/filesystems/mount_api.rst +++ b/Documentation/filesystems/mount_api.rst @@ -814,6 +814,7 @@ process the parameters it is given. 
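    As an illustration, a hedged sketch of a caller (hypothetical filesystem
    code; passing 0 for the new ``flags`` argument requests a plain lookup,
    and ``want_bdev`` is false because no block device is wanted)::

	#include <linux/fs_context.h>
	#include <linux/fs_parser.h>
	#include <linux/path.h>

	/* Hypothetical option handler: resolve a path-valued parameter. */
	static int demo_parse_lowerdir(struct fs_context *fc,
				       struct fs_parameter *param)
	{
		struct path path;
		int err;

		err = fs_lookup_param(fc, param, false, 0, &path);
		if (err)
			return err;

		/* ... record or validate the resolved path ... */
		path_put(&path);
		return 0;
	}
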
int fs_lookup_param(struct fs_context *fc, struct fs_parameter *value, bool want_bdev, + unsigned int flags, struct path *_path); This takes a parameter that carries a string or filename type and attempts diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst index 0bfb4c33974890db4bfa4bcd91d5c52730104d54..9bc9db8ec6517c10c0cad15aa05777d43ec6f659 100644 --- a/Documentation/security/keys/trusted-encrypted.rst +++ b/Documentation/security/keys/trusted-encrypted.rst @@ -350,7 +350,8 @@ Load an encrypted key "evm" from saved blob:: Instantiate an encrypted key "evm" using user-provided decrypted data:: - $ keyctl add encrypted evm "new default user:kmk 32 `cat evm_decrypted_data.blob`" @u + $ evmkey=$(dd if=/dev/urandom bs=1 count=32 | xxd -c32 -p) + $ keyctl add encrypted evm "new default user:kmk 32 $evmkey" @u 794890253 $ keyctl print 794890253 diff --git a/Documentation/trace/kprobes.rst b/Documentation/trace/kprobes.rst index 48cf778a246802fc6ecd6365110931846fbf374c..fc7ce76eab655c0163c3dfe46f787663db7aa320 100644 --- a/Documentation/trace/kprobes.rst +++ b/Documentation/trace/kprobes.rst @@ -131,8 +131,7 @@ For example, if the function is non-recursive and is called with a spinlock held, maxactive = 1 should be enough. If the function is non-recursive and can never relinquish the CPU (e.g., via a semaphore or preemption), NR_CPUS should be enough. If maxactive <= 0, it is -set to a default value. If CONFIG_PREEMPT is enabled, the default -is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS. +set to a default value: max(10, 2*NR_CPUS). It's not a disaster if you set maxactive too low; you'll just miss some probes. In the kretprobe struct, the nmissed field is set to diff --git a/Makefile b/Makefile index 997b67722292074c1d491c73a8b2207a12c47b02..ddbd2fc917c598fb0e61ee45c0aab53ae3d93b5a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 1 -SUBLEVEL = 0 +SUBLEVEL = 5 EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth diff --git a/arch/Kconfig b/arch/Kconfig index 8f138e580d1ae1f141dd0b4d3060d86e16f0287c..81599f5c17b0f70cc7c5ebf567d1ec7b2f237df9 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -635,7 +635,7 @@ config ARCH_SUPPORTS_SHADOW_CALL_STACK config SHADOW_CALL_STACK bool "Shadow Call Stack" depends on ARCH_SUPPORTS_SHADOW_CALL_STACK - depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER + depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER help This option enables the compiler's Shadow Call Stack, which uses a shadow stack to protect function return addresses from diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index fdc485d7787a65adbc56d304d57c1fb17848b936..084c27cb0c707c9a0ffc52a46c923ee8746b6cbd 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -75,7 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); /* Work to do on interrupt/exception return. */ #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_RESUME) + _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL) /* Work to do on any return to userspace. 
*/ #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index e227f3a29a43c1722552eada5435e838387f8086..c41a5a9c3b9f2351ad8c95b38820e9bd9e069d9d 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -469,8 +469,10 @@ entSys: #ifdef CONFIG_AUDITSYSCALL lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT and $3, $6, $3 -#endif bne $3, strace +#else + blbs $3, strace /* check for SYSCALL_TRACE in disguise */ +#endif beq $4, 1f ldq $27, 0($5) 1: jsr $26, ($27), sys_ni_syscall diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 9dc928859ad3317f519f19f51b61b8e4887a2313..2013a5ccecd317e8ec55c9505417d0ab5c9de575 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -84,7 +84,7 @@ pcie0_intc: interrupt-controller { pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index 929deaf312a55c6989711d57be72f1a570ea3ce9..c310ef26d1ccec50b01752b9cb83edde1aba77ba 100644 --- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -592,7 +592,7 @@ pcie0_intc: interrupt-controller { pcie1: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi index ce1dddb2269b02edc3b9dab4817dfdd712fa4c56..e94f22b0e9b5e366543d8c4b329549de255ff42c 100644 --- a/arch/arm/boot/dts/armada-380.dtsi +++ b/arch/arm/boot/dts/armada-380.dtsi @@ -89,7 +89,7 @@ pcie1_intc: interrupt-controller { /* x1 port */ pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -118,7 +118,7 @@ pcie2_intc: interrupt-controller { /* x1 port */ pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts index 72ac807cae259cc3ded771120ad72677bcd1de84..0c1f238e4c3061b1965e224ef50d5bddb4547f4c 100644 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts @@ -23,6 +23,12 @@ chosen { stdout-path = &uart0; }; + aliases { + ethernet0 = ð0; + ethernet1 = ð1; + ethernet2 = ð2; + }; + memory { device_type = "memory"; reg = <0x00000000 0x40000000>; /* 1024 MB */ @@ -483,7 +489,17 @@ fixed-link { }; }; - /* port 6 is connected to eth0 */ + ports@6 { + reg = <6>; + label = "cpu"; + ethernet = <ð0>; + phy-mode = "rgmii-id"; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi index 83392b92dae288ac7fd64a2f6c93cce1ae1c4a1d..be8d607c59b21dbadd1436f67012fe668ab5a914 100644 --- a/arch/arm/boot/dts/armada-385.dtsi +++ b/arch/arm/boot/dts/armada-385.dtsi @@ -93,7 +93,7 @@ pcie1_intc: interrupt-controller { /* x1 port */ pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = 
<0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -121,7 +121,7 @@ pcie2_intc: interrupt-controller { /* x1 port */ pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -152,7 +152,7 @@ pcie3_intc: interrupt-controller { */ pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi index 923b035a3ab38e79b6a9b7c357def34ab14ea4f9..9d1cac49c022f07c315efabc57c5f08158c2833a 100644 --- a/arch/arm/boot/dts/armada-39x.dtsi +++ b/arch/arm/boot/dts/armada-39x.dtsi @@ -463,7 +463,7 @@ pcie1_intc: interrupt-controller { /* x1 port */ pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x40000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x40000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -492,7 +492,7 @@ pcie2_intc: interrupt-controller { /* x1 port */ pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x44000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -524,7 +524,7 @@ pcie3_intc: interrupt-controller { */ pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x48000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi index bf9360f41e0a601d80179fc3681d080e113a25db..5ea9d509cd3082f0a7aded77a32319d7c3b39f2c 100644 --- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi @@ -107,7 +107,7 @@ pcie1_intc: interrupt-controller { pcie2: pcie@2,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -135,7 +135,7 @@ pcie2_intc: interrupt-controller { pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -163,7 +163,7 @@ pcie3_intc: interrupt-controller { pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -191,7 +191,7 @@ pcie4_intc: interrupt-controller { pcie5: pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 0714af52e607536a794864d1ff5b95dedc663ee5..6c6fbb9faf5acd4108e4142e70d74206ee361de6 100644 --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -122,7 +122,7 @@ pcie1_intc: interrupt-controller { pcie2: pcie@2,0 { device_type = "pci"; - 
assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x44000 0 0x2000>; reg = <0x1000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -150,7 +150,7 @@ pcie2_intc: interrupt-controller { pcie3: pcie@3,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; + assigned-addresses = <0x82001800 0 0x48000 0 0x2000>; reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -178,7 +178,7 @@ pcie3_intc: interrupt-controller { pcie4: pcie@4,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>; + assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>; reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -206,7 +206,7 @@ pcie4_intc: interrupt-controller { pcie5: pcie@5,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; reg = <0x2800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -234,7 +234,7 @@ pcie5_intc: interrupt-controller { pcie6: pcie@6,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x84000 0 0x2000>; + assigned-addresses = <0x82003000 0 0x84000 0 0x2000>; reg = <0x3000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -262,7 +262,7 @@ pcie6_intc: interrupt-controller { pcie7: pcie@7,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x88000 0 0x2000>; + assigned-addresses = <0x82003800 0 0x88000 0 0x2000>; reg = <0x3800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -290,7 +290,7 @@ pcie7_intc: interrupt-controller { pcie8: pcie@8,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>; + assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>; reg = <0x4000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; @@ -318,7 +318,7 @@ pcie8_intc: interrupt-controller { pcie9: pcie@9,0 { device_type = "pci"; - assigned-addresses = <0x82000800 0 0x42000 0 0x2000>; + assigned-addresses = <0x82004800 0 0x42000 0 0x2000>; reg = <0x4800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts index a6a2bc3b855c2e1b45b24b16d19288a93a1beb52..fcc890e3ad735f11b91844ab7ea77318d1814090 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts @@ -162,16 +162,9 @@ reserved-memory { #size-cells = <1>; ranges; - /* LPC FW cycle bridge region requires natural alignment */ - flash_memory: region@b8000000 { - no-map; - reg = <0xb8000000 0x04000000>; /* 64M */ - }; - - /* 48MB region from the end of flash to start of vga memory */ - ramoops@bc000000 { + ramoops@b3e00000 { compatible = "ramoops"; - reg = <0xbc000000 0x200000>; /* 16 * (4 * 0x8000) */ + reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */ record-size = <0x8000>; console-size = <0x8000>; ftrace-size = <0x8000>; @@ -179,6 +172,12 @@ ramoops@bc000000 { max-reason = <3>; /* KMSG_DUMP_EMERG */ }; + /* LPC FW cycle bridge region requires natural alignment */ + flash_memory: region@b4000000 { + no-map; + reg = <0xb4000000 0x04000000>; /* 64M */ + }; + /* VGA region is dictated by hardware strapping */ vga_memory: region@bf000000 { no-map; diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts index bf59a9962379d14fa2210a5bacdbde16de204fe2..4879da4cdbd25595f9c0428f663423d13a648ff2 100644 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts @@ -95,14 
+95,9 @@ reserved-memory { #size-cells = <1>; ranges; - flash_memory: region@b8000000 { - no-map; - reg = <0xb8000000 0x04000000>; /* 64M */ - }; - - ramoops@bc000000 { + ramoops@b3e00000 { compatible = "ramoops"; - reg = <0xbc000000 0x200000>; /* 16 * (4 * 0x8000) */ + reg = <0xb3e00000 0x200000>; /* 16 * (4 * 0x8000) */ record-size = <0x8000>; console-size = <0x8000>; ftrace-size = <0x8000>; @@ -110,6 +105,13 @@ ramoops@bc000000 { max-reason = <3>; /* KMSG_DUMP_EMERG */ }; + /* LPC FW cycle bridge region requires natural alignment */ + flash_memory: region@b4000000 { + no-map; + reg = <0xb4000000 0x04000000>; /* 64M */ + }; + + /* VGA region is dictated by hardware strapping */ vga_memory: region@bf000000 { no-map; compatible = "shared-dma-pool"; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 00a36fba2fd234e447e227ec01881310ae49cd73..9aee3cfd3e981b1af332425535babf1c472aea8a 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -139,7 +139,7 @@ pcie0_intc: interrupt-controller { pcie1: pcie@2 { device_type = "pci"; status = "disabled"; - assigned-addresses = <0x82002800 0 0x80000 0 0x2000>; + assigned-addresses = <0x82001000 0 0x80000 0 0x2000>; reg = <0x1000 0 0 0 0>; clocks = <&gate_clk 5>; marvell,pcie-port = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts index d10669fcd527d8bcf1e50b9e25cab61829cdcaad..9e9eba8bad5e424a6b4c1cbaadef1379fa7d92fd 100644 --- a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts +++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts @@ -366,7 +366,7 @@ flash@0 { spi-max-frequency = <20000000>; spi-rx-bus-width = <2>; label = "bmc"; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts index 491606c4f044da3939289aa608edadcbfd623124..2a394cc15284c55dbb679992ac77c1ae7956414b 100644 --- a/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts +++ b/arch/arm/boot/dts/nuvoton-npcm730-gsj.dts @@ -142,7 +142,7 @@ flash@0 { reg = <0>; spi-rx-bus-width = <2>; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts index a0c2d765262580c34a9674d7ec44865c7f5e886d..f7b38bee039bcab52ee2f550696455c5931f3672 100644 --- a/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts +++ b/arch/arm/boot/dts/nuvoton-npcm730-kudo.dts @@ -388,7 +388,7 @@ flash@0 { spi-max-frequency = <5000000>; spi-rx-bus-width = <2>; label = "bmc"; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -422,7 +422,7 @@ flash@1 { reg = <1>; spi-max-frequency = <5000000>; spi-rx-bus-width = <2>; - partitions@88000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -447,7 +447,7 @@ flash@0 { reg = <0>; spi-max-frequency = <5000000>; spi-rx-bus-width = <2>; - partitions@A0000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts index 3dad32834e5eab5877a20637bccbdffc74681ff2..f53d45fa1de8788bae45f557e2899d528b3b1144 100644 --- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts +++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts @@ -74,7 +74,7 @@ flash@0 { spi-rx-bus-width = <2>; reg = <0>; 
spi-max-frequency = <5000000>; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -135,7 +135,7 @@ flash@0 { spi-rx-bus-width = <2>; reg = <0>; spi-max-frequency = <5000000>; - partitions@A0000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts index 132e702281fc5a6916f6be78bad3525d657f2742..87359ab05db3ebaba1ea552fdce9369bca9afede 100644 --- a/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts +++ b/arch/arm/boot/dts/nuvoton-npcm750-runbmc-olympus.dts @@ -107,7 +107,7 @@ flash@0 { reg = <0>; spi-rx-bus-width = <2>; - partitions@80000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -146,7 +146,7 @@ flash@1 { reg = <1>; npcm,fiu-rx-bus-width = <2>; - partitions@88000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; @@ -173,7 +173,7 @@ flash@0 { reg = <0>; spi-rx-bus-width = <2>; - partitions@A0000000 { + partitions { compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index 942aa2278355d809d6fa35819e2aeb093f08654a..a39b940d585326ec23f8b842c6cea931a99bd9dd 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -1615,7 +1615,7 @@ wifi { }; etb@1a01000 { - compatible = "coresight-etb10", "arm,primecell"; + compatible = "arm,coresight-etb10", "arm,primecell"; reg = <0x1a01000 0x1000>; clocks = <&rpmcc RPM_QDSS_CLK>; diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index fd41243a0b2c0373d3dc2f0e235edcb9286e4e85..9d5a04a46b14eb060e35289c770e18814ac5b52b 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -47,7 +47,7 @@ clcd: clcd@fc200000 { compatible = "arm,pl110", "arm,primecell"; reg = <0xfc200000 0x1000>; interrupt-parent = <&vic1>; - interrupts = <12>; + interrupts = <13>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts index 2e3c9fbb4eb361d901ae8c30d571afb797bf170c..275167f26fd9d3c4550dbe0249ab2382956ac705 100644 --- a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts +++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts @@ -13,7 +13,6 @@ /dts-v1/; #include "stm32mp157.dtsi" -#include "stm32mp15xc.dtsi" #include "stm32mp15xx-dhcor-som.dtsi" #include "stm32mp15xx-dhcor-avenger96.dtsi" diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi index 90933077d66dec808ada9dda5565213e7dfc0e3d..b6957cbdeff5f1ce6a56f15546cd5fc851560e3d 100644 --- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi +++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi @@ -100,7 +100,7 @@ wlan_pwr: regulator-wlan { regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>; + gpio = <&gpioz 3 GPIO_ACTIVE_HIGH>; enable-active-high; }; }; diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index aecc403b2880493d6925eed075bf405b77ee03cb..7f092cb55a4171548da8e4df3c72708512df9264 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -128,15 +128,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *, #define 
TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_UPROBE 3 /* breakpointed or singlestepping */ -#define TIF_SYSCALL_TRACE 4 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ -#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ -#define TIF_NOTIFY_SIGNAL 8 /* signal notifications exist */ +#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ -#define TIF_RESTORE_SIGMASK 20 +#define TIF_RESTORE_SIGMASK 19 +#define TIF_SYSCALL_TRACE 20 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 21 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 22 /* syscall tracepoint instrumentation */ +#define TIF_SECCOMP 23 /* seccomp syscall filtering active */ + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 41b2e8abc9e691c2f58ca49a5803ecc1bf96d626..708816caf859cd442d073c8a7ca4739890e71b3b 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -43,18 +43,21 @@ static void __iomem *mmp_timer_base = TIMERS_VIRT_BASE; /* - * FIXME: the timer needs some delay to stablize the counter capture + * Read the timer through the CVWR register. Delay is required after requesting + * a read. The CR register cannot be directly read due to metastability issues + * documented in the PXA168 software manual. */ static inline uint32_t timer_read(void) { - int delay = 100; + uint32_t val; + int delay = 3; __raw_writel(1, mmp_timer_base + TMR_CVWR(1)); while (delay--) - cpu_relax(); + val = __raw_readl(mmp_timer_base + TMR_CVWR(1)); - return __raw_readl(mmp_timer_base + TMR_CVWR(1)); + return val; } static u64 notrace mmp_read_sched_clock(void) diff --git a/arch/arm/nwfpe/Makefile b/arch/arm/nwfpe/Makefile index 303400fa2cdf79d27c75e61595252ce403e44350..2aec85ab1e8b9be227e8c4513c1710b15030b6b0 100644 --- a/arch/arm/nwfpe/Makefile +++ b/arch/arm/nwfpe/Makefile @@ -11,3 +11,9 @@ nwfpe-y += fpa11.o fpa11_cpdo.o fpa11_cpdt.o \ entry.o nwfpe-$(CONFIG_FPE_NWFPE_XP) += extended_cpdo.o + +# Try really hard to avoid generating calls to __aeabi_uldivmod() from +# float64_rem() due to loop elision. 
+ifdef CONFIG_CC_IS_CLANG +CFLAGS_softfloat.o += -mllvm -replexitval=never +endif diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi index 51a63b29d4045ee3a34c34567dba1916c73026bb..a4d195e9eb8c805609f2d4d98654e167f830dd09 100644 --- a/arch/arm64/boot/dts/apple/t8103.dtsi +++ b/arch/arm64/boot/dts/apple/t8103.dtsi @@ -412,7 +412,7 @@ nvme@27bcc0000 { resets = <&ps_ans2>; }; - pcie0_dart_0: dart@681008000 { + pcie0_dart_0: iommu@681008000 { compatible = "apple,t8103-dart"; reg = <0x6 0x81008000 0x0 0x4000>; #iommu-cells = <1>; @@ -421,7 +421,7 @@ pcie0_dart_0: dart@681008000 { power-domains = <&ps_apcie_gp>; }; - pcie0_dart_1: dart@682008000 { + pcie0_dart_1: iommu@682008000 { compatible = "apple,t8103-dart"; reg = <0x6 0x82008000 0x0 0x4000>; #iommu-cells = <1>; @@ -430,7 +430,7 @@ pcie0_dart_1: dart@682008000 { power-domains = <&ps_apcie_gp>; }; - pcie0_dart_2: dart@683008000 { + pcie0_dart_2: iommu@683008000 { compatible = "apple,t8103-dart"; reg = <0x6 0x83008000 0x0 0x4000>; #iommu-cells = <1>; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index ada164d423f3d20855198cff9a33c6da4c885ad9..200f97e1c4c9cee193ef3cd1ef23b9edc1a36b0f 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -125,9 +125,12 @@ &i2c0 { /delete-property/ mrvl,i2c-fast-mode; status = "okay"; + /* MCP7940MT-I/MNY RTC */ rtc@6f { compatible = "microchip,mcp7940x"; reg = <0x6f>; + interrupt-parent = <&gpiosb>; + interrupts = <5 0>; /* GPIO2_5 */ }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts index 9b1af9c8013085871a7f1530b7092f0aab287b7f..d31a194124c91a0b140da54a596be20ae3bed609 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts @@ -26,14 +26,14 @@ chosen { stdout-path = "serial0:921600n8"; }; - cpus_fixed_vproc0: fixedregulator@0 { + cpus_fixed_vproc0: regulator-vproc-buck0 { compatible = "regulator-fixed"; regulator-name = "vproc_buck0"; regulator-min-microvolt = <1000000>; regulator-max-microvolt = <1000000>; }; - cpus_fixed_vproc1: fixedregulator@1 { + cpus_fixed_vproc1: regulator-vproc-buck1 { compatible = "regulator-fixed"; regulator-name = "vproc_buck1"; regulator-min-microvolt = <1000000>; @@ -50,7 +50,7 @@ extcon_usb1: extcon_iddig1 { id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>; }; - usb_p0_vbus: regulator@2 { + usb_p0_vbus: regulator-usb-p0-vbus { compatible = "regulator-fixed"; regulator-name = "p0_vbus"; regulator-min-microvolt = <5000000>; @@ -59,7 +59,7 @@ usb_p0_vbus: regulator@2 { enable-active-high; }; - usb_p1_vbus: regulator@3 { + usb_p1_vbus: regulator-usb-p1-vbus { compatible = "regulator-fixed"; regulator-name = "p1_vbus"; regulator-min-microvolt = <5000000>; @@ -68,7 +68,7 @@ usb_p1_vbus: regulator@3 { enable-active-high; }; - usb_p2_vbus: regulator@4 { + usb_p2_vbus: regulator-usb-p2-vbus { compatible = "regulator-fixed"; regulator-name = "p2_vbus"; regulator-min-microvolt = <5000000>; @@ -77,7 +77,7 @@ usb_p2_vbus: regulator@4 { enable-active-high; }; - usb_p3_vbus: regulator@5 { + usb_p3_vbus: regulator-usb-p3-vbus { compatible = "regulator-fixed"; regulator-name = "p3_vbus"; regulator-min-microvolt = <5000000>; diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi index e6d7453e56e0e94374ade4b89d1a6d9460c9f295..1ac0b2cf3d406d1b20247b501a07afd99cc8e258 
100644 --- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi @@ -160,70 +160,70 @@ sys_clk: dummyclk { #clock-cells = <0>; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; clock-output-names = "clk26m"; }; - clk32k: oscillator@1 { + clk32k: oscillator-32k { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32768>; clock-output-names = "clk32k"; }; - clkfpc: oscillator@2 { + clkfpc: oscillator-50m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <50000000>; clock-output-names = "clkfpc"; }; - clkaud_ext_i_0: oscillator@3 { + clkaud_ext_i_0: oscillator-aud0 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <6500000>; clock-output-names = "clkaud_ext_i_0"; }; - clkaud_ext_i_1: oscillator@4 { + clkaud_ext_i_1: oscillator-aud1 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <196608000>; clock-output-names = "clkaud_ext_i_1"; }; - clkaud_ext_i_2: oscillator@5 { + clkaud_ext_i_2: oscillator-aud2 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <180633600>; clock-output-names = "clkaud_ext_i_2"; }; - clki2si0_mck_i: oscillator@6 { + clki2si0_mck_i: oscillator-i2s0 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si0_mck_i"; }; - clki2si1_mck_i: oscillator@7 { + clki2si1_mck_i: oscillator-i2s1 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si1_mck_i"; }; - clki2si2_mck_i: oscillator@8 { + clki2si2_mck_i: oscillator-i2s2 { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; clock-output-names = "clki2si2_mck_i"; }; - clktdmin_mclk_i: oscillator@9 { + clktdmin_mclk_i: oscillator-mclk { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <30000000>; @@ -266,7 +266,7 @@ syscfg_pctl_a: syscfg_pctl_a@10005000 { reg = <0 0x10005000 0 0x1000>; }; - pio: pinctrl@10005000 { + pio: pinctrl@1000b000 { compatible = "mediatek,mt2712-pinctrl"; reg = <0 0x1000b000 0 0x1000>; mediatek,pctl-regmap = <&syscfg_pctl_a>; diff --git a/arch/arm64/boot/dts/mediatek/mt6779.dtsi b/arch/arm64/boot/dts/mediatek/mt6779.dtsi index 9bdf5145966c5f3c80a2008d28203bd2c186a9c9..dde9ce137b4f192e653be3fd33a5eedc63b968a5 100644 --- a/arch/arm64/boot/dts/mediatek/mt6779.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6779.dtsi @@ -88,14 +88,14 @@ pmu { interrupts = ; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; clock-output-names = "clk26m"; }; - clk32k: oscillator@1 { + clk32k: oscillator-32k { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32768>; @@ -117,7 +117,7 @@ soc { compatible = "simple-bus"; ranges; - gic: interrupt-controller@0c000000 { + gic: interrupt-controller@c000000 { compatible = "arm,gic-v3"; #interrupt-cells = <4>; interrupt-parent = <&gic>; @@ -138,7 +138,7 @@ ppi_cluster1: interrupt-partition-1 { }; - sysirq: intpol-controller@0c53a650 { + sysirq: intpol-controller@c53a650 { compatible = "mediatek,mt6779-sysirq", "mediatek,mt6577-sysirq"; interrupt-controller; diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi index 15616231022a242da08963ae45ee4c68ef591f10..c3677d77e0a45a78daaac27ea73ce99a7bdedc01 100644 --- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi @@ 
-95,7 +95,7 @@ cpu9: cpu@201 { }; }; - clk26m: oscillator@0 { + clk26m: oscillator-26m { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <26000000>; diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi index 72e0d9722e07af74c9ab6cc9ec71b8d0e22abfcf..35e01fa2d314b8b0440bade974d64b6e98090dec 100644 --- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi @@ -14,7 +14,7 @@ / { #address-cells = <2>; #size-cells = <2>; - clk40m: oscillator@0 { + clk40m: oscillator-40m { compatible = "fixed-clock"; clock-frequency = <40000000>; #clock-cells = <0>; @@ -112,6 +112,12 @@ infracfg: infracfg@10001000 { #clock-cells = <1>; }; + wed_pcie: wed-pcie@10003000 { + compatible = "mediatek,mt7986-wed-pcie", + "syscon"; + reg = <0 0x10003000 0 0x10>; + }; + topckgen: topckgen@1001b000 { compatible = "mediatek,mt7986-topckgen", "syscon"; reg = <0 0x1001B000 0 0x1000>; @@ -168,7 +174,7 @@ sgmiisys1: syscon@10070000 { #clock-cells = <1>; }; - trng: trng@1020f000 { + trng: rng@1020f000 { compatible = "mediatek,mt7986-rng", "mediatek,mt7623-rng"; reg = <0 0x1020f000 0 0x100>; @@ -228,12 +234,6 @@ ethsys: syscon@15000000 { #reset-cells = <1>; }; - wed_pcie: wed-pcie@10003000 { - compatible = "mediatek,mt7986-wed-pcie", - "syscon"; - reg = <0 0x10003000 0 0x10>; - }; - wed0: wed@15010000 { compatible = "mediatek,mt7986-wed", "syscon"; diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index a70b669c49baa3ccc3314320c0c4d2c8841f5c42..402136bfd5350b044d7fb74c6338c3273c14e0f5 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -1678,7 +1678,7 @@ gpu: gpu@13040000 { ; interrupt-names = "job", "mmu", "gpu"; - clocks = <&topckgen CLK_TOP_MFGPLL_CK>; + clocks = <&mfgcfg CLK_MFG_BG3D>; power-domains = <&spm MT8183_POWER_DOMAIN_MFG_CORE0>, diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts index 4fbd99eb496a2a19a3c14974af6ec573307ce06d..dec85d2548384ca5ed1ac8e4868d3d20c4f8cc85 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts @@ -56,10 +56,10 @@ reserved-memory { #size-cells = <2>; ranges; - /* 192 KiB reserved for ARM Trusted Firmware (BL31) */ + /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ bl31_secmon_reserved: secmon@54600000 { no-map; - reg = <0 0x54600000 0x0 0x30000>; + reg = <0 0x54600000 0x0 0x200000>; }; /* 12 MiB reserved for OP-TEE (BL32) diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index 905d1a90b406c31d37e24966c4e0cd3cd9152bce..0b85b5874a4f9d0c9dc54c4f570881331a3f6c49 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -36,7 +36,7 @@ cpu0: cpu@0 { enable-method = "psci"; performance-domains = <&performance 0>; clock-frequency = <1701000000>; - capacity-dmips-mhz = <578>; + capacity-dmips-mhz = <308>; cpu-idle-states = <&cpu_off_l &cluster_off_l>; next-level-cache = <&l2_0>; #cooling-cells = <2>; @@ -49,7 +49,7 @@ cpu1: cpu@100 { enable-method = "psci"; performance-domains = <&performance 0>; clock-frequency = <1701000000>; - capacity-dmips-mhz = <578>; + capacity-dmips-mhz = <308>; cpu-idle-states = <&cpu_off_l &cluster_off_l>; next-level-cache = <&l2_0>; #cooling-cells = <2>; @@ -62,7 +62,7 @@ cpu2: cpu@200 { enable-method = "psci"; performance-domains = <&performance 0>; clock-frequency = 
<1701000000>; - capacity-dmips-mhz = <578>; + capacity-dmips-mhz = <308>; cpu-idle-states = <&cpu_off_l &cluster_off_l>; next-level-cache = <&l2_0>; #cooling-cells = <2>; @@ -75,7 +75,7 @@ cpu3: cpu@300 { enable-method = "psci"; performance-domains = <&performance 0>; clock-frequency = <1701000000>; - capacity-dmips-mhz = <578>; + capacity-dmips-mhz = <308>; cpu-idle-states = <&cpu_off_l &cluster_off_l>; next-level-cache = <&l2_0>; #cooling-cells = <2>; diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi index 8ee1529683a34321191dbb9f917092199d8a459d..ec8dfb3d1c6d6914f03990b230001865f896e528 100644 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi @@ -17,7 +17,7 @@ chosen { }; firmware { - optee: optee@4fd00000 { + optee: optee { compatible = "linaro,optee-tz"; method = "smc"; }; @@ -209,7 +209,7 @@ pins_cmd_dat { }; }; - i2c0_pins_a: i2c0@0 { + i2c0_pins_a: i2c0 { pins1 { pinmux = , ; @@ -217,7 +217,7 @@ pins1 { }; }; - i2c2_pins_a: i2c2@0 { + i2c2_pins_a: i2c2 { pins1 { pinmux = , ; diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi index 0170bfa8a467906e862c3d8ff3b8986b41e535a6..dfe2cf2f4b2189f232dc0049f7322127fd8b9945 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi @@ -1965,7 +1965,7 @@ pcie@140c0000 { bus-range = <0x0 0xff>; - ranges = <0x43000000 0x35 0x40000000 0x35 0x40000000 0x2 0xe8000000>, /* prefetchable memory (11904 MB) */ + ranges = <0x43000000 0x35 0x40000000 0x35 0x40000000 0x2 0xc0000000>, /* prefetchable memory (11264 MB) */ <0x02000000 0x0 0x40000000 0x38 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */ <0x01000000 0x0 0x2c100000 0x00 0x2c100000 0x0 0x00100000>; /* downstream I/O (1 MB) */ @@ -2178,7 +2178,7 @@ pcie@14140000 { bus-range = <0x0 0xff>; ranges = <0x43000000 0x21 0x00000000 0x21 0x00000000 0x0 0x28000000>, /* prefetchable memory (640 MB) */ - <0x02000000 0x0 0x40000000 0x21 0xe8000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */ + <0x02000000 0x0 0x40000000 0x21 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */ <0x01000000 0x0 0x34100000 0x00 0x34100000 0x0 0x00100000>; /* downstream I/O (1 MB) */ interconnects = <&mc TEGRA234_MEMORY_CLIENT_PCIE3R &emc>, @@ -2336,7 +2336,7 @@ pcie@141a0000 { bus-range = <0x0 0xff>; - ranges = <0x43000000 0x27 0x40000000 0x27 0x40000000 0x3 0xe8000000>, /* prefetchable memory (16000 MB) */ + ranges = <0x43000000 0x28 0x00000000 0x28 0x00000000 0x3 0x28000000>, /* prefetchable memory (12928 MB) */ <0x02000000 0x0 0x40000000 0x2b 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */ <0x01000000 0x0 0x3a100000 0x00 0x3a100000 0x0 0x00100000>; /* downstream I/O (1 MB) */ @@ -2442,7 +2442,7 @@ pcie@141e0000 { bus-range = <0x0 0xff>; - ranges = <0x43000000 0x2e 0x40000000 0x2e 0x40000000 0x3 0xe8000000>, /* prefetchable memory (16000 MB) */ + ranges = <0x43000000 0x30 0x00000000 0x30 0x00000000 0x2 0x28000000>, /* prefetchable memory (8832 MB) */ <0x02000000 0x0 0x40000000 0x32 0x28000000 0x0 0x08000000>, /* non-prefetchable memory (128 MB) */ <0x01000000 0x0 0x3e100000 0x00 0x3e100000 0x0 0x00100000>; /* downstream I/O (1 MB) */ diff --git a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts index 1ba2eca33c7b6ada5731064472915c9a0aa5d51c..6a716c83e5f1db86d9061b38f8b4e5c88a1af38f 100644 --- 
a/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts +++ b/arch/arm64/boot/dts/qcom/ipq6018-cp01-c1.dts @@ -37,6 +37,8 @@ &blsp1_i2c3 { &blsp1_spi1 { cs-select = <0>; + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; status = "okay"; flash@0 { diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index a831064700ee89df47c8db12dcfa1a5ffa920cb3..9743cb270639d50f10069c2c36ac25692212eda7 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -1345,7 +1345,7 @@ bam_dmux_dma: dma-controller@4044000 { }; mpss: remoteproc@4080000 { - compatible = "qcom,msm8916-mss-pil", "qcom,q6v5-pil"; + compatible = "qcom,msm8916-mss-pil"; reg = <0x04080000 0x100>, <0x04020000 0x040>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index aba717644391950e65fb9b66f06aced99fc1581c..1107befc3b0914ecb0b04ed041b679399074c771 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -144,82 +144,92 @@ cluster0_opp: opp-table-cluster0 { /* Nominal fmax for now */ opp-307200000 { opp-hz = /bits/ 64 <307200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-422400000 { opp-hz = /bits/ 64 <422400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-480000000 { opp-hz = /bits/ 64 <480000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-556800000 { opp-hz = /bits/ 64 <556800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-652800000 { opp-hz = /bits/ 64 <652800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-729600000 { opp-hz = /bits/ 64 <729600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-844800000 { opp-hz = /bits/ 64 <844800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-960000000 { opp-hz = /bits/ 64 <960000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1036800000 { opp-hz = /bits/ 64 <1036800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1113600000 { opp-hz = /bits/ 64 <1113600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1190400000 { opp-hz = /bits/ 64 <1190400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1228800000 { opp-hz = /bits/ 64 <1228800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1324800000 { opp-hz = /bits/ 64 <1324800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x5>; + clock-latency-ns = <200000>; + }; + opp-1363200000 { + opp-hz = /bits/ 64 <1363200000>; + opp-supported-hw = <0x2>; clock-latency-ns = <200000>; }; opp-1401600000 { opp-hz = /bits/ 64 <1401600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x5>; clock-latency-ns = <200000>; }; opp-1478400000 { opp-hz = /bits/ 64 <1478400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + }; + opp-1497600000 { + opp-hz = /bits/ 64 <1497600000>; + opp-supported-hw = <0x04>; clock-latency-ns = <200000>; }; opp-1593600000 { opp-hz = /bits/ 64 <1593600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; 
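The opp-supported-hw rework above replaces the catch-all 0x77 mask with one bit per fused speed bin (0x1 for bin 0, 0x2 for bin 1, 0x4 for bin 2), and the new msm8996pro.dtsi further down shifts those bits into the high nibble. The sketch below is illustrative only, not the actual OPP-core or qcom cpufreq code, and the helper name and signature are invented; it just shows the bitwise test this encoding relies on:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper; the real check lives in the OPP core. */
static bool opp_usable_for_bin(uint32_t supported_hw, unsigned int speed_bin,
			       bool is_msm8996pro)
{
	/* Plain MSM8996: bit N selects speed bin N (0x1, 0x2, 0x4). */
	unsigned int shift = speed_bin;

	/* MSM8996 Pro: the bin is shifted into the high nibble (0x10, 0x20, 0x40). */
	if (is_msm8996pro)
		shift += 4;

	return supported_hw & (1u << shift);
}

/*
 * Example: opp-1478400000 above carries opp-supported-hw = <0x1>, so with
 * this encoding it is only usable on speed bin 0 of the plain MSM8996.
 */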
}; }; @@ -232,127 +242,137 @@ cluster1_opp: opp-table-cluster1 { /* Nominal fmax for now */ opp-307200000 { opp-hz = /bits/ 64 <307200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-403200000 { opp-hz = /bits/ 64 <403200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-480000000 { opp-hz = /bits/ 64 <480000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-556800000 { opp-hz = /bits/ 64 <556800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-652800000 { opp-hz = /bits/ 64 <652800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-729600000 { opp-hz = /bits/ 64 <729600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-806400000 { opp-hz = /bits/ 64 <806400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-883200000 { opp-hz = /bits/ 64 <883200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-940800000 { opp-hz = /bits/ 64 <940800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1036800000 { opp-hz = /bits/ 64 <1036800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1113600000 { opp-hz = /bits/ 64 <1113600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1190400000 { opp-hz = /bits/ 64 <1190400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1248000000 { opp-hz = /bits/ 64 <1248000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1324800000 { opp-hz = /bits/ 64 <1324800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1401600000 { opp-hz = /bits/ 64 <1401600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1478400000 { opp-hz = /bits/ 64 <1478400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1555200000 { opp-hz = /bits/ 64 <1555200000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1632000000 { opp-hz = /bits/ 64 <1632000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1708800000 { opp-hz = /bits/ 64 <1708800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; clock-latency-ns = <200000>; }; opp-1785600000 { opp-hz = /bits/ 64 <1785600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x7>; + clock-latency-ns = <200000>; + }; + opp-1804800000 { + opp-hz = /bits/ 64 <1804800000>; + opp-supported-hw = <0x6>; clock-latency-ns = <200000>; }; opp-1824000000 { opp-hz = /bits/ 64 <1824000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; + clock-latency-ns = <200000>; + }; + opp-1900800000 { + opp-hz = /bits/ 64 <1900800000>; + opp-supported-hw = <0x4>; clock-latency-ns = <200000>; }; opp-1920000000 { opp-hz = /bits/ 64 <1920000000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; opp-1996800000 { opp-hz = /bits/ 64 <1996800000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; opp-2073600000 { opp-hz = /bits/ 64 
<2073600000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; opp-2150400000 { opp-hz = /bits/ 64 <2150400000>; - opp-supported-hw = <0x77>; + opp-supported-hw = <0x1>; clock-latency-ns = <200000>; }; }; @@ -1213,17 +1233,17 @@ gpu_opp_table: opp-table { compatible = "operating-points-v2"; /* - * 624Mhz and 560Mhz are only available on speed - * bin (1 << 0). All the rest are available on - * all bins of the hardware + * 624Mhz is only available on speed bins 0 and 3. + * 560Mhz is only available on speed bins 0, 2 and 3. + * All the rest are available on all bins of the hardware. */ opp-624000000 { opp-hz = /bits/ 64 <624000000>; - opp-supported-hw = <0x01>; + opp-supported-hw = <0x09>; }; opp-560000000 { opp-hz = /bits/ 64 <560000000>; - opp-supported-hw = <0x01>; + opp-supported-hw = <0x0d>; }; opp-510000000 { opp-hz = /bits/ 64 <510000000>; @@ -3342,7 +3362,7 @@ wcd9335: codec@1{ interrupt-names = "intr1", "intr2"; interrupt-controller; #interrupt-cells = <1>; - reset-gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>; slim-ifc-dev = <&tasha_ifd>; diff --git a/arch/arm64/boot/dts/qcom/msm8996pro.dtsi b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..63e1b4ec7a360e2ea18c6182192baca336514d86 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8996pro.dtsi @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2022, Linaro Limited + */ + +#include "msm8996.dtsi" + +/ { + /delete-node/ opp-table-cluster0; + /delete-node/ opp-table-cluster1; + + /* + * On MSM8996 Pro the cpufreq driver shifts speed bins into the high + * nibble of supported hw, so speed bin 0 becomes 0x10, speed bin 1 + * becomes 0x20, speed 2 becomes 0x40. 
+ */ + + cluster0_opp: opp-table-cluster0 { + compatible = "operating-points-v2-kryo-cpu"; + nvmem-cells = <&speedbin_efuse>; + opp-shared; + + opp-307200000 { + opp-hz = /bits/ 64 <307200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-384000000 { + opp-hz = /bits/ 64 <384000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-460800000 { + opp-hz = /bits/ 64 <460800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-537600000 { + opp-hz = /bits/ 64 <537600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-614400000 { + opp-hz = /bits/ 64 <614400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-691200000 { + opp-hz = /bits/ 64 <691200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-768000000 { + opp-hz = /bits/ 64 <768000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-844800000 { + opp-hz = /bits/ 64 <844800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-902400000 { + opp-hz = /bits/ 64 <902400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-979200000 { + opp-hz = /bits/ 64 <979200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1056000000 { + opp-hz = /bits/ 64 <1056000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1132800000 { + opp-hz = /bits/ 64 <1132800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1209600000 { + opp-hz = /bits/ 64 <1209600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1286400000 { + opp-hz = /bits/ 64 <1286400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1363200000 { + opp-hz = /bits/ 64 <1363200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1440000000 { + opp-hz = /bits/ 64 <1440000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1516800000 { + opp-hz = /bits/ 64 <1516800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1593600000 { + opp-hz = /bits/ 64 <1593600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1996800000 { + opp-hz = /bits/ 64 <1996800000>; + opp-supported-hw = <0x20>; + clock-latency-ns = <200000>; + }; + opp-2188800000 { + opp-hz = /bits/ 64 <2188800000>; + opp-supported-hw = <0x10>; + clock-latency-ns = <200000>; + }; + }; + + cluster1_opp: opp-table-cluster1 { + compatible = "operating-points-v2-kryo-cpu"; + nvmem-cells = <&speedbin_efuse>; + opp-shared; + + opp-307200000 { + opp-hz = /bits/ 64 <307200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-384000000 { + opp-hz = /bits/ 64 <384000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-460800000 { + opp-hz = /bits/ 64 <460800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-537600000 { + opp-hz = /bits/ 64 <537600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-614400000 { + opp-hz = /bits/ 64 <614400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-691200000 { + opp-hz = /bits/ 64 <691200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-748800000 { + opp-hz = /bits/ 64 <748800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-825600000 { + opp-hz = /bits/ 64 <825600000>; + opp-supported-hw = <0x70>; + 
clock-latency-ns = <200000>; + }; + opp-902400000 { + opp-hz = /bits/ 64 <902400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-979200000 { + opp-hz = /bits/ 64 <979200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1056000000 { + opp-hz = /bits/ 64 <1056000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1132800000 { + opp-hz = /bits/ 64 <1132800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1209600000 { + opp-hz = /bits/ 64 <1209600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1286400000 { + opp-hz = /bits/ 64 <1286400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1363200000 { + opp-hz = /bits/ 64 <1363200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1440000000 { + opp-hz = /bits/ 64 <1440000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1516800000 { + opp-hz = /bits/ 64 <1516800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1593600000 { + opp-hz = /bits/ 64 <1593600000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1670400000 { + opp-hz = /bits/ 64 <1670400000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1747200000 { + opp-hz = /bits/ 64 <1747200000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1824000000 { + opp-hz = /bits/ 64 <1824000000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1900800000 { + opp-hz = /bits/ 64 <1900800000>; + opp-supported-hw = <0x70>; + clock-latency-ns = <200000>; + }; + opp-1977600000 { + opp-hz = /bits/ 64 <1977600000>; + opp-supported-hw = <0x30>; + clock-latency-ns = <200000>; + }; + opp-2054400000 { + opp-hz = /bits/ 64 <2054400000>; + opp-supported-hw = <0x30>; + clock-latency-ns = <200000>; + }; + opp-2150400000 { + opp-hz = /bits/ 64 <2150400000>; + opp-supported-hw = <0x30>; + clock-latency-ns = <200000>; + }; + opp-2246400000 { + opp-hz = /bits/ 64 <2246400000>; + opp-supported-hw = <0x10>; + clock-latency-ns = <200000>; + }; + opp-2342400000 { + opp-hz = /bits/ 64 <2342400000>; + opp-supported-hw = <0x10>; + clock-latency-ns = <200000>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/pm6350.dtsi b/arch/arm64/boot/dts/qcom/pm6350.dtsi index ecf9b99191828c40006473696bfef74af0414cfd..68245d78d2b93bbf8dedb050e4eb4f35d117a102 100644 --- a/arch/arm64/boot/dts/qcom/pm6350.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6350.dtsi @@ -3,6 +3,7 @@ * Copyright (c) 2021, Luca Weiss */ +#include #include &spmi_bus { diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi index e1622b16c08bd9dbe185113605672c09ae541d84..02a69ac0149b29dcb4f7210b0c79262d01e469d7 100644 --- a/arch/arm64/boot/dts/qcom/pm660.dtsi +++ b/arch/arm64/boot/dts/qcom/pm660.dtsi @@ -163,7 +163,7 @@ vadc_vph_pwr: vph_pwr@83 { qcom,pre-scaling = <1 3>; }; - vcoin: vcoin@83 { + vcoin: vcoin@85 { reg = ; qcom,decimation = <1024>; qcom,pre-scaling = <1 3>; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi index 1bd6c7dcd9e91784e80bd51f9080039da280ee41..bfab67f4a7c9c44574b325fe6be10ab3630e8e01 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi @@ -194,6 +194,12 @@ pinmux { pins = "gpio49", "gpio50", "gpio51", "gpio52"; function = "mi2s_1"; }; + + pinconf { + 
pins = "gpio49", "gpio50", "gpio51", "gpio52"; + drive-strength = <2>; + bias-pull-down; + }; }; &ts_reset_l { diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dts b/arch/arm64/boot/dts/qcom/sc7280-idp.dts index 7559164cdda080a6c9905c66fc02a0382345a488..e2e37a0292ad693a9789ee5615d320a5d26a1522 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-idp.dts +++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dts @@ -10,7 +10,6 @@ #include #include "sc7280-idp.dtsi" #include "pmr735a.dtsi" -#include "sc7280-herobrine-lte-sku.dtsi" / { model = "Qualcomm Technologies, Inc. sc7280 IDP SKU1 platform"; diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi index cd432a2856a7b154c2fbe8b7196132451a24a473..ca50f0ba9b8159ffcf5a8b87fd97db6c4bf1f7d4 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dtsi @@ -13,6 +13,7 @@ #include "pmk8350.dtsi" #include "sc7280-chrome-common.dtsi" +#include "sc7280-herobrine-lte-sku.dtsi" / { aliases { @@ -34,7 +35,7 @@ wcd9385: audio-codec-1 { pinctrl-0 = <&wcd_reset_n>; pinctrl-1 = <&wcd_reset_n_sleep>; - reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>; qcom,rx-device = <&wcd_rx>; qcom,tx-device = <&wcd_tx>; diff --git a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi index 4b8c676b0bb196e784d2324a17b24b0e757db451..f7665b3799233d5d5e5a80a6ca0f0883aa81711d 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-qcard.dtsi @@ -37,7 +37,7 @@ wcd9385: audio-codec-1 { pinctrl-0 = <&wcd_reset_n>, <&us_euro_hs_sel>; pinctrl-1 = <&wcd_reset_n_sleep>, <&us_euro_hs_sel>; - reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>; us-euro-gpios = <&tlmm 81 GPIO_ACTIVE_HIGH>; qcom,rx-device = <&wcd_rx>; diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi index 212d63d5cbf28a94ff7d4d7a2cd3491bc1e3d678..9f2a136d5cbc5095c1a4e1beb52070802a931b5e 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi @@ -855,12 +855,13 @@ ufs_mem_hc: ufs@1d84000 { required-opps = <&rpmhpd_opp_nom>; iommus = <&apps_smmu 0xe0 0x0>; + dma-coherent; clocks = <&gcc GCC_UFS_PHY_AXI_CLK>, <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>, <&gcc GCC_UFS_PHY_AHB_CLK>, <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>, - <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_UFS_REF_CLKREF_CLK>, <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>, <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>, <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>; @@ -891,7 +892,7 @@ ufs_mem_phy: phy@1d87000 { ranges; clock-names = "ref", "ref_aux"; - clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>, + clocks = <&gcc GCC_UFS_CARD_CLKREF_CLK>, <&gcc GCC_UFS_PHY_PHY_AUX_CLK>; resets = <&ufs_mem_hc 0>; @@ -923,12 +924,13 @@ ufs_card_hc: ufs@1da4000 { power-domains = <&gcc UFS_CARD_GDSC>; iommus = <&apps_smmu 0x4a0 0x0>; + dma-coherent; clocks = <&gcc GCC_UFS_CARD_AXI_CLK>, <&gcc GCC_AGGRE_UFS_CARD_AXI_CLK>, <&gcc GCC_UFS_CARD_AHB_CLK>, <&gcc GCC_UFS_CARD_UNIPRO_CORE_CLK>, - <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_UFS_REF_CLKREF_CLK>, <&gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>, <&gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>, <&gcc GCC_UFS_CARD_RX_SYMBOL_1_CLK>; @@ -959,7 +961,7 @@ ufs_card_phy: phy@1da7000 { ranges; clock-names = "ref", "ref_aux"; - clocks = <&gcc GCC_UFS_REF_CLKREF_CLK>, + clocks = <&gcc GCC_UFS_1_CARD_CLKREF_CLK>, <&gcc GCC_UFS_CARD_PHY_AUX_CLK>; resets = <&ufs_card_hc 0>; diff --git a/arch/arm64/boot/dts/qcom/sdm630.dtsi 
b/arch/arm64/boot/dts/qcom/sdm630.dtsi index b51b85f583e5d2803a28419b2d5ffeea6f184371..e119060ac56cb106105df298753d266366bc35e5 100644 --- a/arch/arm64/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm630.dtsi @@ -779,7 +779,7 @@ rx-cts-rts { pins = "gpio17", "gpio18", "gpio19"; function = "gpio"; drive-strength = <2>; - bias-no-pull; + bias-disable; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi index b5eb8f7eca1d5f8d48442fc1afe17b617d0795ee..b5f11fbcc30044b60d897581d2529a54e9c33a91 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi @@ -1436,7 +1436,7 @@ ap_suspend_l_assert: ap_suspend_l_assert { config { pins = "gpio126"; function = "gpio"; - bias-no-pull; + bias-disable; drive-strength = <2>; output-low; }; @@ -1446,7 +1446,7 @@ ap_suspend_l_deassert: ap_suspend_l_deassert { config { pins = "gpio126"; function = "gpio"; - bias-no-pull; + bias-disable; drive-strength = <2>; output-high; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index 132417e2d11e52a7a706dbcfc45c40ecb6748303..a3e15dedd60cbb9c9b772ce7645cc96ad9314bd2 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -1123,7 +1123,10 @@ &wifi { /* PINCTRL - additions to nodes defined in sdm845.dtsi */ &qup_spi2_default { - drive-strength = <16>; + pinconf { + pins = "gpio27", "gpio28", "gpio29", "gpio30"; + drive-strength = <16>; + }; }; &qup_uart3_default{ diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts index afc17e4d403fcea2c5a391e0860a3dd457fef290..f982594896796478404e40575a242205061ec9a2 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts @@ -628,7 +628,7 @@ sde_dsi_suspend: sde-dsi-suspend { }; wcd_intr_default: wcd-intr-default { - pins = "goui54"; + pins = "gpio54"; function = "gpio"; input-enable; bias-pull-down; diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index be59a8ba9c1fe60767d4c53ce267518c2d5a1185..74f43da51fa504b42c9d54c3715bd7010991eb91 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -487,8 +487,10 @@ pinconf { }; &qup_i2c12_default { - drive-strength = <2>; - bias-disable; + pinmux { + drive-strength = <2>; + bias-disable; + }; }; &qup_uart6_default { diff --git a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts index f954fe5cb61ab22641b18c88539d01ac89a68afc..d028a7eb364a680752be6e4988f8ddd0631682c1 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts @@ -415,8 +415,10 @@ pinconf { }; &qup_i2c12_default { - drive-strength = <2>; - bias-disable; + pinmux { + drive-strength = <2>; + bias-disable; + }; }; &qup_uart6_default { diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi index 1fe3fa3ad877067fab49ebd543434c3df25787d9..7818fb6c5a10a8c8ed004ebbfabd03c84afc2c09 100644 --- a/arch/arm64/boot/dts/qcom/sm6125.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi @@ -458,7 +458,7 @@ rpm_msg_ram: sram@45f0000 { sdhc_1: mmc@4744000 { compatible = "qcom,sm6125-sdhci", "qcom,sdhci-msm-v5"; reg = <0x04744000 0x1000>, <0x04745000 0x1000>; - reg-names = "hc", 
"core"; + reg-names = "hc", "cqhci"; interrupts = , ; diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi index c39de7d3ace0bc61af41a69253e5fc287d64c6dd..7be5fc8dec671b725abab778f0b2ad0e7b4a5bec 100644 --- a/arch/arm64/boot/dts/qcom/sm6350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi @@ -485,6 +485,7 @@ sdhc_1: mmc@7c4000 { interrupts = , ; interrupt-names = "hc_irq", "pwr_irq"; + iommus = <&apps_smmu 0x60 0x0>; clocks = <&gcc GCC_SDCC1_AHB_CLK>, <&gcc GCC_SDCC1_APPS_CLK>, @@ -1063,6 +1064,7 @@ sdhc_2: mmc@8804000 { interrupts = , ; interrupt-names = "hc_irq", "pwr_irq"; + iommus = <&apps_smmu 0x560 0x0>; clocks = <&gcc GCC_SDCC2_AHB_CLK>, <&gcc GCC_SDCC2_APPS_CLK>, @@ -1148,15 +1150,11 @@ usb_1_ssphy: usb3-phy@88e9200 { dp_phy: dp-phy@88ea200 { reg = <0 0x088ea200 0 0x200>, <0 0x088ea400 0 0x200>, - <0 0x088eac00 0 0x400>, + <0 0x088eaa00 0 0x200>, <0 0x088ea600 0 0x200>, - <0 0x088ea800 0 0x200>, - <0 0x088eaa00 0 0x100>; + <0 0x088ea800 0 0x200>; #phy-cells = <0>; #clock-cells = <1>; - clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; - clock-names = "pipe0"; - clock-output-names = "usb3_phy_pipe_clk_src"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index cef8c4f4f0ff278c8d894061d12c4b6ea74d69d3..4a527a64772b47b69658a90e5fd2c7ceaf68edac 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -2032,11 +2032,11 @@ ufs_mem_phy: phy@1d87000 { status = "disabled"; ufs_mem_phy_lanes: phy@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, - <0 0x01d87800 0 0x108>, - <0 0x01d87a00 0 0x1e0>; + reg = <0 0x01d87400 0 0x16c>, + <0 0x01d87600 0 0x200>, + <0 0x01d87c00 0 0x200>, + <0 0x01d87800 0 0x16c>, + <0 0x01d87a00 0 0x200>; #phy-cells = <0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts index a102aa5efa3266d33ef11ffec9d343f869430b41..a05fe468e0b41f88d1b5df93e5aedc8d93e26f28 100644 --- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts @@ -635,7 +635,7 @@ &soc { wcd938x: codec { compatible = "qcom,wcd9380-codec"; #sound-dai-cells = <1>; - reset-gpios = <&tlmm 32 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>; vdd-buck-supply = <&vreg_s4a_1p8>; vdd-rxtx-supply = <&vreg_s4a_1p8>; vdd-io-supply = <&vreg_s4a_1p8>; diff --git a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi index 5428aab3058dd975c3a2937846dda94ab6bcdc27..e4769dcfaad7bc9fdfd6058120b2100a99d56a4e 100644 --- a/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250-sony-xperia-edo.dtsi @@ -619,7 +619,7 @@ ts_int_default: ts-int-default { pins = "gpio39"; function = "gpio"; drive-strength = <2>; - bias-disabled; + bias-disable; input-enable; }; diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index e276eed1f8e2c300c06563670d5d730d79aa74cb..29e352a577311022818782fc3bc6af9e4c913f9b 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -2180,11 +2180,11 @@ ufs_mem_phy: phy@1d87000 { status = "disabled"; ufs_mem_phy_lanes: phy@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, - <0 0x01d87800 0 0x108>, - <0 0x01d87a00 0 0x1e0>; + reg = <0 0x01d87400 0 0x16c>, + <0 0x01d87600 0 0x200>, + <0 0x01d87c00 0 0x200>, + <0 0x01d87800 0 0x16c>, + <0 0x01d87a00 0 
0x200>; #phy-cells = <0>; }; }; @@ -2455,7 +2455,7 @@ data { pins = "gpio7"; function = "dmic1_data"; drive-strength = <2>; - pull-down; + bias-pull-down; input-enable; }; }; @@ -2892,15 +2892,11 @@ usb_1_ssphy: usb3-phy@88e9200 { dp_phy: dp-phy@88ea200 { reg = <0 0x088ea200 0 0x200>, <0 0x088ea400 0 0x200>, - <0 0x088eac00 0 0x400>, + <0 0x088eaa00 0 0x200>, <0 0x088ea600 0 0x200>, - <0 0x088ea800 0 0x200>, - <0 0x088eaa00 0 0x100>; + <0 0x088ea800 0 0x200>; #phy-cells = <0>; #clock-cells = <1>; - clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; - clock-names = "pipe0"; - clock-output-names = "usb3_phy_pipe_clk_src"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index a86d9ea93b9d4af71dd0509fe554d282e9fe4820..a6270d97a319219411186643891421e1fec33edd 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -2142,11 +2142,11 @@ ufs_mem_phy: phy@1d87000 { status = "disabled"; ufs_mem_phy_lanes: phy@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, - <0 0x01d87800 0 0x108>, - <0 0x01d87a00 0 0x1e0>; + reg = <0 0x01d87400 0 0x188>, + <0 0x01d87600 0 0x200>, + <0 0x01d87c00 0 0x200>, + <0 0x01d87800 0 0x188>, + <0 0x01d87a00 0 0x200>; #phy-cells = <0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts index d68765eb6d4f97527e3bc96addb2e9036d748076..6351050bc87f2e2850779d32e9b649e3f1738c91 100644 --- a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts +++ b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara-pdx223.dts @@ -556,8 +556,6 @@ &sdhc_2 { pinctrl-1 = <&sdc2_sleep_state &sdc2_card_det_n>; vmmc-supply = <&pm8350c_l9>; vqmmc-supply = <&pm8350c_l6>; - /* Forbid SDR104/SDR50 - broken hw! */ - sdhci-caps-mask = <0x3 0x0>; no-sdio; no-mmc; status = "okay"; diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi index d32f08df743d8162f7d8cb2ecf087dfdb1b3ddf7..32a37c878a34cfd0a24576f006ab893938a17725 100644 --- a/arch/arm64/boot/dts/qcom/sm8450.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi @@ -3161,11 +3161,11 @@ ufs_mem_phy: phy@1d87000 { status = "disabled"; ufs_mem_phy_lanes: phy@1d87400 { - reg = <0 0x01d87400 0 0x108>, - <0 0x01d87600 0 0x1e0>, - <0 0x01d87c00 0 0x1dc>, - <0 0x01d87800 0 0x108>, - <0 0x01d87a00 0 0x1e0>; + reg = <0 0x01d87400 0 0x188>, + <0 0x01d87600 0 0x200>, + <0 0x01d87c00 0 0x200>, + <0 0x01d87800 0 0x188>, + <0 0x01d87a00 0 0x200>; #phy-cells = <0>; }; }; @@ -3192,6 +3192,9 @@ sdhc_2: sdhci@8804000 { bus-width = <4>; dma-coherent; + /* Forbid SDR104/SDR50 - broken hw! 
*/ + sdhci-caps-mask = <0x3 0x0>; + status = "disabled"; sdhc2_opp_table: opp-table { diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi index c2f152bcf10ec43890fdb1ab8139f93fbd5b8ec3..4092c0016035ebf7adfd7aa59f9612cc98c1687a 100644 --- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi @@ -577,7 +577,7 @@ hscif0: serial@e6540000 { reg = <0 0xe6540000 0 0x60>; interrupts = ; clocks = <&cpg CPG_MOD 514>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x31>, <&dmac0 0x30>, @@ -594,7 +594,7 @@ hscif1: serial@e6550000 { reg = <0 0xe6550000 0 0x60>; interrupts = ; clocks = <&cpg CPG_MOD 515>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x33>, <&dmac0 0x32>, @@ -611,7 +611,7 @@ hscif2: serial@e6560000 { reg = <0 0xe6560000 0 0x60>; interrupts = ; clocks = <&cpg CPG_MOD 516>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x35>, <&dmac0 0x34>, @@ -628,7 +628,7 @@ hscif3: serial@e66a0000 { reg = <0 0xe66a0000 0 0x60>; interrupts = ; clocks = <&cpg CPG_MOD 517>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x37>, <&dmac0 0x36>, @@ -657,7 +657,7 @@ scif0: serial@e6e60000 { reg = <0 0xe6e60000 0 64>; interrupts = ; clocks = <&cpg CPG_MOD 702>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x51>, <&dmac0 0x50>, @@ -674,7 +674,7 @@ scif1: serial@e6e68000 { reg = <0 0xe6e68000 0 64>; interrupts = ; clocks = <&cpg CPG_MOD 703>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x53>, <&dmac0 0x52>, @@ -691,7 +691,7 @@ scif3: serial@e6c50000 { reg = <0 0xe6c50000 0 64>; interrupts = ; clocks = <&cpg CPG_MOD 704>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x57>, <&dmac0 0x56>, @@ -708,7 +708,7 @@ scif4: serial@e6c40000 { reg = <0 0xe6c40000 0 64>; interrupts = ; clocks = <&cpg CPG_MOD 705>, - <&cpg CPG_CORE R8A779F0_CLK_S0D3_PER>, + <&cpg CPG_CORE R8A779F0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; dmas = <&dmac0 0x59>, <&dmac0 0x58>, diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi index d70f0600ae5a946950569ee701ab6efa1b4bb9fe..d58b18802cb0189da06129ecb1795cb8d1f89488 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi @@ -326,7 +326,7 @@ hscif0: serial@e6540000 { reg = <0 0xe6540000 0 96>; interrupts = ; clocks = <&cpg CPG_MOD 514>, - <&cpg CPG_CORE R8A779G0_CLK_S0D3_PER>, + <&cpg CPG_CORE R8A779G0_CLK_SASYNCPERD1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; diff --git a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi index fb1a97202c387dbc9352eb979ac83008f8ac1a5f..ebaa8cdd747d23c8bdfc5dd1396db774271da8a1 100644 --- 
a/arch/arm64/boot/dts/renesas/r9a09g011.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a09g011.dtsi @@ -48,7 +48,7 @@ soc: soc { #size-cells = <2>; ranges; - gic: interrupt-controller@82000000 { + gic: interrupt-controller@82010000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; #address-cells = <0>; @@ -126,7 +126,7 @@ cpg: clock-controller@a3500000 { i2c0: i2c@a4030000 { #address-cells = <1>; #size-cells = <0>; - compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c"; + compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c"; reg = <0 0xa4030000 0 0x80>; interrupts = , ; @@ -140,7 +140,7 @@ i2c0: i2c@a4030000 { i2c2: i2c@a4030100 { #address-cells = <1>; #size-cells = <0>; - compatible = "renesas,i2c-r9a09g011", "renesas,rzv2m-i2c"; + compatible = "renesas,r9a09g011-i2c", "renesas,rzv2m-i2c"; reg = <0 0xa4030100 0 0x80>; interrupts = , ; diff --git a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi index d0abb9aa0e9edc4e690cb58674f7a567f370896b..e3852c9463528cdbbace0e48729de7c9d025eaf1 100644 --- a/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi +++ b/arch/arm64/boot/dts/tesla/fsd-pinctrl.dtsi @@ -55,14 +55,14 @@ ufs_rst_n: ufs-rst-n-pins { samsung,pins = "gpf5-0"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; ufs_refclk_out: ufs-refclk-out-pins { samsung,pins = "gpf5-1"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; }; @@ -239,105 +239,105 @@ pwm0_out: pwm0-out-pins { samsung,pins = "gpb6-1"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; pwm1_out: pwm1-out-pins { samsung,pins = "gpb6-5"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c0_bus: hs-i2c0-bus-pins { samsung,pins = "gpb0-0", "gpb0-1"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c1_bus: hs-i2c1-bus-pins { samsung,pins = "gpb0-2", "gpb0-3"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c2_bus: hs-i2c2-bus-pins { samsung,pins = "gpb0-4", "gpb0-5"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c3_bus: hs-i2c3-bus-pins { samsung,pins = "gpb0-6", "gpb0-7"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c4_bus: hs-i2c4-bus-pins { samsung,pins = "gpb1-0", "gpb1-1"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c5_bus: hs-i2c5-bus-pins { samsung,pins = "gpb1-2", "gpb1-3"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c6_bus: hs-i2c6-bus-pins { samsung,pins = "gpb1-4", "gpb1-5"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; hs_i2c7_bus: hs-i2c7-bus-pins { samsung,pins = "gpb1-6", "gpb1-7"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; uart0_data: uart0-data-pins { samsung,pins = "gpb7-0", "gpb7-1"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; uart1_data: uart1-data-pins { samsung,pins = "gpb7-4", "gpb7-5"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; spi0_bus: spi0-bus-pins { samsung,pins = "gpb4-0", "gpb4-2", "gpb4-3"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; spi1_bus: 
spi1-bus-pins { samsung,pins = "gpb4-4", "gpb4-6", "gpb4-7"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; spi2_bus: spi2-bus-pins { samsung,pins = "gpb5-0", "gpb5-2", "gpb5-3"; samsung,pin-function = ; samsung,pin-pud = ; - samsung,pin-drv = ; + samsung,pin-drv = ; }; }; diff --git a/arch/arm64/boot/dts/tesla/fsd-pinctrl.h b/arch/arm64/boot/dts/tesla/fsd-pinctrl.h index 6ffbda362493057ad43c6c65c33df56e7a22afa8..c397d02208a086176f16a7493dd15a6d7ef11dee 100644 --- a/arch/arm64/boot/dts/tesla/fsd-pinctrl.h +++ b/arch/arm64/boot/dts/tesla/fsd-pinctrl.h @@ -16,9 +16,9 @@ #define FSD_PIN_PULL_UP 3 #define FSD_PIN_DRV_LV1 0 -#define FSD_PIN_DRV_LV2 2 -#define FSD_PIN_DRV_LV3 1 -#define FSD_PIN_DRV_LV4 3 +#define FSD_PIN_DRV_LV2 1 +#define FSD_PIN_DRV_LV4 2 +#define FSD_PIN_DRV_LV6 3 #define FSD_PIN_FUNC_INPUT 0 #define FSD_PIN_FUNC_OUTPUT 1 diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 4005a73cfea990bce88fd50e4972e76a1e03e306..ebb1c5ce7aece613aae6b2fff7eba03aa03f44d4 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -120,7 +120,6 @@ crypto: crypto@4e00000 { dmas = <&main_udmap 0xc001>, <&main_udmap 0x4002>, <&main_udmap 0x4003>; dma-names = "tx", "rx1", "rx2"; - dma-coherent; rng: rng@4e10000 { compatible = "inside-secure,safexcel-eip76"; diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi index e5be78a58682d542a3ce90a0956409b8bcef9a91..d3fb86b2ea939dc4cf761c71f568cd9f76a778c6 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi @@ -386,7 +386,6 @@ mcu_crypto: crypto@40900000 { dmas = <&mcu_udmap 0xf501>, <&mcu_udmap 0x7502>, <&mcu_udmap 0x7503>; dma-names = "tx", "rx1", "rx2"; - dma-coherent; rng: rng@40910000 { compatible = "inside-secure,safexcel-eip76"; diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 917c9dc99efaa867d82dde4a33394dc025799363..603ddda5127fa34de3d501516be7e7fca24d4613 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -337,7 +337,6 @@ main_crypto: crypto@4e00000 { dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>; dma-names = "tx", "rx1", "rx2"; - dma-coherent; rng: rng@4e10000 { compatible = "inside-secure,safexcel-eip76"; diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi index 34e7d577ae13b3deb474844bd023414c8b975554..c89f28235812a86611724745a4572c7c005b8195 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi @@ -60,7 +60,7 @@ main_gpio_intr: interrupt-controller@a00000 { #interrupt-cells = <1>; ti,sci = <&sms>; ti,sci-dev-id = <148>; - ti,interrupt-ranges = <8 360 56>; + ti,interrupt-ranges = <8 392 56>; }; main_pmx0: pinctrl@11c000 { diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi index 4d1bfabd1313a560471436110998fc3927e4be88..f0644851602cd3e1062087f1b60defe4811a0ba0 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi @@ -65,7 +65,7 @@ wkup_gpio_intr: interrupt-controller@42200000 { #interrupt-cells = <1>; ti,sci = <&sms>; ti,sci-dev-id = <125>; - ti,interrupt-ranges = <16 928 16>; + ti,interrupt-ranges = <16 960 16>; }; mcu_conf: 
syscon@40f00000 { diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 8bd80508a710d1120afc320701111b8397f34f86..4b121dc0cfba246144f710828141eced05ce53e9 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -96,6 +96,17 @@ config CRYPTO_SHA3_ARM64 Architecture: arm64 using: - ARMv8.2 Crypto Extensions +config CRYPTO_SM3_NEON + tristate "Hash functions: SM3 (NEON)" + depends on KERNEL_MODE_NEON + select CRYPTO_HASH + select CRYPTO_SM3 + help + SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012) + + Architecture: arm64 using: + - NEON (Advanced SIMD) extensions + config CRYPTO_SM3_ARM64_CE tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)" depends on KERNEL_MODE_NEON diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 24bb0c4610de24f52e06f2230a8d87da34eeee7e..087f1625e77511dfd4c92de7c1d2dcf1a4312449 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -17,6 +17,9 @@ sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o +obj-$(CONFIG_CRYPTO_SM3_NEON) += sm3-neon.o +sm3-neon-y := sm3-neon-glue.o sm3-neon-core.o + obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o diff --git a/arch/arm64/crypto/sm3-neon-core.S b/arch/arm64/crypto/sm3-neon-core.S new file mode 100644 index 0000000000000000000000000000000000000000..4357e0e51be3881bd44e7d5837294c222f0a787b --- /dev/null +++ b/arch/arm64/crypto/sm3-neon-core.S @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sm3-neon-core.S - SM3 secure hash using NEON instructions + * + * Linux/arm64 port of the libgcrypt SM3 implementation for AArch64 + * + * Copyright (C) 2021 Jussi Kivilinna + * Copyright (c) 2022 Tianjia Zhang + */ + +#include +#include +#include + +/* Context structure */ + +#define state_h0 0 +#define state_h1 4 +#define state_h2 8 +#define state_h3 12 +#define state_h4 16 +#define state_h5 20 +#define state_h6 24 +#define state_h7 28 + +/* Stack structure */ + +#define STACK_W_SIZE (32 * 2 * 3) + +#define STACK_W (0) +#define STACK_SIZE (STACK_W + STACK_W_SIZE) + +/* Register macros */ + +#define RSTATE x0 +#define RDATA x1 +#define RNBLKS x2 +#define RKPTR x28 +#define RFRAME x29 + +#define ra w3 +#define rb w4 +#define rc w5 +#define rd w6 +#define re w7 +#define rf w8 +#define rg w9 +#define rh w10 + +#define t0 w11 +#define t1 w12 +#define t2 w13 +#define t3 w14 +#define t4 w15 +#define t5 w16 +#define t6 w17 + +#define k_even w19 +#define k_odd w20 + +#define addr0 x21 +#define addr1 x22 + +#define s0 w23 +#define s1 w24 +#define s2 w25 +#define s3 w26 + +#define W0 v0 +#define W1 v1 +#define W2 v2 +#define W3 v3 +#define W4 v4 +#define W5 v5 + +#define XTMP0 v6 +#define XTMP1 v7 +#define XTMP2 v16 +#define XTMP3 v17 +#define XTMP4 v18 +#define XTMP5 v19 +#define XTMP6 v20 + +/* Helper macros. */ + +#define _(...) /*_*/ + +#define clear_vec(x) \ + movi x.8h, #0; + +#define rolw(o, a, n) \ + ror o, a, #(32 - n); + +/* Round function macros. 
*/ + +#define GG1_1(x, y, z, o, t) \ + eor o, x, y; +#define GG1_2(x, y, z, o, t) \ + eor o, o, z; +#define GG1_3(x, y, z, o, t) + +#define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t) +#define FF1_2(x, y, z, o, t) +#define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t) + +#define GG2_1(x, y, z, o, t) \ + bic o, z, x; +#define GG2_2(x, y, z, o, t) \ + and t, y, x; +#define GG2_3(x, y, z, o, t) \ + eor o, o, t; + +#define FF2_1(x, y, z, o, t) \ + eor o, x, y; +#define FF2_2(x, y, z, o, t) \ + and t, x, y; \ + and o, o, z; +#define FF2_3(x, y, z, o, t) \ + eor o, o, t; + +#define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ + K_LOAD(round); \ + ldr t5, [sp, #(wtype##_W1_ADDR(round, widx))]; \ + rolw(t0, a, 12); /* rol(a, 12) => t0 */ \ + IOP(1, iop_param); \ + FF##i##_1(a, b, c, t1, t2); \ + ldr t6, [sp, #(wtype##_W1W2_ADDR(round, widx))]; \ + add k, k, e; \ + IOP(2, iop_param); \ + GG##i##_1(e, f, g, t3, t4); \ + FF##i##_2(a, b, c, t1, t2); \ + IOP(3, iop_param); \ + add k, k, t0; \ + add h, h, t5; \ + add d, d, t6; /* w1w2 + d => d */ \ + IOP(4, iop_param); \ + rolw(k, k, 7); /* rol (t0 + e + t), 7) => k */ \ + GG##i##_2(e, f, g, t3, t4); \ + add h, h, k; /* h + w1 + k => h */ \ + IOP(5, iop_param); \ + FF##i##_3(a, b, c, t1, t2); \ + eor t0, t0, k; /* k ^ t0 => t0 */ \ + GG##i##_3(e, f, g, t3, t4); \ + add d, d, t1; /* FF(a,b,c) + d => d */ \ + IOP(6, iop_param); \ + add t3, t3, h; /* GG(e,f,g) + h => t3 */ \ + rolw(b, b, 9); /* rol(b, 9) => b */ \ + eor h, t3, t3, ror #(32-9); \ + IOP(7, iop_param); \ + add d, d, t0; /* t0 + d => d */ \ + rolw(f, f, 19); /* rol(f, 19) => f */ \ + IOP(8, iop_param); \ + eor h, h, t3, ror #(32-17); /* P0(t3) => h */ + +#define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ + R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) + +#define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ + R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) + +#define KL(round) \ + ldp k_even, k_odd, [RKPTR, #(4*(round))]; + +/* Input expansion macros. */ + +/* Byte-swapped input address. */ +#define IW_W_ADDR(round, widx, offs) \ + (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4)) + +/* Expanded input address. */ +#define XW_W_ADDR(round, widx, offs) \ + (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4)) + +/* Rounds 1-12, byte-swapped input block addresses. */ +#define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 32) +#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48) + +/* Rounds 1-12, expanded input block addresses. */ +#define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0) +#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16) + +/* Input block loading. + * Interleaving within round function needed for in-order CPUs. 
*/ +#define LOAD_W_VEC_1_1() \ + add addr0, sp, #IW_W1_ADDR(0, 0); +#define LOAD_W_VEC_1_2() \ + add addr1, sp, #IW_W1_ADDR(4, 0); +#define LOAD_W_VEC_1_3() \ + ld1 {W0.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_4() \ + ld1 {W1.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_5() \ + ld1 {W2.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_6() \ + ld1 {W3.16b}, [RDATA], #16; +#define LOAD_W_VEC_1_7() \ + rev32 XTMP0.16b, W0.16b; +#define LOAD_W_VEC_1_8() \ + rev32 XTMP1.16b, W1.16b; +#define LOAD_W_VEC_2_1() \ + rev32 XTMP2.16b, W2.16b; +#define LOAD_W_VEC_2_2() \ + rev32 XTMP3.16b, W3.16b; +#define LOAD_W_VEC_2_3() \ + eor XTMP4.16b, XTMP1.16b, XTMP0.16b; +#define LOAD_W_VEC_2_4() \ + eor XTMP5.16b, XTMP2.16b, XTMP1.16b; +#define LOAD_W_VEC_2_5() \ + st1 {XTMP0.16b}, [addr0], #16; +#define LOAD_W_VEC_2_6() \ + st1 {XTMP4.16b}, [addr0]; \ + add addr0, sp, #IW_W1_ADDR(8, 0); +#define LOAD_W_VEC_2_7() \ + eor XTMP6.16b, XTMP3.16b, XTMP2.16b; +#define LOAD_W_VEC_2_8() \ + ext W0.16b, XTMP0.16b, XTMP0.16b, #8; /* W0: xx, w0, xx, xx */ +#define LOAD_W_VEC_3_1() \ + mov W2.16b, XTMP1.16b; /* W2: xx, w6, w5, w4 */ +#define LOAD_W_VEC_3_2() \ + st1 {XTMP1.16b}, [addr1], #16; +#define LOAD_W_VEC_3_3() \ + st1 {XTMP5.16b}, [addr1]; \ + ext W1.16b, XTMP0.16b, XTMP0.16b, #4; /* W1: xx, w3, w2, w1 */ +#define LOAD_W_VEC_3_4() \ + ext W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */ +#define LOAD_W_VEC_3_5() \ + ext W4.16b, XTMP2.16b, XTMP3.16b, #8; /* W4: xx, w12, w11, w10 */ +#define LOAD_W_VEC_3_6() \ + st1 {XTMP2.16b}, [addr0], #16; +#define LOAD_W_VEC_3_7() \ + st1 {XTMP6.16b}, [addr0]; +#define LOAD_W_VEC_3_8() \ + ext W5.16b, XTMP3.16b, XTMP3.16b, #4; /* W5: xx, w15, w14, w13 */ + +#define LOAD_W_VEC_1(iop_num, ...) \ + LOAD_W_VEC_1_##iop_num() +#define LOAD_W_VEC_2(iop_num, ...) \ + LOAD_W_VEC_2_##iop_num() +#define LOAD_W_VEC_3(iop_num, ...) \ + LOAD_W_VEC_3_##iop_num() + +/* Message scheduling. Note: 3 words per vector register. + * Interleaving within round function needed for in-order CPUs. 
*/ +#define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \ + /* Load (w[i - 16]) => XTMP0 */ \ + /* Load (w[i - 13]) => XTMP5 */ \ + ext XTMP0.16b, w0.16b, w0.16b, #12; /* XTMP0: w0, xx, xx, xx */ +#define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP5.16b, w1.16b, w1.16b, #12; +#define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */ +#define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP5.16b, XTMP5.16b, w2.16b, #12; +#define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \ + /* w[i - 9] == w3 */ \ + /* W3 ^ XTMP0 => XTMP0 */ \ + eor XTMP0.16b, XTMP0.16b, w3.16b; +#define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \ + /* w[i - 3] == w5 */ \ + /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \ + /* rol(XTMP5, 7) => XTMP1 */ \ + add addr0, sp, #XW_W1_ADDR((round), 0); \ + shl XTMP2.4s, w5.4s, #15; +#define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \ + shl XTMP1.4s, XTMP5.4s, #7; +#define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP2.4s, w5.4s, #(32-15); +#define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP1.4s, XTMP5.4s, #(32-7); +#define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \ + eor XTMP0.16b, XTMP0.16b, XTMP2.16b; +#define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \ + /* w[i - 6] == W4 */ \ + /* W4 ^ XTMP1 => XTMP1 */ \ + eor XTMP1.16b, XTMP1.16b, w4.16b; +#define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \ + /* P1(XTMP0) ^ XTMP1 => W0 */ \ + shl XTMP3.4s, XTMP0.4s, #15; +#define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \ + shl XTMP4.4s, XTMP0.4s, #23; +#define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \ + eor w0.16b, XTMP1.16b, XTMP0.16b; +#define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP3.4s, XTMP0.4s, #(32-15); +#define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \ + sri XTMP4.4s, XTMP0.4s, #(32-23); +#define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \ + eor w0.16b, w0.16b, XTMP3.16b; +#define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \ + /* Load (w[i - 3]) => XTMP2 */ \ + ext XTMP2.16b, w4.16b, w4.16b, #12; +#define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \ + eor w0.16b, w0.16b, XTMP4.16b; +#define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \ + ext XTMP2.16b, XTMP2.16b, w5.16b, #12; +#define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \ + /* W1 ^ W2 => XTMP3 */ \ + eor XTMP3.16b, XTMP2.16b, w0.16b; +#define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5) +#define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \ + st1 {XTMP2.16b-XTMP3.16b}, [addr0]; +#define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5) + +#define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5) +#define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5) +#define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5) + +#define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0) +#define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0) +#define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0) + +#define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1) +#define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1) +#define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1) + +#define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \ + 
SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2) +#define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W3, W4, W5, W0, W1, W2) +#define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2) + +#define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3) +#define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3) +#define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3) + +#define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \ + SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4) +#define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \ + SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4) +#define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \ + SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4) + + + /* + * Transform blocks*64 bytes (blocks*16 32-bit words) at 'src'. + * + * void sm3_neon_transform(struct sm3_state *sst, u8 const *src, + * int blocks) + */ + .text +.align 3 +SYM_TYPED_FUNC_START(sm3_neon_transform) + ldp ra, rb, [RSTATE, #0] + ldp rc, rd, [RSTATE, #8] + ldp re, rf, [RSTATE, #16] + ldp rg, rh, [RSTATE, #24] + + stp x28, x29, [sp, #-16]! + stp x19, x20, [sp, #-16]! + stp x21, x22, [sp, #-16]! + stp x23, x24, [sp, #-16]! + stp x25, x26, [sp, #-16]! + mov RFRAME, sp + + sub addr0, sp, #STACK_SIZE + adr_l RKPTR, .LKtable + and sp, addr0, #(~63) + + /* Preload first block. */ + LOAD_W_VEC_1(1, 0) + LOAD_W_VEC_1(2, 0) + LOAD_W_VEC_1(3, 0) + LOAD_W_VEC_1(4, 0) + LOAD_W_VEC_1(5, 0) + LOAD_W_VEC_1(6, 0) + LOAD_W_VEC_1(7, 0) + LOAD_W_VEC_1(8, 0) + LOAD_W_VEC_2(1, 0) + LOAD_W_VEC_2(2, 0) + LOAD_W_VEC_2(3, 0) + LOAD_W_VEC_2(4, 0) + LOAD_W_VEC_2(5, 0) + LOAD_W_VEC_2(6, 0) + LOAD_W_VEC_2(7, 0) + LOAD_W_VEC_2(8, 0) + LOAD_W_VEC_3(1, 0) + LOAD_W_VEC_3(2, 0) + LOAD_W_VEC_3(3, 0) + LOAD_W_VEC_3(4, 0) + LOAD_W_VEC_3(5, 0) + LOAD_W_VEC_3(6, 0) + LOAD_W_VEC_3(7, 0) + LOAD_W_VEC_3(8, 0) + +.balign 16 +.Loop: + /* Transform 0-3 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0) + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0) + + /* Transform 4-7 + Precalc 12-14 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12) + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12) + + /* Transform 8-11 + Precalc 12-17 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15) + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15) + + /* Transform 12-14 + Precalc 18-20 */ + R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18) + R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18) + R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18) + + /* Transform 15-17 + Precalc 21-23 */ + R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21) + R2(rd, ra, rb, rc, 
rh, re, rf, rg, k_odd, _, 17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21) + + /* Transform 18-20 + Precalc 24-26 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 24) + + /* Transform 21-23 + Precalc 27-29 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27) + + /* Transform 24-26 + Precalc 30-32 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30) + + /* Transform 27-29 + Precalc 33-35 */ + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33) + + /* Transform 30-32 + Precalc 36-38 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36) + + /* Transform 33-35 + Precalc 39-41 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39) + + /* Transform 36-38 + Precalc 42-44 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42) + + /* Transform 39-41 + Precalc 45-47 */ + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45) + + /* Transform 42-44 + Precalc 48-50 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48) + + /* Transform 45-47 + Precalc 51-53 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51) + + /* Transform 48-50 + Precalc 54-56 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54) + + /* Transform 51-53 + Precalc 57-59 */ + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57) + R2(ra, rb, rc, rd, re, 
rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57) + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57) + + /* Transform 54-56 + Precalc 60-62 */ + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60) + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60) + + /* Transform 57-59 + Precalc 63 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63) + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63) + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63) + + /* Transform 60 */ + R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _) + subs RNBLKS, RNBLKS, #1 + b.eq .Lend + + /* Transform 61-63 + Preload next block */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, LOAD_W_VEC_1, _) + ldp s0, s1, [RSTATE, #0] + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _) + ldp s2, s3, [RSTATE, #8] + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, LOAD_W_VEC_3, _) + + /* Update the chaining variables. */ + eor ra, ra, s0 + eor rb, rb, s1 + ldp s0, s1, [RSTATE, #16] + eor rc, rc, s2 + ldp k_even, k_odd, [RSTATE, #24] + eor rd, rd, s3 + eor re, re, s0 + stp ra, rb, [RSTATE, #0] + eor rf, rf, s1 + stp rc, rd, [RSTATE, #8] + eor rg, rg, k_even + stp re, rf, [RSTATE, #16] + eor rh, rh, k_odd + stp rg, rh, [RSTATE, #24] + b .Loop + +.Lend: + /* Transform 61-63 */ + R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, _, _) + ldp s0, s1, [RSTATE, #0] + R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _) + ldp s2, s3, [RSTATE, #8] + R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, _, _) + + /* Update the chaining variables. 
*/ + eor ra, ra, s0 + clear_vec(W0) + eor rb, rb, s1 + clear_vec(W1) + ldp s0, s1, [RSTATE, #16] + clear_vec(W2) + eor rc, rc, s2 + clear_vec(W3) + ldp k_even, k_odd, [RSTATE, #24] + clear_vec(W4) + eor rd, rd, s3 + clear_vec(W5) + eor re, re, s0 + clear_vec(XTMP0) + stp ra, rb, [RSTATE, #0] + clear_vec(XTMP1) + eor rf, rf, s1 + clear_vec(XTMP2) + stp rc, rd, [RSTATE, #8] + clear_vec(XTMP3) + eor rg, rg, k_even + clear_vec(XTMP4) + stp re, rf, [RSTATE, #16] + clear_vec(XTMP5) + eor rh, rh, k_odd + clear_vec(XTMP6) + stp rg, rh, [RSTATE, #24] + + /* Clear message expansion area */ + add addr0, sp, #STACK_W + st1 {W0.16b-W3.16b}, [addr0], #64 + st1 {W0.16b-W3.16b}, [addr0], #64 + st1 {W0.16b-W3.16b}, [addr0] + + mov sp, RFRAME + + ldp x25, x26, [sp], #16 + ldp x23, x24, [sp], #16 + ldp x21, x22, [sp], #16 + ldp x19, x20, [sp], #16 + ldp x28, x29, [sp], #16 + + ret +SYM_FUNC_END(sm3_neon_transform) + + + .section ".rodata", "a" + + .align 4 +.LKtable: + .long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb + .long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc + .long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce + .long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6 + .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c + .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce + .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec + .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 + .long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53 + .long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d + .long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4 + .long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43 + .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c + .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce + .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec + .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..7182ee683f14ace6ea8c23ec9af0ab40fd3db97c --- /dev/null +++ b/arch/arm64/crypto/sm3-neon-glue.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * sm3-neon-glue.c - SM3 secure hash using NEON instructions + * + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +asmlinkage void sm3_neon_transform(struct sm3_state *sst, u8 const *src, + int blocks); + +static int sm3_neon_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + if (!crypto_simd_usable()) { + sm3_update(shash_desc_ctx(desc), data, len); + return 0; + } + + kernel_neon_begin(); + sm3_base_do_update(desc, data, len, sm3_neon_transform); + kernel_neon_end(); + + return 0; +} + +static int sm3_neon_final(struct shash_desc *desc, u8 *out) +{ + if (!crypto_simd_usable()) { + sm3_final(shash_desc_ctx(desc), out); + return 0; + } + + kernel_neon_begin(); + sm3_base_do_finalize(desc, sm3_neon_transform); + kernel_neon_end(); + + return sm3_base_finish(desc, out); +} + +static int sm3_neon_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + if (!crypto_simd_usable()) { + struct sm3_state *sctx = shash_desc_ctx(desc); + + if (len) + sm3_update(sctx, data, len); + sm3_final(sctx, out); + return 0; + } + + kernel_neon_begin(); + if (len) + sm3_base_do_update(desc, data, len, sm3_neon_transform); + sm3_base_do_finalize(desc, sm3_neon_transform); + kernel_neon_end(); + + return sm3_base_finish(desc, out); +} + +static struct 
shash_alg sm3_alg = { + .digestsize = SM3_DIGEST_SIZE, + .init = sm3_base_init, + .update = sm3_neon_update, + .final = sm3_neon_final, + .finup = sm3_neon_finup, + .descsize = sizeof(struct sm3_state), + .base.cra_name = "sm3", + .base.cra_driver_name = "sm3-neon", + .base.cra_blocksize = SM3_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, + .base.cra_priority = 200, +}; + +static int __init sm3_neon_init(void) +{ + return crypto_register_shash(&sm3_alg); +} + +static void __exit sm3_neon_fini(void) +{ + crypto_unregister_shash(&sm3_alg); +} + +module_init(sm3_neon_init); +module_exit(sm3_neon_fini); + +MODULE_DESCRIPTION("SM3 secure hash using NEON instructions"); +MODULE_AUTHOR("Jussi Kivilinna "); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 445aa3af3b76239e724f0b8ea09284512961a753..400f8956328b97db27c8d6d937ab66bd24228c2b 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -308,13 +308,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, } #endif -static inline bool is_ttbr0_addr(unsigned long addr) +static __always_inline bool is_ttbr0_addr(unsigned long addr) { /* entry assembly clears tags for TTBR0 addrs */ return addr < TASK_SIZE; } -static inline bool is_ttbr1_addr(unsigned long addr) +static __always_inline bool is_ttbr1_addr(unsigned long addr) { /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 634279b3b03d1b078e7554fe82191ec302cd2bbe..117e2c180f3c77d8f65af2ca51012c2ba075212a 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -23,8 +23,8 @@ * * The regs must be on a stack currently owned by the calling task. */ -static inline void unwind_init_from_regs(struct unwind_state *state, - struct pt_regs *regs) +static __always_inline void unwind_init_from_regs(struct unwind_state *state, + struct pt_regs *regs) { unwind_init_common(state, current); @@ -58,8 +58,8 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state) * duration of the unwind, or the unwind will be bogus. It is never valid to * call this for the current task. 
*/ -static inline void unwind_init_from_task(struct unwind_state *state, - struct task_struct *task) +static __always_inline void unwind_init_from_task(struct unwind_state *state, + struct task_struct *task) { unwind_init_common(state, task); @@ -186,7 +186,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) : stackinfo_get_unknown(); \ }) -noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry, +noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 5b391490e045be91b9cf1e85a2b964474f4d8c4d..74f76514a48d00732adf385a511b6b207c344951 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -353,6 +353,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr) return false; } +static bool is_translation_fault(unsigned long esr) +{ + return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT; +} + static void __do_kernel_fault(unsigned long addr, unsigned long esr, struct pt_regs *regs) { @@ -385,7 +390,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr, } else if (addr < PAGE_SIZE) { msg = "NULL pointer dereference"; } else { - if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) + if (is_translation_fault(esr) && + kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) return; msg = "paging request"; diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index 6e6756e8fa0a92be1ecc0a99d4b1554363af2710..86a6e25908664b59dafae3ca2297f70e296220c9 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -361,6 +361,8 @@ static struct clk clk_periph = { */ int clk_enable(struct clk *clk) { + if (!clk) + return 0; mutex_lock(&clocks_mutex); clk_enable_unlocked(clk); mutex_unlock(&clocks_mutex); diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 37c46720c719a6fb528fc530a3a93b2d7c527677..f38c39572a9e82f2412fd0b4ee2a7ada7cd69ca3 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -438,7 +438,7 @@ dm9000@6 { ingenic,nemc-tAW = <50>; ingenic,nemc-tSTRV = <100>; - reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpf 12 GPIO_ACTIVE_LOW>; vcc-supply = <ð0_power>; interrupt-parent = <&gpe>; diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index d09d0769f549674ceab5076dc2269fa3379a2c7c..0fd9ac76eb742d04e8eb60b7cdd20daf12f75ea7 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -211,7 +211,7 @@ union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port) { union cvmx_helper_link_info result; - WARN(!octeon_is_simulation(), + WARN_ONCE(!octeon_is_simulation(), "Using deprecated link status - please update your DT"); /* Unless we fix it later, all links are defaulted to down */ diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 6f49fd9be1f3cce1e7ae1a20c2b8ea42c3d5f522..9abfc4bf9bd83a1316c4c0cbe93f6433790e7c5b 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -1096,7 +1096,7 @@ union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port) if (index == 0) result = __cvmx_helper_rgmii_link_get(ipd_port); else { - WARN(1, "Using deprecated link status - please update 
your DT"); + WARN_ONCE(1, "Using deprecated link status - please update your DT"); result.s.full_duplex = 1; result.s.link_up = 1; result.s.speed = 1000; diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h index 6bbf082dd149ef90571ae8137d66eb9d4f14af05..79d5bb0e06d63c6fe324ca25743063d477f8c5ad 100644 --- a/arch/mips/include/asm/mach-ralink/mt7621.h +++ b/arch/mips/include/asm/mach-ralink/mt7621.h @@ -7,10 +7,12 @@ #ifndef _MT7621_REGS_H_ #define _MT7621_REGS_H_ +#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(x))) + #define MT7621_PALMBUS_BASE 0x1C000000 #define MT7621_PALMBUS_SIZE 0x03FFFFFF -#define MT7621_SYSC_BASE 0x1E000000 +#define MT7621_SYSC_BASE IOMEM(0x1E000000) #define SYSC_REG_CHIP_NAME0 0x00 #define SYSC_REG_CHIP_NAME1 0x04 diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c index e673603e11e5d6d1331f18b1427a10c328dc32c8..92140edb3ce3edc0a2371acc70988d92c825102f 100644 --- a/arch/mips/kernel/vpe-cmp.c +++ b/arch/mips/kernel/vpe-cmp.c @@ -75,7 +75,6 @@ ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { - kfree(cd); } static struct class vpe_class = { @@ -157,6 +156,7 @@ int __init vpe_module_init(void) device_del(&vpe_device); out_class: + put_device(&vpe_device); class_unregister(&vpe_class); out_chrdev: @@ -169,7 +169,7 @@ void __exit vpe_module_exit(void) { struct vpe *v, *n; - device_del(&vpe_device); + device_unregister(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c index bad6b0891b2b5fd1f29fba50bb25091fbff98268..84a82b551ec35345314969ae46d25fc628b835aa 100644 --- a/arch/mips/kernel/vpe-mt.c +++ b/arch/mips/kernel/vpe-mt.c @@ -313,7 +313,6 @@ ATTRIBUTE_GROUPS(vpe); static void vpe_device_release(struct device *cd) { - kfree(cd); } static struct class vpe_class = { @@ -497,6 +496,7 @@ int __init vpe_module_init(void) device_del(&vpe_device); out_class: + put_device(&vpe_device); class_unregister(&vpe_class); out_chrdev: @@ -509,7 +509,7 @@ void __exit vpe_module_exit(void) { struct vpe *v, *n; - device_del(&vpe_device); + device_unregister(&vpe_device); class_unregister(&vpe_class); unregister_chrdev(major, VPE_MODULE_NAME); diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c index fb0565bc34fda33de75f7dd922074431a1dd8d3f..bbf5811afbf2c073cb13a3bc75a725f51acdb364 100644 --- a/arch/mips/ralink/mt7621.c +++ b/arch/mips/ralink/mt7621.c @@ -25,6 +25,7 @@ #define MT7621_MEM_TEST_PATTERN 0xaa5555aa static u32 detect_magic __initdata; +static struct ralink_soc_info *soc_info_ptr; int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { @@ -97,41 +98,83 @@ void __init ralink_of_remap(void) panic("Failed to remap core resources"); } -static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev) +static unsigned int __init mt7621_get_soc_name0(void) +{ + return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME0); +} + +static unsigned int __init mt7621_get_soc_name1(void) +{ + return __raw_readl(MT7621_SYSC_BASE + SYSC_REG_CHIP_NAME1); +} + +static bool __init mt7621_soc_valid(void) +{ + if (mt7621_get_soc_name0() == MT7621_CHIP_NAME0 && + mt7621_get_soc_name1() == MT7621_CHIP_NAME1) + return true; + else + return false; +} + +static const char __init *mt7621_get_soc_id(void) +{ + if (mt7621_soc_valid()) + return "MT7621"; + else + return "invalid"; +} + +static unsigned int __init mt7621_get_soc_rev(void) +{ + return __raw_readl(MT7621_SYSC_BASE + 
SYSC_REG_CHIP_REV); +} + +static unsigned int __init mt7621_get_soc_ver(void) +{ + return (mt7621_get_soc_rev() >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK; +} + +static unsigned int __init mt7621_get_soc_eco(void) +{ + return (mt7621_get_soc_rev() & CHIP_REV_ECO_MASK); +} + +static const char __init *mt7621_get_soc_revision(void) +{ + if (mt7621_get_soc_rev() == 1 && mt7621_get_soc_eco() == 1) + return "E2"; + else + return "E1"; +} + +static int __init mt7621_soc_dev_init(void) { struct soc_device *soc_dev; struct soc_device_attribute *soc_dev_attr; soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) - return; + return -ENOMEM; soc_dev_attr->soc_id = "mt7621"; soc_dev_attr->family = "Ralink"; + soc_dev_attr->revision = mt7621_get_soc_revision(); - if (((rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK) == 1 && - (rev & CHIP_REV_ECO_MASK) == 1) - soc_dev_attr->revision = "E2"; - else - soc_dev_attr->revision = "E1"; - - soc_dev_attr->data = soc_info; + soc_dev_attr->data = soc_info_ptr; soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { kfree(soc_dev_attr); - return; + return PTR_ERR(soc_dev); } + + return 0; } +device_initcall(mt7621_soc_dev_init); void __init prom_soc_init(struct ralink_soc_info *soc_info) { - void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); - unsigned char *name = NULL; - u32 n0; - u32 n1; - u32 rev; - /* Early detection of CMP support */ mips_cm_probe(); mips_cpc_probe(); @@ -154,27 +197,23 @@ void __init prom_soc_init(struct ralink_soc_info *soc_info) __sync(); } - n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); - n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); - - if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) { - name = "MT7621"; + if (mt7621_soc_valid()) soc_info->compatible = "mediatek,mt7621-soc"; - } else { - panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1); - } + else + panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", + mt7621_get_soc_name0(), + mt7621_get_soc_name1()); ralink_soc = MT762X_SOC_MT7621AT; - rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, "MediaTek %s ver:%u eco:%u", - name, - (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, - (rev & CHIP_REV_ECO_MASK)); + mt7621_get_soc_id(), + mt7621_get_soc_ver(), + mt7621_get_soc_eco()); soc_info->mem_detect = mt7621_memory_detect; - soc_dev_init(soc_info, rev); + soc_info_ptr = soc_info; if (!register_cps_smp_ops()) return; diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index ea8072acf8d94ebc66b121285091342adbeb21b0..4d06de77d92a69a80af6a1e996b9d8a47a2e1f0d 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "common.h" @@ -63,7 +64,7 @@ void __init plat_mem_setup(void) dtb = get_fdt(); __dt_setup_arch(dtb); - if (!early_init_dt_scan_memory()) + if (early_init_dt_scan_memory()) return; if (soc_info.mem_detect) @@ -81,7 +82,8 @@ static int __init plat_of_setup(void) __dt_register_buses(soc_info.compatible, "palmbus"); /* make sure that the reset controller is setup early */ - ralink_rst_init(); + if (ralink_soc != MT762X_SOC_MT7621AT) + ralink_rst_init(); return 0; } diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index ecd028854469856e0493e191e10b8738cf4fefed..68ae77069d23f0b148e6595377ad49ec4e0a5490 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -166,8 +166,8 @@ extern void __update_cache(pte_t pte); /* This calculates the number of 
initial pages we need for the initial * page tables */ -#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT) -# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT)) +#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE) +# define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE)) #else # define PT_INITIAL (1) /* all initial PTEs fit into one page */ #endif diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 6a7e315bcc2e5b8215dd3d8769a5aa736d03c099..a115315d88e69196ddbdcfb5275918aaaa0279ef 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1288,9 +1288,8 @@ void pdc_io_reset_devices(void) #endif /* defined(BOOTLOADER) */ -/* locked by pdc_console_lock */ -static int __attribute__((aligned(8))) iodc_retbuf[32]; -static char __attribute__((aligned(64))) iodc_dbuf[4096]; +/* locked by pdc_lock */ +static char iodc_dbuf[4096] __page_aligned_bss; /** * pdc_iodc_print - Console print using IODC. @@ -1307,6 +1306,9 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) unsigned int i; unsigned long flags; + count = min_t(unsigned int, count, sizeof(iodc_dbuf)); + + spin_lock_irqsave(&pdc_lock, flags); for (i = 0; i < count;) { switch(str[i]) { case '\n': @@ -1322,12 +1324,11 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) } print: - spin_lock_irqsave(&pdc_lock, flags); - real32_call(PAGE0->mem_cons.iodc_io, - (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, - PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), - __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0); - spin_unlock_irqrestore(&pdc_lock, flags); + real32_call(PAGE0->mem_cons.iodc_io, + (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT, + PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers), + __pa(pdc_result), 0, __pa(iodc_dbuf), i, 0); + spin_unlock_irqrestore(&pdc_lock, flags); return i; } @@ -1354,10 +1355,11 @@ int pdc_iodc_getc(void) real32_call(PAGE0->mem_kbd.iodc_io, (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN, PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers), - __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0); + __pa(pdc_result), 0, __pa(iodc_dbuf), 1, 0); ch = *iodc_dbuf; - status = *iodc_retbuf; + /* like convert_to_wide() but for first return value only: */ + status = *(int *)&pdc_result; spin_unlock_irqrestore(&pdc_lock, flags); if (status == 0) diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c index ab7620f695be1143ff27b3111922ae974581f71c..b16fa9bac5f44ccc27d2ba7d0bef2aa4090e98fb 100644 --- a/arch/parisc/kernel/kgdb.c +++ b/arch/parisc/kernel/kgdb.c @@ -208,23 +208,3 @@ int kgdb_arch_handle_exception(int trap, int signo, } return -1; } - -/* KGDB console driver which uses PDC to read chars from keyboard */ - -static void kgdb_pdc_write_char(u8 chr) -{ - /* no need to print char. kgdb will do it. 
*/ -} - -static struct kgdb_io kgdb_pdc_io_ops = { - .name = "kgdb_pdc", - .read_char = pdc_iodc_getc, - .write_char = kgdb_pdc_write_char, -}; - -static int __init kgdb_pdc_init(void) -{ - kgdb_register_io_module(&kgdb_pdc_io_ops); - return 0; -} -early_initcall(kgdb_pdc_init); diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index 7d0989f523d031bf74888e4ab96ed22b291ea457..cf3bf82323746864e5d76b87051bf804b1f7e08f 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -12,37 +12,27 @@ #include /* for PAGE0 */ #include /* for iodc_call() proto and friends */ -static DEFINE_SPINLOCK(pdc_console_lock); - static void pdc_console_write(struct console *co, const char *s, unsigned count) { int i = 0; - unsigned long flags; - spin_lock_irqsave(&pdc_console_lock, flags); do { i += pdc_iodc_print(s + i, count - i); } while (i < count); - spin_unlock_irqrestore(&pdc_console_lock, flags); } #ifdef CONFIG_KGDB static int kgdb_pdc_read_char(void) { - int c; - unsigned long flags; - - spin_lock_irqsave(&pdc_console_lock, flags); - c = pdc_iodc_getc(); - spin_unlock_irqrestore(&pdc_console_lock, flags); + int c = pdc_iodc_getc(); return (c <= 0) ? NO_POLL_CHAR : c; } static void kgdb_pdc_write_char(u8 chr) { - if (PAGE0->mem_cons.cl_class != CL_DUPLEX) - pdc_console_write(NULL, &chr, 1); + /* no need to print char as it's shown on standard console */ + /* pdc_iodc_print(&chr, 1); */ } static struct kgdb_io kgdb_pdc_io_ops = { diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile index 85b1c6d261d128586796db698cd8ab73ab4a40a0..4459a48d2303350b7ca8ec12e315c7a201193863 100644 --- a/arch/parisc/kernel/vdso32/Makefile +++ b/arch/parisc/kernel/vdso32/Makefile @@ -26,7 +26,7 @@ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) +$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso32ld) # assembly rules for the .S files @@ -38,7 +38,7 @@ $(obj-cvdso32): %.o: %.c FORCE # actual build commands quiet_cmd_vdso32ld = VDSO32L $@ - cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso32ld = $(CROSS32CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile index a30f5ec5eb4bfab62e192779784548305b9146b6..f3d6045793f4c88c1ab395f494d6334f4edb6b7b 100644 --- a/arch/parisc/kernel/vdso64/Makefile +++ b/arch/parisc/kernel/vdso64/Makefile @@ -26,7 +26,7 @@ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE # Force dependency (incbin is bad) # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) +$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE $(call if_changed,vdso64ld) # assembly rules for the .S files @@ -35,7 +35,7 @@ $(obj-vdso64): %.o: %.S FORCE # actual build commands quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts index 
045af668e92847844c2431ef6532da430289adcf..e9cda34a140e0fb282b02f22d1b3b81d4d5376f1 100644 --- a/arch/powerpc/boot/dts/turris1x.dts +++ b/arch/powerpc/boot/dts/turris1x.dts @@ -69,6 +69,20 @@ temperature-sensor@4c { interrupt-parent = <&gpio>; interrupts = <12 IRQ_TYPE_LEVEL_LOW>, /* GPIO12 - ALERT pin */ <13 IRQ_TYPE_LEVEL_LOW>; /* GPIO13 - CRIT pin */ + #address-cells = <1>; + #size-cells = <0>; + + /* Local temperature sensor (SA56004ED internal) */ + channel@0 { + reg = <0>; + label = "board"; + }; + + /* Remote temperature sensor (D+/D- connected to P2020 CPU Temperature Diode) */ + channel@1 { + reg = <1>; + label = "cpu"; + }; }; /* DDR3 SPD/EEPROM */ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 3cee7115441b41cb92bbe6230bb5f936edda45c0..e3d1f377bc5b5bbc2a21b429804ffa965d546562 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -64,17 +64,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, * those. */ #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME -#ifdef CONFIG_PPC64_ELF_ABI_V1 -static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) -{ - /* We need to skip past the initial dot, and the __se_sys alias */ - return !strcmp(sym + 1, name) || - (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) || - (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) || - (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) || - (!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4)); -} -#else static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { return !strcmp(sym, name) || @@ -83,7 +72,6 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) || (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4)); } -#endif /* CONFIG_PPC64_ELF_ABI_V1 */ #endif /* CONFIG_FTRACE_SYSCALLS */ #if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 8abae463f6c12882fc85978d837011d96425d363..95fd7f9485d553f9f9cb188dca103103ee281b16 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -79,7 +79,7 @@ #define H_NOT_ENOUGH_RESOURCES -44 #define H_R_STATE -45 #define H_RESCINDED -46 -#define H_P1 -54 +#define H_ABORTED -54 #define H_P2 -55 #define H_P3 -56 #define H_P4 -57 @@ -100,7 +100,6 @@ #define H_COP_HW -74 #define H_STATE -75 #define H_IN_USE -77 -#define H_ABORTED -78 #define H_UNSUPPORTED_FLAG_START -256 #define H_UNSUPPORTED_FLAG_END -511 #define H_MULTI_THREADS_ACTIVE -9005 diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index e847f9b1c5b9fdd8190817207156f05e86ad70f7..767ab166933ba8e23116ec059e8aee7bce14abf8 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -889,6 +889,7 @@ void __noreturn rtas_halt(void) /* Must be in the RMO region, so we place it here */ static char rtas_os_term_buf[2048]; +static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE; void rtas_os_term(char *str) { @@ -900,16 +901,20 @@ void rtas_os_term(char *str) * this property may terminate the partition which we want to avoid * since it interferes with panic_timeout. 
*/ - if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || - RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) + if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE) return; snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); + /* + * Keep calling as long as RTAS returns a "try again" status, + * but don't use rtas_busy_delay(), which potentially + * schedules. + */ do { - status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL, + status = rtas_call(ibm_os_term_token, 1, 1, NULL, __pa(rtas_os_term_buf)); - } while (rtas_busy_delay(status)); + } while (rtas_busy_delay_time(status)); if (status != 0) printk(KERN_EMERG "ibm,os-term call failed %d\n", status); @@ -1277,6 +1282,13 @@ void __init rtas_initialize(void) no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry); rtas.entry = no_entry ? rtas.base : entry; + /* + * Discover these now to avoid device tree lookups in the + * panic path. + */ + if (of_property_read_bool(rtas.dev, "ibm,extended-os-term")) + ibm_os_term_token = rtas_token("ibm,os-term"); + /* If RTAS was found, allocate the RMO buffer for it and look for * the stop-self token if any */ diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 082f6d0308a472916365d203e2cc19e1bd4563db..8718289c051dd950d4fd6a0d1b9769594623e575 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -61,6 +61,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re next_sp = fp[0]; if (next_sp == sp + STACK_INT_FRAME_SIZE && + validate_sp(sp, current, STACK_INT_FRAME_SIZE) && fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { /* * This looks like an interrupt frame for an diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h index 8965b4463d433a7aaaa940b47215acf59dd64bfd..5e86371a20c7860afdb2ea4baa1c207804a9d8ca 100644 --- a/arch/powerpc/perf/hv-gpci-requests.h +++ b/arch/powerpc/perf/hv-gpci-requests.h @@ -79,6 +79,7 @@ REQUEST(__field(0, 8, partition_id) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 /* * Not available for counter_info_version >= 0x8, use * run_instruction_cycles_by_partition(0x100) instead. @@ -92,6 +93,7 @@ REQUEST(__field(0, 8, partition_id) __count(0x10, 8, cycles) ) #include I(REQUEST_END) +#endif #define REQUEST_NAME system_performance_capabilities #define REQUEST_NUM 0x40 @@ -103,6 +105,7 @@ REQUEST(__field(0, 1, perf_collect_privileged) ) #include I(REQUEST_END) +#ifdef ENABLE_EVENTS_COUNTERINFO_V6 #define REQUEST_NAME processor_bus_utilization_abc_links #define REQUEST_NUM 0x50 #define REQUEST_IDX_KIND "hw_chip_id=?" 
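The rtas_os_term() change earlier in this patch makes two coordinated moves: the "ibm,os-term" token is resolved once at boot, so the panic path never has to walk the device tree, and the retry loop switches to rtas_busy_delay_time(), which only classifies a "busy / try again" status and never sleeps. A condensed, illustrative sketch of that pattern follows; the helper names cache_os_term_token() and report_os_term() are made up for this note, and the RMO buffer setup and locking of the real rtas.c code are omitted.

	/* Illustrative sketch only -- not the actual rtas.c code. */
	static s32 ibm_os_term_token = RTAS_UNKNOWN_SERVICE;

	/* At boot: resolve the token once so the panic path never walks
	 * the device tree. Only use it when the extended form is
	 * advertised, otherwise the call could terminate the partition.
	 */
	static void __init cache_os_term_token(struct device_node *rtas_node)
	{
		if (of_property_read_bool(rtas_node, "ibm,extended-os-term"))
			ibm_os_term_token = rtas_token("ibm,os-term");
	}

	/* At panic time: keep retrying on "try again" statuses, but
	 * classify them with rtas_busy_delay_time(), which never
	 * schedules, instead of rtas_busy_delay(), which may sleep.
	 */
	static void report_os_term(char *term_buf /* must live in the RMO */)
	{
		int status;

		if (ibm_os_term_token == RTAS_UNKNOWN_SERVICE)
			return;

		do {
			status = rtas_call(ibm_os_term_token, 1, 1, NULL,
					   __pa(term_buf));
		} while (rtas_busy_delay_time(status));
	}
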
@@ -194,6 +197,7 @@ REQUEST(__field(0, 4, phys_processor_idx) __count(0x28, 8, instructions_completed) ) #include I(REQUEST_END) +#endif /* Processor_core_power_mode (0x95) skipped, no counters */ /* Affinity_domain_information_by_virtual_processor (0xA0) skipped, diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 5eb60ed5b5e8a8a593e46790f29682d7d433633f..7ff8ff3509f5f6f2f414b6475a86b051a30c335e 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -70,9 +70,9 @@ static const struct attribute_group format_group = { .attrs = format_attrs, }; -static const struct attribute_group event_group = { +static struct attribute_group event_group = { .name = "events", - .attrs = hv_gpci_event_attrs, + /* .attrs is set in init */ }; #define HV_CAPS_ATTR(_name, _format) \ @@ -330,6 +330,7 @@ static int hv_gpci_init(void) int r; unsigned long hret; struct hv_perf_caps caps; + struct hv_gpci_request_buffer *arg; hv_gpci_assert_offsets_correct(); @@ -353,6 +354,36 @@ static int hv_gpci_init(void) /* sampling not supported */ h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + arg = (void *)get_cpu_var(hv_gpci_reqb); + memset(arg, 0, HGPCI_REQ_BUFFER_SIZE); + + /* + * hcall H_GET_PERF_COUNTER_INFO populates the output + * counter_info_version value based on the system hypervisor. + * Pass the counter request 0x10 corresponds to request type + * 'Dispatch_timebase_by_processor', to get the supported + * counter_info_version. + */ + arg->params.counter_request = cpu_to_be32(0x10); + + r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, + virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); + if (r) { + pr_devel("hcall failed, can't get supported counter_info_version: 0x%x\n", r); + arg->params.counter_info_version_out = 0x8; + } + + /* + * Use counter_info_version_out value to assign + * required hv-gpci event list. + */ + if (arg->params.counter_info_version_out >= 0x8) + event_group.attrs = hv_gpci_event_attrs; + else + event_group.attrs = hv_gpci_event_attrs_v6; + + put_cpu_var(hv_gpci_reqb); + r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); if (r) return r; diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h index 4d108262bed791d4ec8282987dfc20304c6661f9..c72020912dea570a8312d304c87f9d560e73ce48 100644 --- a/arch/powerpc/perf/hv-gpci.h +++ b/arch/powerpc/perf/hv-gpci.h @@ -26,6 +26,7 @@ enum { #define REQUEST_FILE "../hv-gpci-requests.h" #define NAME_LOWER hv_gpci #define NAME_UPPER HV_GPCI +#define ENABLE_EVENTS_COUNTERINFO_V6 #include "req-gen/perf.h" #undef REQUEST_FILE #undef NAME_LOWER diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h index fa9bc804e67af1d3a31a9d3676457219a1c183b1..6b2a59fefffa79cd2194dcfffde4b71d431eacfa 100644 --- a/arch/powerpc/perf/req-gen/perf.h +++ b/arch/powerpc/perf/req-gen/perf.h @@ -139,6 +139,26 @@ PMU_EVENT_ATTR_STRING( \ #define REQUEST_(r_name, r_value, r_idx_1, r_fields) \ r_fields +/* Generate event list for platforms with counter_info_version 0x6 or below */ +static __maybe_unused struct attribute *hv_gpci_event_attrs_v6[] = { +#include REQUEST_FILE + NULL +}; + +/* + * Based on getPerfCountInfo v1.018 documentation, some of the hv-gpci + * events were deprecated for platform firmware that supports + * counter_info_version 0x8 or above. + * Those deprecated events are still part of platform firmware that + * support counter_info_version 0x6 and below. As per the getPerfCountInfo + * v1.018 documentation there is no counter_info_version 0x7. 
+ * Undefining macro ENABLE_EVENTS_COUNTERINFO_V6, to disable the addition of + * deprecated events in "hv_gpci_event_attrs" attribute group, for platforms + * that supports counter_info_version 0x8 or above. + */ +#undef ENABLE_EVENTS_COUNTERINFO_V6 + +/* Generate event list for platforms with counter_info_version 0x8 or above*/ static __maybe_unused struct attribute *hv_gpci_event_attrs[] = { #include REQUEST_FILE NULL diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 48038aaedbd3672e42c92193cb3f794231785264..2875c206ac0f83c90ebc1639f3987ebc09fb6e84 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -531,6 +531,7 @@ static int mpc52xx_lpbfifo_probe(struct platform_device *op) err_bcom_rx_irq: bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); err_bcom_rx: + free_irq(lpbfifo.irq, &lpbfifo); err_irq: iounmap(lpbfifo.regs); lpbfifo.regs = NULL; diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index e12cb44e717f1f084226235b3386c7e79e96bc09..caa96edf0e72acce568ec38982f369150b84d0e5 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -107,7 +107,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, goto next; unreg: - platform_device_del(pdev); + platform_device_put(pdev); err: pr_err("%pOF: registration failed\n", np); next: diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 8e40ccac0f44eea03043d2668b6144ba4deda5d1..e5a58a9b2fe9fb20c9057bcb5d4c1607f45a777b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -848,16 +848,7 @@ static int __init eeh_pseries_init(void) } /* Initialize error log size */ - eeh_error_buf_size = rtas_token("rtas-error-log-max"); - if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { - pr_info("%s: unknown EEH error log size\n", - __func__); - eeh_error_buf_size = 1024; - } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { - pr_info("%s: EEH error log size %d exceeds the maximal %d\n", - __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); - eeh_error_buf_size = RTAS_ERROR_LOG_MAX; - } + eeh_error_buf_size = rtas_get_error_log_max(); /* Set EEH probe mode */ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG); diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c index f4b5b5a64db3d3a0e9ae65282f39ec7c60e160e6..63a1e1fe01851d884a0517752b0e1e46c060ddbf 100644 --- a/arch/powerpc/platforms/pseries/plpks.c +++ b/arch/powerpc/platforms/pseries/plpks.c @@ -75,7 +75,7 @@ static int pseries_status_to_err(int rc) case H_FUNCTION: err = -ENXIO; break; - case H_P1: + case H_PARAMETER: case H_P2: case H_P3: case H_P4: @@ -111,7 +111,7 @@ static int pseries_status_to_err(int rc) err = -EEXIST; break; case H_ABORTED: - err = -EINTR; + err = -EIO; break; default: err = -EINVAL; @@ -366,22 +366,24 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var) { unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 }; struct plpks_auth *auth; - struct label *label; + struct label *label = NULL; u8 *output; int rc; if (var->namelen > MAX_NAME_SIZE) return -EINVAL; - auth = construct_auth(PKS_OS_OWNER); + auth = construct_auth(consumer); if (IS_ERR(auth)) return PTR_ERR(auth); - label = construct_label(var->component, var->os, var->name, - var->namelen); - if (IS_ERR(label)) { - rc 
= PTR_ERR(label); - goto out_free_auth; + if (consumer == PKS_OS_OWNER) { + label = construct_label(var->component, var->os, var->name, + var->namelen); + if (IS_ERR(label)) { + rc = PTR_ERR(label); + goto out_free_auth; + } } output = kzalloc(maxobjsize, GFP_KERNEL); @@ -390,9 +392,15 @@ static int plpks_read_var(u8 consumer, struct plpks_var *var) goto out_free_label; } - rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), - virt_to_phys(label), label->size, virt_to_phys(output), - maxobjsize); + if (consumer == PKS_OS_OWNER) + rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(label), label->size, virt_to_phys(output), + maxobjsize); + else + rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth), + virt_to_phys(var->name), var->namelen, virt_to_phys(output), + maxobjsize); + if (rc != H_SUCCESS) { pr_err("Failed to read variable %s for component %s with error %d\n", diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h index c6a291367bb139c2029f77030501fdc54059842b..275ccd86bfb5e46ffff11c06e2adb4b992bc0657 100644 --- a/arch/powerpc/platforms/pseries/plpks.h +++ b/arch/powerpc/platforms/pseries/plpks.h @@ -17,7 +17,7 @@ #define WORLDREADABLE 0x08000000 #define SIGNEDUPDATE 0x01000000 -#define PLPKS_VAR_LINUX 0x01 +#define PLPKS_VAR_LINUX 0x02 #define PLPKS_VAR_COMMON 0x04 struct plpks_var { diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index e2c8f93b535ba1d532e66c9110c89a4546865a66..e454192643910075d3e501382453aba8ee0fae08 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -439,6 +439,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); if (!data->trig_mmio) { + iounmap(data->eoi_mmio); pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); return -ENOMEM; } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index f51c882bf9023af3e8007c79fd4bbfee16b49a3e..e34d7809f6c9f03925c80d3d7a3da04d8cc8b115 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1525,9 +1525,9 @@ bpt_cmds(void) cmd = inchar(); switch (cmd) { - static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; - int mode; - case 'd': /* bd - hardware data breakpoint */ + case 'd': { /* bd - hardware data breakpoint */ + static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n"; + int mode; if (xmon_is_ro) { printf(xmon_ro_msg); break; @@ -1560,6 +1560,7 @@ bpt_cmds(void) force_enable_xmon(); break; + } case 'i': /* bi - hardware instr breakpoint */ if (xmon_is_ro) { diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 593cf09264d80a6afbe3704b176d44ff0f024929..8e5fd568201893ab38b84cf707e364fefb0d76bc 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -502,7 +502,7 @@ config KEXEC_FILE select KEXEC_CORE select KEXEC_ELF select HAVE_IMA_KEXEC if IMA - depends on 64BIT + depends on 64BIT && MMU help This is new version of kexec system call. 
This system call is file based and takes file descriptors as system call argument diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi index 24b1cfb9a73e432f2881595ecf2a4319aa6d6fd9..5d3e5240e33aef1d733e7def56d48ffca5536b21 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi +++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi @@ -9,7 +9,7 @@ core_pwm0: pwm@40000000 { compatible = "microchip,corepwm-rtl-v4"; reg = <0x0 0x40000000 0x0 0xF0>; microchip,sync-update-mask = /bits/ 32 <0>; - #pwm-cells = <2>; + #pwm-cells = <3>; clocks = <&fabric_clk3>; status = "disabled"; }; diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts index ec7b7c2a3ce2827e819f31b855f7ced83d55e482..8ced67c3b00b2bddf22f20791ade643df1d6af23 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts +++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts @@ -37,7 +37,7 @@ ddrc_cache_lo: memory@80000000 { status = "okay"; }; - ddrc_cache_hi: memory@1000000000 { + ddrc_cache_hi: memory@1040000000 { device_type = "memory"; reg = <0x10 0x40000000 0x0 0x40000000>; status = "okay"; diff --git a/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi index 8545baf4d1290b49251fbf60c8739520e631bbb8..39a77df489abf51ff36adad685f8ef68805e10d8 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi +++ b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi @@ -13,33 +13,4 @@ fabric_clk1: fabric-clk1 { #clock-cells = <0>; clock-frequency = <125000000>; }; - - pcie: pcie@2000000000 { - compatible = "microchip,pcie-host-1.0"; - #address-cells = <0x3>; - #interrupt-cells = <0x1>; - #size-cells = <0x2>; - device_type = "pci"; - reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>; - reg-names = "cfg", "apb"; - bus-range = <0x0 0x7f>; - interrupt-parent = <&plic>; - interrupts = <119>; - interrupt-map = <0 0 0 1 &pcie_intc 0>, - <0 0 0 2 &pcie_intc 1>, - <0 0 0 3 &pcie_intc 2>, - <0 0 0 4 &pcie_intc 3>; - interrupt-map-mask = <0 0 0 7>; - clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>; - clock-names = "fic0", "fic1", "fic3"; - ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>; - msi-parent = <&pcie>; - msi-controller; - status = "disabled"; - pcie_intc: interrupt-controller { - #address-cells = <0>; - #interrupt-cells = <1>; - interrupt-controller; - }; - }; }; diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index a5c2ca1d1cd8ba8a7a958318aa503836ead36ebd..ec19d6afc89653b59d883ba414dbc7ecff6e8397 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -5,4 +5,10 @@ #include #include +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} +#define arch_clear_hugepage_flags arch_clear_hugepage_flags + #endif /* _ASM_RISCV_HUGETLB_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 92080a227937283b2d094c2d204b979928d9e16d..42497d487a174642bee085b3a950ee6211396fd3 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -135,4 +135,9 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) #include +#ifdef CONFIG_MMU +#define arch_memremap_wb(addr, size) \ + ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) +#endif + #endif /* _ASM_RISCV_IO_H */ diff --git a/arch/riscv/include/asm/kexec.h 
b/arch/riscv/include/asm/kexec.h index eee260e8ab308e8a033995082e1119df4739eeb3..2b56769cb530cbcc164206a1fe0361a6a9fa013f 100644 --- a/arch/riscv/include/asm/kexec.h +++ b/arch/riscv/include/asm/kexec.h @@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs, #define ARCH_HAS_KIMAGE_ARCH struct kimage_arch { + void *fdt; /* For CONFIG_KEXEC_FILE */ unsigned long fdt_addr; }; @@ -62,6 +63,10 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +struct kimage; +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup #endif #endif diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 0099dc1161683ddd1e3c45460309e331b7e6b0a7..5ff1f19fd45c29b4fc7d2c8b44ac4984caf1e75c 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -19,6 +19,8 @@ typedef struct { #ifdef CONFIG_SMP /* A local icache flush is needed before user execution can resume. */ cpumask_t icache_stale_mask; + /* A local tlb flush is needed before user execution can resume. */ + cpumask_t tlb_stale_mask; #endif } mm_context_t; diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index dc42375c2357136356df1ca7f42480eeea98a3dc..42a042c0e13ed6ddac188dc8f9940a4008feb6c7 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -25,7 +25,11 @@ extern bool pgtable_l5_enabled; #define PGDIR_MASK (~(PGDIR_SIZE - 1)) /* p4d is folded into pgd in case of 4-level page table */ -#define P4D_SHIFT 39 +#define P4D_SHIFT_L3 30 +#define P4D_SHIFT_L4 39 +#define P4D_SHIFT_L5 39 +#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \ + (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3)) #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE - 1)) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 92ec2d9d7273f450a03d17ce7682d9b6f7d5c240..ec6fb83349cedb8c3c7ef94767bc5c339fac2371 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, * Relying on flush_tlb_fix_spurious_fault would suffice, but * the extra traps reduce performance. So, eagerly SFENCE.VMA. 
*/ - local_flush_tlb_page(address); + flush_tlb_page(vma, address); } static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 801019381dea3fec53f50fe443fb350c3ab92c43..907b9efd39a87dd3853c1f8f21f4baa2fdd1125c 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr) { ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } + +static inline void local_flush_tlb_all_asid(unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma x0, %0" + : + : "r" (asid) + : "memory"); +} + +static inline void local_flush_tlb_page_asid(unsigned long addr, + unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma %0, %1" + : + : "r" (addr), "r" (asid) + : "memory"); +} + #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0) #define local_flush_tlb_page(addr) do { } while (0) diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 855450bed9f52c2b7b06360dc5976b5519e5592b..ec0cab9fbddd0da98cb415af2732a4ede083886b 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -165,7 +165,7 @@ do { \ might_fault(); \ access_ok(__p, sizeof(*__p)) ? \ __get_user((x), __p) : \ - ((x) = 0, -EFAULT); \ + ((x) = (__force __typeof__(x))0, -EFAULT); \ }) #define __put_user_asm(insn, x, ptr, err) \ diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 0cb94992c15b32a852716ca877d95b5f14f63c6f..5372b708fae21a072f72434625d27d792b83abc7 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -21,6 +21,18 @@ #include #include +int arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + kvfree(image->arch.fdt); + image->arch.fdt = NULL; + + vfree(image->elf_headers); + image->elf_headers = NULL; + image->elf_headers_sz = 0; + + return kexec_image_post_load_cleanup_default(image); +} + static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_elf_info *elf_info, unsigned long old_pbase, unsigned long new_pbase) @@ -298,6 +310,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, pr_err("Error add DTB kbuf ret=%d\n", ret); goto out_free_fdt; } + /* Cache the fdt buffer address for memory cleanup */ + image->arch.fdt = fdt; pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem); goto out; diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 186abd146eaffca06c6c3db10902c42ffa769dbc..3221a9e5f3724cafb8a35703b47fb86e4b200e24 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -263,12 +263,11 @@ ret_from_exception: #endif bnez s0, resume_kernel -resume_userspace: /* Interrupts must be disabled here so flags are checked atomically */ REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */ andi s1, s0, _TIF_WORK_MASK - bnez s1, work_pending - + bnez s1, resume_userspace_slow +resume_userspace: #ifdef CONFIG_CONTEXT_TRACKING_USER call user_enter_callable #endif @@ -368,19 +367,12 @@ resume_kernel: j restore_all #endif -work_pending: +resume_userspace_slow: /* Enter slow path for supplementary processing */ - la ra, ret_from_exception - andi s1, s0, _TIF_NEED_RESCHED - bnez s1, work_resched -work_notifysig: - /* Handle pending signals and notify-resume requests */ - csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */ move a0, sp /* pt_regs */ move a1, s0 /* 
current_thread_info->flags */ - tail do_notify_resume -work_resched: - tail schedule + call do_work_pending + j resume_userspace /* Slow paths for ptrace. */ handle_syscall_trace_enter: diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h index cb6ff7dccb92e18bde5f9b517e42ec851a98b986..de8474146a9b6e71e025fcb79daef39e1cd8d2e7 100644 --- a/arch/riscv/kernel/probes/simulate-insn.h +++ b/arch/riscv/kernel/probes/simulate-insn.h @@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence, 0x7f, 0x0f); } while (0) __RISCV_INSN_FUNCS(c_j, 0xe003, 0xa001); -__RISCV_INSN_FUNCS(c_jr, 0xf007, 0x8002); +__RISCV_INSN_FUNCS(c_jr, 0xf07f, 0x8002); __RISCV_INSN_FUNCS(c_jal, 0xe003, 0x2001); -__RISCV_INSN_FUNCS(c_jalr, 0xf007, 0x9002); +__RISCV_INSN_FUNCS(c_jalr, 0xf07f, 0x9002); __RISCV_INSN_FUNCS(c_beqz, 0xe003, 0xc001); __RISCV_INSN_FUNCS(c_bnez, 0xe003, 0xe001); __RISCV_INSN_FUNCS(c_ebreak, 0xffff, 0x9002); diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 5c591123c440910594f0051be1bb6387d72d05d6..bfb2afa4135f89690b90da20d281252a539d8cda 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -313,19 +313,27 @@ static void do_signal(struct pt_regs *regs) } /* - * notification of userspace execution resumption - * - triggered by the _TIF_WORK_MASK flags + * Handle any pending work on the resume-to-userspace path, as indicated by + * _TIF_WORK_MASK. Entered from assembly with IRQs off. */ -asmlinkage __visible void do_notify_resume(struct pt_regs *regs, - unsigned long thread_info_flags) +asmlinkage __visible void do_work_pending(struct pt_regs *regs, + unsigned long thread_info_flags) { - if (thread_info_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - - /* Handle pending signal delivery */ - if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) - do_signal(regs); - - if (thread_info_flags & _TIF_NOTIFY_RESUME) - resume_user_mode_work(regs); + do { + if (thread_info_flags & _TIF_NEED_RESCHED) { + schedule(); + } else { + local_irq_enable(); + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + /* Handle pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | + _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + if (thread_info_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + } + local_irq_disable(); + thread_info_flags = read_thread_flags(); + } while (thread_info_flags & _TIF_WORK_MASK); } diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 08d11a53f39e7a767a643b27b996b2ce36a3fb16..bcfe9eb55f80fbfa9b09f093a9339617d882e8aa 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -58,7 +58,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, } else { fp = frame->fp; pc = ftrace_graph_ret_addr(current, NULL, frame->ra, - (unsigned long *)(fp - 8)); + &frame->ra); } } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 7abd8e4c4df63e29ec4b88ca31900e0bce377483..f77cb8e42bd2ad1cfe3c2f1894e953907ce61375 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -214,7 +214,7 @@ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used * to get per-cpu overflow stack(get_overflow_stack). 
*/ -long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)]; +long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16); asmlinkage unsigned long get_overflow_stack(void) { return (unsigned long)this_cpu_ptr(overflow_stack) + diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 71ebbc4821f0e34fbb0f5bcdfb1b4f9a654cd236..5174ef54ad1d9e05f111ec2f48dcacecb51ca6eb 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -296,12 +296,15 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu, if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) return -EFAULT; - /* This ONE REG interface is only defined for single letter extensions */ - if (fls(reg_val) >= RISCV_ISA_EXT_BASE) - return -EINVAL; - switch (reg_num) { case KVM_REG_RISCV_CONFIG_REG(isa): + /* + * This ONE REG interface is only defined for + * single letter extensions. + */ + if (fls(reg_val) >= RISCV_ISA_EXT_BASE) + return -EINVAL; + if (!vcpu->arch.ran_atleast_once) { /* Ignore the enable/disable request for certain extensions */ for (i = 0; i < RISCV_ISA_EXT_BASE; i++) { diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 7acbfbd14557e600ba69b888c7389fef64f72428..80ce9caba8d225979426f01f9a636d11890f9de6 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -196,6 +196,16 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu) if (need_flush_tlb) local_flush_tlb_all(); +#ifdef CONFIG_SMP + else { + cpumask_t *mask = &mm->context.tlb_stale_mask; + + if (cpumask_test_cpu(cpu, mask)) { + cpumask_clear_cpu(cpu, mask); + local_flush_tlb_all_asid(cntx & asid_mask); + } + } +#endif } static void set_mm_noasid(struct mm_struct *mm) diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c index 19cf25a74ee29dc4fafb31fa1ef5eda17255906c..9b18bda74154e8a3b2b807cd0ae2065eb7c28612 100644 --- a/arch/riscv/mm/physaddr.c +++ b/arch/riscv/mm/physaddr.c @@ -22,7 +22,7 @@ EXPORT_SYMBOL(__virt_to_phys); phys_addr_t __phys_addr_symbol(unsigned long x) { unsigned long kernel_start = kernel_map.virt_addr; - unsigned long kernel_end = (unsigned long)_end; + unsigned long kernel_end = kernel_start + kernel_map.size; /* * Boundary checking aginst the kernel image mapping. diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 37ed760d007c3eef9c302adda361cd4ad7d1db9d..ce7dfc81bb3fe386748557f44310aa4b1a86f3a9 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -5,23 +5,7 @@ #include #include #include - -static inline void local_flush_tlb_all_asid(unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma x0, %0" - : - : "r" (asid) - : "memory"); -} - -static inline void local_flush_tlb_page_asid(unsigned long addr, - unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma %0, %1" - : - : "r" (addr), "r" (asid) - : "memory"); -} +#include void flush_tlb_all(void) { @@ -31,6 +15,7 @@ void flush_tlb_all(void) static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { + struct cpumask *pmask = &mm->context.tlb_stale_mask; struct cpumask *cmask = mm_cpumask(mm); unsigned int cpuid; bool broadcast; @@ -44,6 +29,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, if (static_branch_unlikely(&use_asid_allocator)) { unsigned long asid = atomic_long_read(&mm->context.id); + /* + * TLB will be immediately flushed on harts concurrently + * executing this MM context. 
TLB flush on other harts + * is deferred until this MM context migrates there. + */ + cpumask_setall(pmask); + cpumask_clear_cpu(cpuid, pmask); + cpumask_andnot(pmask, pmask, cmask); + if (broadcast) { sbi_remote_sfence_vma_asid(cmask, start, size, asid); } else if (size <= stride) { diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 00df3a8f92acd8d23c7253e1e15fb0af1a5cb8d8..f2417ac54edd6db01ab3cbdd2bd1d5be3fc942ab 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -136,6 +136,25 @@ static bool in_auipc_jalr_range(s64 val) val < ((1L << 31) - (1L << 11)); } +/* Emit fixed-length instructions for address */ +static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx) +{ + u64 ip = (u64)(ctx->insns + ctx->ninsns); + s64 off = addr - ip; + s64 upper = (off + (1 << 11)) >> 12; + s64 lower = off & 0xfff; + + if (extra_pass && !in_auipc_jalr_range(off)) { + pr_err("bpf-jit: target offset 0x%llx is out of range\n", off); + return -ERANGE; + } + + emit(rv_auipc(rd, upper), ctx); + emit(rv_addi(rd, rd, lower), ctx); + return 0; +} + +/* Emit variable-length instructions for 32-bit and 64-bit imm */ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) { /* Note that the immediate from the add is sign-extended, @@ -1050,7 +1069,15 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, u64 imm64; imm64 = (u64)insn1.imm << 32 | (u32)imm; - emit_imm(rd, imm64, ctx); + if (bpf_pseudo_func(insn)) { + /* fixed-length insns for extra jit pass */ + ret = emit_addr(rd, imm64, extra_pass, ctx); + if (ret) + return ret; + } else { + emit_imm(rd, imm64, ctx); + } + return 1; } diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c index acb55b302b14c34d7dfe14fb6ace9067d5441308..3ac220dafec4a506ac7c2ed993f2baa58043ba6a 100644 --- a/arch/um/drivers/virt-pci.c +++ b/arch/um/drivers/virt-pci.c @@ -97,7 +97,8 @@ static int um_pci_send_cmd(struct um_pci_device *dev, } buf = get_cpu_var(um_pci_msg_bufs); - memcpy(buf, cmd, cmd_size); + if (buf) + memcpy(buf, cmd, cmd_size); if (posted) { u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC); @@ -182,6 +183,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, struct um_pci_message_buffer *buf; u8 *data; unsigned long ret = ULONG_MAX; + size_t bytes = sizeof(buf->data); if (!dev) return ULONG_MAX; @@ -189,7 +191,8 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, buf = get_cpu_var(um_pci_msg_bufs); data = buf->data; - memset(buf->data, 0xff, sizeof(buf->data)); + if (buf) + memset(data, 0xff, bytes); switch (size) { case 1: @@ -204,7 +207,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, goto out; } - if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, 8)) + if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, bytes)) goto out; switch (size) { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 67745ceab0dbc64cbca3e63e94ab92dbf976d571..b2c0fce3f257ce163deccd8946d006de12936b13 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -462,8 +462,8 @@ config X86_X2APIC Some Intel systems circa 2022 and later are locked into x2APIC mode and can not fall back to the legacy APIC modes if SGX or TDX are - enabled in the BIOS. They will be unable to boot without enabling - this option. + enabled in the BIOS. They will boot with very reduced functionality + without enabling this option. If you don't know what to do here, say N. 
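Illustration (not part of the patch): the new emit_addr() in bpf_jit_comp64.c above always emits a fixed-length auipc/addi pair, splitting the pc-relative offset into a 20-bit upper immediate that is pre-rounded by 1 << 11 and a 12-bit lower immediate that addi sign-extends. Below is a standalone C check of that decomposition; the sample offsets are arbitrary, and it assumes arithmetic right shift on negative values, as the JIT code itself does.

#include <assert.h>
#include <stdint.h>

/* Sign-extend the low 12 bits, as the hardware does for the addi immediate. */
static int64_t sext12(int64_t x)
{
	return (x & 0x800) ? (x | ~(int64_t)0xfff) : (x & 0xfff);
}

int main(void)
{
	int64_t offs[] = { 0, 1, 0x7ff, 0x800, 0x801, -1, -0x800,
			   0x12345678, -0x12345678 };
	unsigned int i;

	for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
		int64_t off = offs[i];
		int64_t upper = (off + (1 << 11)) >> 12; /* auipc immediate */
		int64_t lower = off & 0xfff;             /* addi immediate  */

		/* auipc adds upper << 12 to pc, addi then adds sext12(lower);
		 * the 1 << 11 rounding cancels the sign extension. */
		assert((upper << 12) + sext12(lower) == off);
	}
	return 0;
}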
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index b48ddebb474894eecfb695efda35c74c5984c63d..cdf3215ec272ced263a234efab1dc754d240debd 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -7,6 +7,7 @@ */ #include +#include #include #define STATE0 %xmm0 @@ -402,7 +403,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_ad) * void crypto_aegis128_aesni_enc(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_enc) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc) FRAME_BEGIN cmp $0x10, LEN @@ -499,7 +500,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc) * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_enc_tail) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail) FRAME_BEGIN /* load the state: */ @@ -556,7 +557,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_enc_tail) * void crypto_aegis128_aesni_dec(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_dec) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec) FRAME_BEGIN cmp $0x10, LEN @@ -653,7 +654,7 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec) * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length, * const void *src, void *dst); */ -SYM_FUNC_START(crypto_aegis128_aesni_dec_tail) +SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail) FRAME_BEGIN /* load the state: */ diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S index c75fd7d015ed8c958819f30d363b59b8b972321c..03ae4cd1d976a2e0fab0e18dc5b597545c4fadd1 100644 --- a/arch/x86/crypto/aria-aesni-avx-asm_64.S +++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S @@ -7,6 +7,7 @@ */ #include +#include #include /* struct aria_ctx: */ @@ -913,7 +914,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way) RET; SYM_FUNC_END(__aria_aesni_avx_crypt_16way) -SYM_FUNC_START(aria_aesni_avx_encrypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_encrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -938,7 +939,7 @@ SYM_FUNC_START(aria_aesni_avx_encrypt_16way) RET; SYM_FUNC_END(aria_aesni_avx_encrypt_16way) -SYM_FUNC_START(aria_aesni_avx_decrypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_decrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -1039,7 +1040,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_ctr_gen_keystream_16way) RET; SYM_FUNC_END(__aria_aesni_avx_ctr_gen_keystream_16way) -SYM_FUNC_START(aria_aesni_avx_ctr_crypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_ctr_crypt_16way) /* input: * %rdi: ctx * %rsi: dst @@ -1208,7 +1209,7 @@ SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way) RET; SYM_FUNC_END(__aria_aesni_avx_gfni_crypt_16way) -SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -1233,7 +1234,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way) RET; SYM_FUNC_END(aria_aesni_avx_gfni_encrypt_16way) -SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -1258,7 +1259,7 @@ SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way) RET; SYM_FUNC_END(aria_aesni_avx_gfni_decrypt_16way) -SYM_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way) +SYM_TYPED_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way) /* input: * %rdi: ctx * %rsi: dst diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S index 
2f94ec0e763bfefb088b7558b34c4cfefe177757..3cae5a1bb3d6ec29c89805c28aabaa74fa12d606 100644 --- a/arch/x86/crypto/sha1_ni_asm.S +++ b/arch/x86/crypto/sha1_ni_asm.S @@ -54,6 +54,7 @@ */ #include +#include #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ @@ -93,7 +94,7 @@ */ .text .align 32 -SYM_FUNC_START(sha1_ni_transform) +SYM_TYPED_FUNC_START(sha1_ni_transform) push %rbp mov %rsp, %rbp sub $FRAME_SIZE, %rsp diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index 263f916362e025ead9d059c81017e3974be263ae..f54988c80eb405126de5e56ad87e747663d5d563 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -25,6 +25,7 @@ */ #include +#include #define CTX %rdi // arg1 #define BUF %rsi // arg2 @@ -67,7 +68,7 @@ * param: function's name */ .macro SHA1_VECTOR_ASM name - SYM_FUNC_START(\name) + SYM_TYPED_FUNC_START(\name) push %rbx push %r12 diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index 3baa1ec390974a86064819234f66c199af1169ba..06ea30c20828dbfb4423da92e2906696010b712c 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -48,6 +48,7 @@ ######################################################################## #include +#include ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -346,7 +347,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -SYM_FUNC_START(sha256_transform_avx) +SYM_TYPED_FUNC_START(sha256_transform_avx) .align 32 pushq %rbx pushq %r12 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 9bcdbc47b8b4beeee9f8caaf7a825cd09bea8311..2d2be531a11ed7933676d37dc522e38f782926bc 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -49,6 +49,7 @@ ######################################################################## #include +#include ## assume buffers not aligned #define VMOVDQ vmovdqu @@ -523,7 +524,7 @@ STACK_SIZE = _CTX + _CTX_SIZE ## arg 3 : Num blocks ######################################################################## .text -SYM_FUNC_START(sha256_transform_rorx) +SYM_TYPED_FUNC_START(sha256_transform_rorx) .align 32 pushq %rbx pushq %r12 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index c4a5db612c3276380d914bbe65170811ddc1e7d1..7db28839108dda4a3e64c9682207518d9902b4c1 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -47,6 +47,7 @@ ######################################################################## #include +#include ## assume buffers not aligned #define MOVDQ movdqu @@ -355,7 +356,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -SYM_FUNC_START(sha256_transform_ssse3) +SYM_TYPED_FUNC_START(sha256_transform_ssse3) .align 32 pushq %rbx pushq %r12 diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S index 94d50dd27cb532edb5c89fd7e939372fa275eb7c..47f93937f798a6d79199b49d99540bd42993e925 100644 --- a/arch/x86/crypto/sha256_ni_asm.S +++ b/arch/x86/crypto/sha256_ni_asm.S @@ -54,6 +54,7 @@ */ #include +#include #define DIGEST_PTR %rdi /* 1st arg */ #define DATA_PTR %rsi /* 2nd arg */ @@ -97,7 +98,7 @@ .text .align 32 -SYM_FUNC_START(sha256_ni_transform) +SYM_TYPED_FUNC_START(sha256_ni_transform) shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash diff --git a/arch/x86/crypto/sha512-avx-asm.S 
b/arch/x86/crypto/sha512-avx-asm.S index 1fefe6dd3a9e2f90de0deff603b04bafa08dc5c2..b0984f19fdb408e952773793fa0244f2a50ff8c9 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -48,6 +48,7 @@ ######################################################################## #include +#include .text @@ -273,7 +274,7 @@ frame_size = frame_WK + WK_SIZE # of SHA512 message blocks. # "blocks" is the message length in SHA512 blocks ######################################################################## -SYM_FUNC_START(sha512_transform_avx) +SYM_TYPED_FUNC_START(sha512_transform_avx) test msglen, msglen je nowork diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 5cdaab7d6901544a868f95bb0cf894cb8bbe8f3a..b1ca99055ef994f5301a53b2e1ccfc7754e7f03e 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -50,6 +50,7 @@ ######################################################################## #include +#include .text @@ -565,7 +566,7 @@ frame_size = frame_CTX + CTX_SIZE # of SHA512 message blocks. # "blocks" is the message length in SHA512 blocks ######################################################################## -SYM_FUNC_START(sha512_transform_rorx) +SYM_TYPED_FUNC_START(sha512_transform_rorx) # Save GPRs push %rbx push %r12 diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S index b84c22e06c5f7987f8b458810b23e96127f6e7b0..c06afb5270e5f5789ba0d45c948a5f68ac4235f3 100644 --- a/arch/x86/crypto/sha512-ssse3-asm.S +++ b/arch/x86/crypto/sha512-ssse3-asm.S @@ -48,6 +48,7 @@ ######################################################################## #include +#include .text @@ -274,7 +275,7 @@ frame_size = frame_WK + WK_SIZE # of SHA512 message blocks. # "blocks" is the message length in SHA512 blocks. 
######################################################################## -SYM_FUNC_START(sha512_transform_ssse3) +SYM_TYPED_FUNC_START(sha512_transform_ssse3) test msglen, msglen je nowork diff --git a/arch/x86/crypto/sm3-avx-asm_64.S b/arch/x86/crypto/sm3-avx-asm_64.S index b12b9efb5ec5142dea9da76087e1dd84625aa96a..8fc5ac681fd63450328c217412ce5e020b37ad74 100644 --- a/arch/x86/crypto/sm3-avx-asm_64.S +++ b/arch/x86/crypto/sm3-avx-asm_64.S @@ -12,6 +12,7 @@ */ #include +#include #include /* Context structure */ @@ -328,7 +329,7 @@ * const u8 *data, int nblocks); */ .align 16 -SYM_FUNC_START(sm3_transform_avx) +SYM_TYPED_FUNC_START(sm3_transform_avx) /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S index 4767ab61ff489bebf206cd2a6ddea678b3d2304b..22b6560eb9e1eba7138f94396e6363081607c114 100644 --- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S +++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S @@ -14,6 +14,7 @@ */ #include +#include #include #define rRIP (%rip) @@ -420,7 +421,7 @@ SYM_FUNC_END(sm4_aesni_avx_crypt8) * const u8 *src, u8 *iv) */ .align 8 -SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8) +SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) @@ -495,7 +496,7 @@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8) * const u8 *src, u8 *iv) */ .align 8 -SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) +SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) @@ -545,7 +546,7 @@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8) * const u8 *src, u8 *iv) */ .align 8 -SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) +SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8) /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S index 4732fe8bb65b6579e2ce8a4489dd7df8e89be74c..23ee39a8ada8cb5489a46a1e2491317db0209eae 100644 --- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S @@ -14,6 +14,7 @@ */ #include +#include #include #define rRIP (%rip) @@ -282,7 +283,7 @@ SYM_FUNC_END(__sm4_crypt_blk16) * const u8 *src, u8 *iv) */ .align 8 -SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16) +SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) @@ -395,7 +396,7 @@ SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16) * const u8 *src, u8 *iv) */ .align 8 -SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16) +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) @@ -449,7 +450,7 @@ SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16) * const u8 *src, u8 *iv) */ .align 8 -SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16) +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16) /* input: * %rdi: round key array, CTX * %rsi: dst (16 blocks) diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S index 4bf48462fca7a133c39fd850d4d3ed55959d29cf..e8c60ae7a7c838313de4024f89b9a6db84d14c0c 100644 --- a/arch/x86/entry/vdso/vdso.lds.S +++ b/arch/x86/entry/vdso/vdso.lds.S @@ -27,7 +27,9 @@ VERSION { __vdso_time; clock_getres; __vdso_clock_getres; +#ifdef CONFIG_X86_SGX __vdso_sgx_enter_enclave; +#endif local: *; }; } diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 2adeaf4de4df6f5bdf5fd843d233a9db72f89154..b363fddc2a89e0415c7406885c008bf3611fb8db 100644 --- a/arch/x86/events/intel/uncore.h +++ 
b/arch/x86/events/intel/uncore.h @@ -2,6 +2,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 1ef4f7861e2ecb323bad131846cf9df92fc291f0..1f4869227efb95f1a37b90f602a8cfda587418f4 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -1338,6 +1338,7 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box, /* MCHBAR is disabled */ if (!(mch_bar & BIT(0))) { pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n"); + pci_dev_put(pdev); return; } mch_bar &= ~BIT(0); @@ -1352,6 +1353,8 @@ static void __uncore_imc_init_box(struct intel_uncore_box *box, box->io_addr = ioremap(addr, type->mmio_map_size); if (!box->io_addr) pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); + + pci_dev_put(pdev); } static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index ed869443efb21dc91ce5a0e8e30e4c0db2487069..8f371f3cbbd244b27f8be0e409ecde3f9ca74aae 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -2891,6 +2891,7 @@ static bool hswep_has_limit_sbox(unsigned int device) return false; pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4); + pci_dev_put(dev); if (!hswep_get_chop(capid4)) return true; @@ -3803,6 +3804,21 @@ static const struct attribute_group *skx_iio_attr_update[] = { NULL, }; +static void pmu_clear_mapping_attr(const struct attribute_group **groups, + struct attribute_group *ag) +{ + int i; + + for (i = 0; groups[i]; i++) { + if (groups[i] == ag) { + for (i++; groups[i]; i++) + groups[i - 1] = groups[i]; + groups[i - 1] = NULL; + break; + } + } +} + static int pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) { @@ -3851,7 +3867,7 @@ pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag) clear_topology: kfree(type->topology); clear_attr_update: - type->attr_update = NULL; + pmu_clear_mapping_attr(type->attr_update, ag); return ret; } @@ -4492,6 +4508,8 @@ static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_map type->topology = NULL; } + pci_dev_put(dev); + return ret; } @@ -4857,6 +4875,8 @@ static int snr_uncore_mmio_map(struct intel_uncore_box *box, addr += box_ctl; + pci_dev_put(pdev); + box->io_addr = ioremap(addr, type->mmio_map_size); if (!box->io_addr) { pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name); @@ -5139,6 +5159,11 @@ static int icx_iio_get_topology(struct intel_uncore_type *type) static int icx_iio_set_mapping(struct intel_uncore_type *type) { + /* Detect ICX-D system. 
This case is not supported */ + if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) { + pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group); + return -EPERM; + } return pmu_iio_set_mapping(type, &icx_iio_mapping_group); } diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index a269049a43ce311c7db256685495cfb9b2a3e691..85863b9c9e684d8b4206c8d599f41fb99d71fa27 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -535,8 +535,6 @@ void hyperv_cleanup(void) union hv_x64_msr_hypercall_contents hypercall_msr; union hv_reference_tsc_msr tsc_msr; - unregister_syscore_ops(&hv_syscore_ops); - /* Reset our OS id */ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0); diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 3415321c8240c4b0b2b4536b147c0ce586c69c4f..3216da7074bad80515872a72a69e75ff7add4d66 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -249,7 +249,6 @@ static inline u64 native_x2apic_icr_read(void) extern int x2apic_mode; extern int x2apic_phys; extern void __init x2apic_set_max_apicid(u32 apicid); -extern void __init check_x2apic(void); extern void x2apic_setup(void); static inline int x2apic_enabled(void) { @@ -258,13 +257,13 @@ static inline int x2apic_enabled(void) #define x2apic_supported() (boot_cpu_has(X86_FEATURE_X2APIC)) #else /* !CONFIG_X86_X2APIC */ -static inline void check_x2apic(void) { } static inline void x2apic_setup(void) { } static inline int x2apic_enabled(void) { return 0; } #define x2apic_mode (0) #define x2apic_supported() (0) #endif /* !CONFIG_X86_X2APIC */ +extern void __init check_x2apic(void); struct irq_data; diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index fd6f6e5b755a7823c50ef4f6e2fe4daaee09caf5..a336feef0af14318bf3776e417c4fbcd8277b976 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -91,6 +91,7 @@ static inline void set_real_mode_mem(phys_addr_t mem) void reserve_real_mode(void); void load_trampoline_pgtable(void); +void init_real_mode(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index e9170457697e4ca6605c151ef08865799ad533e6..c1c8c581759d69bfe807a81bfa5eb3a799d2ebc8 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -285,6 +285,8 @@ struct x86_hyper_runtime { * possible in x86_early_init_platform_quirks() by * only using the current x86_hardware_subarch * semantics. + * @realmode_reserve: reserve memory for realmode trampoline + * @realmode_init: initialize realmode trampoline * @hyper: x86 hypervisor specific runtime callbacks */ struct x86_platform_ops { @@ -301,6 +303,8 @@ struct x86_platform_ops { void (*apic_post_init)(void); struct x86_legacy_features legacy; void (*set_legacy_features)(void); + void (*realmode_reserve)(void); + void (*realmode_init)(void); struct x86_hyper_runtime hyper; struct x86_guest guest; }; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index c6876d3ea4b17893462828f9dfb6e94c1747a83a..20d9a604da7c4b624f701182f0ead13244d1323f 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1931,16 +1931,19 @@ void __init check_x2apic(void) } } #else /* CONFIG_X86_X2APIC */ -static int __init validate_x2apic(void) +void __init check_x2apic(void) { if (!apic_is_x2apic_enabled()) - return 0; + return; /* - * Checkme: Can we simply turn off x2apic here instead of panic? 
+ * Checkme: Can we simply turn off x2APIC here instead of disabling the APIC? */ - panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n"); + pr_err("Kernel does not support x2APIC, please recompile with CONFIG_X86_X2APIC.\n"); + pr_err("Disabling APIC, expect reduced performance and functionality.\n"); + + disable_apic = 1; + setup_clear_cpu_cap(X86_FEATURE_APIC); } -early_initcall(validate_x2apic); static inline void try_to_enable_x2apic(int remap_mode) { } static inline void __x2apic_enable(void) { } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 6daf842295489e34f3e1283a2d1b2c8101e116d4..16d8e43be7758e783eee6a7f27f171c69f58c7b1 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1951,6 +1951,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); task_update_spec_tif(task); + if (task == current) + indirect_branch_prediction_barrier(); break; default: return -ERANGE; diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 2d7ea5480ec3394d7422b7c06df98d759ed8ad84..42789965048339786387c0cb4691d62bedbc5b4d 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -1034,8 +1034,32 @@ static const struct { static struct ratelimit_state bld_ratelimit; +static unsigned int sysctl_sld_mitigate = 1; static DEFINE_SEMAPHORE(buslock_sem); +#ifdef CONFIG_PROC_SYSCTL +static struct ctl_table sld_sysctls[] = { + { + .procname = "split_lock_mitigate", + .data = &sysctl_sld_mitigate, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + {} +}; + +static int __init sld_mitigate_sysctl_init(void) +{ + register_sysctl_init("kernel", sld_sysctls); + return 0; +} + +late_initcall(sld_mitigate_sysctl_init); +#endif + static inline bool match_option(const char *arg, int arglen, const char *opt) { int len = strlen(opt), ratelimit; @@ -1146,12 +1170,20 @@ static void split_lock_init(void) split_lock_verify_msr(sld_state != sld_off); } -static void __split_lock_reenable(struct work_struct *work) +static void __split_lock_reenable_unlock(struct work_struct *work) { sld_update_msr(true); up(&buslock_sem); } +static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock); + +static void __split_lock_reenable(struct work_struct *work) +{ + sld_update_msr(true); +} +static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable); + /* * If a CPU goes offline with pending delayed work to re-enable split lock * detection then the delayed work will be executed on some other CPU. That @@ -1169,10 +1201,9 @@ static int splitlock_cpu_offline(unsigned int cpu) return 0; } -static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable); - static void split_lock_warn(unsigned long ip) { + struct delayed_work *work; int cpu; if (!current->reported_split_lock) @@ -1180,14 +1211,26 @@ static void split_lock_warn(unsigned long ip) current->comm, current->pid, ip); current->reported_split_lock = 1; - /* misery factor #1, sleep 10ms before trying to execute split lock */ - if (msleep_interruptible(10) > 0) - return; - /* Misery factor #2, only allow one buslocked disabled core at a time */ - if (down_interruptible(&buslock_sem) == -EINTR) - return; + if (sysctl_sld_mitigate) { + /* + * misery factor #1: + * sleep 10ms before trying to execute split lock. 
+ */ + if (msleep_interruptible(10) > 0) + return; + /* + * Misery factor #2: + * only allow one buslocked disabled core at a time. + */ + if (down_interruptible(&buslock_sem) == -EINTR) + return; + work = &sl_reenable_unlock; + } else { + work = &sl_reenable; + } + cpu = get_cpu(); - schedule_delayed_work_on(cpu, &split_lock_reenable, 2); + schedule_delayed_work_on(cpu, work, 2); /* Disable split lock detection on this CPU to make progress */ sld_update_msr(false); diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 1c87501e0fa3dd82fd6c290f562d062bb54d10d6..10fb5b5c9efa4c7f29ed7da7a3eb57137458f034 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -788,6 +788,24 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) return status & MCI_STATUS_DEFERRED; } +static bool _log_error_deferred(unsigned int bank, u32 misc) +{ + if (!_log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), + mca_msr_reg(bank, MCA_ADDR), misc)) + return false; + + /* + * Non-SMCA systems don't have MCA_DESTAT/MCA_DEADDR registers. + * Return true here to avoid accessing these registers. + */ + if (!mce_flags.smca) + return true; + + /* Clear MCA_DESTAT if the deferred error was logged from MCA_STATUS. */ + wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); + return true; +} + /* * We have three scenarios for checking for Deferred errors: * @@ -799,19 +817,8 @@ _log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc) */ static void log_error_deferred(unsigned int bank) { - bool defrd; - - defrd = _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), - mca_msr_reg(bank, MCA_ADDR), 0); - - if (!mce_flags.smca) - return; - - /* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */ - if (defrd) { - wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0); + if (_log_error_deferred(bank, 0)) return; - } /* * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check @@ -832,7 +839,7 @@ static void amd_deferred_error_interrupt(void) static void log_error_thresholding(unsigned int bank, u64 misc) { - _log_error_bank(bank, mca_msr_reg(bank, MCA_STATUS), mca_msr_reg(bank, MCA_ADDR), misc); + _log_error_deferred(bank, misc); } static void log_and_reset_block(struct threshold_block *block) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 1fcbd671f1dffc515672ccd2c57d5fdbf5d42436..048e38ec99e7179fa10d0e69a99c81240bec2c61 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -621,7 +621,6 @@ void load_ucode_intel_ap(void) else iup = &intel_ucode_patch; -reget: if (!*iup) { patch = __load_ucode_intel(&uci); if (!patch) @@ -632,12 +631,7 @@ void load_ucode_intel_ap(void) uci.mc = *iup; - if (apply_microcode_early(&uci, true)) { - /* Mixed-silicon system? 
Try to refetch the proper patch: */ - *iup = NULL; - - goto reget; - } + apply_microcode_early(&uci, true); } static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c index 1ec20807de1e84642cd6c5cfee1397accb7d4c40..2c258255a62968eac44265ba318a79e8710f29c6 100644 --- a/arch/x86/kernel/cpu/sgx/encl.c +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -680,11 +680,15 @@ const struct vm_operations_struct sgx_vm_ops = { void sgx_encl_release(struct kref *ref) { struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount); + unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1); struct sgx_va_page *va_page; struct sgx_encl_page *entry; - unsigned long index; + unsigned long count = 0; + + XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base)); - xa_for_each(&encl->page_array, index, entry) { + xas_lock(&xas); + xas_for_each(&xas, entry, max_page_index) { if (entry->epc_page) { /* * The page and its radix tree entry cannot be freed @@ -699,9 +703,20 @@ void sgx_encl_release(struct kref *ref) } kfree(entry); - /* Invoke scheduler to prevent soft lockups. */ - cond_resched(); + /* + * Invoke scheduler on every XA_CHECK_SCHED iteration + * to prevent soft lockups. + */ + if (!(++count % XA_CHECK_SCHED)) { + xas_pause(&xas); + xas_unlock(&xas); + + cond_resched(); + + xas_lock(&xas); + } } + xas_unlock(&xas); xa_destroy(&encl->page_array); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 9730c88530fc8b4c0a6c0bfc770acd087524ea10..305514431f26e03d9b50709cdaa4a2732f74e5c3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -401,10 +401,8 @@ int crash_load_segments(struct kimage *image) kbuf.buf_align = ELF_CORE_HEADER_ALIGN; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); - if (ret) { - vfree((void *)image->elf_headers); + if (ret) return ret; - } image->elf_load_addr = kbuf.mem; pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", image->elf_load_addr, kbuf.bufsz, kbuf.memsz); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 59e543b95a3c6478d8a8a02aca22dbd0f59f5b8f..c2dde46a538e768f5b2e2c59e5ab9be6b12915b2 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -440,8 +440,8 @@ static void __init __xstate_dump_leaves(void) } } -#define XSTATE_WARN_ON(x) do { \ - if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) { \ +#define XSTATE_WARN_ON(x, fmt, ...) do { \ + if (WARN_ONCE(x, "XSAVE consistency problem: " fmt, ##__VA_ARGS__)) { \ __xstate_dump_leaves(); \ } \ } while (0) @@ -554,8 +554,7 @@ static bool __init check_xstate_against_struct(int nr) (nr >= XFEATURE_MAX) || (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) || ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_RSRVD_COMP_16))) { - WARN_ONCE(1, "no structure for xstate: %d\n", nr); - XSTATE_WARN_ON(1); + XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr); return false; } return true; @@ -598,12 +597,13 @@ static bool __init paranoid_xstate_size_valid(unsigned int kernel_size) * XSAVES. 
*/ if (!xsaves && xfeature_is_supervisor(i)) { - XSTATE_WARN_ON(1); + XSTATE_WARN_ON(1, "Got supervisor feature %d, but XSAVES not advertised\n", i); return false; } } size = xstate_calculate_size(fpu_kernel_cfg.max_features, compacted); - XSTATE_WARN_ON(size != kernel_size); + XSTATE_WARN_ON(size != kernel_size, + "size %u != kernel_size %u\n", size, kernel_size); return size == kernel_size; } diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index bd165004776d93bf998c708cac2ec2567f9739a6..e07234ec7e237a78d1d6b51d13de41992bc571ad 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -217,7 +217,9 @@ void ftrace_replace_code(int enable) ret = ftrace_verify_code(rec->ip, old); if (ret) { + ftrace_expected = old; ftrace_bug(ret, rec); + ftrace_expected = NULL; return; } } diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index eb8bc82846b997031da4529d7009bbc737bee0a9..5be7f23099e1f592d0abf4385f9be8cfad7b6e81 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -281,12 +282,15 @@ static int can_probe(unsigned long paddr) if (ret < 0) return 0; +#ifdef CONFIG_KGDB /* - * Another debugging subsystem might insert this breakpoint. - * In that case, we can't recover it. + * If there is a dynamically installed kgdb sw breakpoint, + * this function should not be probed. */ - if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) + if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && + kgdb_has_hit_break(addr)) return 0; +#endif addr += insn.length; } diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index e6b8c5362b945586bc08f1c4817aeba512f9cc34..e57e07b0edb64cf5b4d82d64adfc61de94c60424 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -279,19 +280,6 @@ static int insn_is_indirect_jump(struct insn *insn) return ret; } -static bool is_padding_int3(unsigned long addr, unsigned long eaddr) -{ - unsigned char ops; - - for (; addr < eaddr; addr++) { - if (get_kernel_nofault(ops, (void *)addr) < 0 || - ops != INT3_INSN_OPCODE) - return false; - } - - return true; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -334,15 +322,15 @@ static int can_optimize(unsigned long paddr) ret = insn_decode_kernel(&insn, (void *)recovered_insn); if (ret < 0) return 0; - +#ifdef CONFIG_KGDB /* - * In the case of detecting unknown breakpoint, this could be - * a padding INT3 between functions. Let's check that all the - * rest of the bytes are also INT3. + * If there is a dynamically installed kgdb sw breakpoint, + * this function should not be probed. */ - if (insn.opcode.bytes[0] == INT3_INSN_OPCODE) - return is_padding_int3(addr, paddr - offset + size) ? 
1 : 0; - + if (insn.opcode.bytes[0] == INT3_INSN_OPCODE && + kgdb_has_hit_break(addr)) + return 0; +#endif /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 216fee7144eefd4f76eca0d34df9bc84c2754bbc..892609cde4a20fb60469da7e1a788536cf470dd2 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1175,7 +1175,7 @@ void __init setup_arch(char **cmdline_p) * Moreover, on machines with SandyBridge graphics or in setups that use * crashkernel the entire 1M is reserved anyway. */ - reserve_real_mode(); + x86_platform.realmode_reserve(); init_mem_mapping(); diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b63cf8f7745ee689df45f72da750570bce66c143..6c07f6daaa227a91da0864ecf35de79e23c10243 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -722,8 +722,9 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) switch (opc1) { case 0xeb: /* jmp 8 */ case 0xe9: /* jmp 32 */ - case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ break; + case 0x90: /* prefix* + nop; same as jmp with .offs = 0 */ + goto setup; case 0xe8: /* call relative */ branch_clear_offset(auprobe, insn); @@ -753,6 +754,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) return -ENOTSUPP; } +setup: auprobe->branch.opc1 = opc1; auprobe->branch.ilen = insn->length; auprobe->branch.offs = insn->immediate.value; diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 57353519bc119e7458f9a35d172d5f199ba9e75b..ef80d361b4632ec64bb8aacd0f0bf6e88acb1021 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -25,6 +25,7 @@ #include #include #include +#include void x86_init_noop(void) { } void __init x86_init_uint_noop(unsigned int unused) { } @@ -145,6 +146,8 @@ struct x86_platform_ops x86_platform __ro_after_init = { .get_nmi_reason = default_get_nmi_reason, .save_sched_clock_state = tsc_save_sched_clock_state, .restore_sched_clock_state = tsc_restore_sched_clock_state, + .realmode_reserve = reserve_real_mode, + .realmode_init = init_real_mode, .hyper.pin_vcpu = x86_op_int_noop, .guest = { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index d7639d126e6c7ae255f6bce48634ebe1102a5b84..bf5ce862c4dafc341ae0c4d87447a70ef95d5375 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2722,8 +2722,6 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR); __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32); } - } else { - kvm_lapic_xapic_id_updated(vcpu->arch.apic); } return 0; @@ -2759,6 +2757,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) } memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); + if (!apic_x2apic_mode(apic)) + kvm_lapic_xapic_id_updated(apic); + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); kvm_recalculate_apic_map(vcpu->kvm); kvm_apic_set_version(vcpu); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 5b0d4859e4b783b9cd60212e1d8d95e4e51c1bf1..10c63b1bf92faa72606fd6e0135fde9762ee187a 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5100,24 +5100,35 @@ static int handle_vmxon(struct kvm_vcpu *vcpu) | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; /* - * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks - * that have higher priority than VM-Exit (see Intel SDM's pseudocode - * for VMXON), 
as KVM must load valid CR0/CR4 values into hardware while - * running the guest, i.e. KVM needs to check the _guest_ values. + * Manually check CR4.VMXE checks, KVM must force CR4.VMXE=1 to enter + * the guest and so cannot rely on hardware to perform the check, + * which has higher priority than VM-Exit (see Intel SDM's pseudocode + * for VMXON). * - * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and - * !COMPATIBILITY modes. KVM may run the guest in VM86 to emulate Real - * Mode, but KVM will never take the guest out of those modes. + * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86 + * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't + * force any of the relevant guest state. For a restricted guest, KVM + * does force CR0.PE=1, but only to also force VM86 in order to emulate + * Real Mode, and so there's no need to check CR0.PE manually. */ - if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || - !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { + if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* - * CPL=0 and all other checks that are lower priority than VM-Exit must - * be checked manually. + * The CPL is checked for "not in VMX operation" and for "in VMX root", + * and has higher priority than the VM-Fail due to being post-VMXON, + * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, + * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits + * from L2 to L1, i.e. there's no need to check for the vCPU being in + * VMX non-root. + * + * Forwarding the VM-Exit unconditionally, i.e. without performing the + * #UD checks (see above), is functionally ok because KVM doesn't allow + * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's + * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are + * missed by hardware due to shadowing CR0 and/or CR4. */ if (vmx_get_cpl(vcpu)) { kvm_inject_gp(vcpu, 0); @@ -5127,6 +5138,17 @@ static int handle_vmxon(struct kvm_vcpu *vcpu) if (vmx->nested.vmxon) return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); + /* + * Invalid CR0/CR4 generates #GP. These checks are performed if and + * only if the vCPU isn't already in VMX operation, i.e. effectively + * have lower priority than the VM-Fail above. + */ + if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) || + !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) { + kvm_inject_gp(vcpu, 0); + return 1; + } + if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); @@ -6808,7 +6830,8 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_RDSEED_EXITING | SECONDARY_EXEC_XSAVES | - SECONDARY_EXEC_TSC_SCALING; + SECONDARY_EXEC_TSC_SCALING | + SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; /* * We can emulate "VMCS shadowing," even if the hardware diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 8f95c7c0143359cc617641eddbf422018d77cf63..b12da2a6dec95644941c67200c242fb2982b0742 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -182,8 +182,10 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, /* Enforce CPUID restriction on max enclave size. */ max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? 
sgx_12_0->edx >> 8 : sgx_12_0->edx; - if (size >= BIT_ULL(max_size_log2)) + if (size >= BIT_ULL(max_size_log2)) { kvm_inject_gp(vcpu, 0); + return 1; + } /* * sgx_virt_ecreate() returns: diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 41d7669a97ad167f50485fab3ac1ebafe255ddfc..af565816d2ba6aed5df706910d9e43e773b53fcb 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -200,14 +200,18 @@ static void __init set_real_mode_permissions(void) set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT); } -static int __init init_real_mode(void) +void __init init_real_mode(void) { if (!real_mode_header) panic("Real mode trampoline was not allocated"); setup_real_mode(); set_real_mode_permissions(); +} +static int __init do_init_real_mode(void) +{ + x86_platform.realmode_init(); return 0; } -early_initcall(init_real_mode); +early_initcall(do_init_real_mode); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 038da45f057a7a85ade569160b8518dd76d70f44..8944726255c9c2e4c7760b9381b0bb1b3e8a7a7b 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1266,6 +1266,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si) xen_vcpu_info_reset(0); x86_platform.get_nmi_reason = xen_get_nmi_reason; + x86_platform.realmode_reserve = x86_init_noop; + x86_platform.realmode_init = x86_init_noop; x86_init.resources.memory_setup = xen_memory_setup; x86_init.irqs.intr_mode_select = x86_init_noop; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index c3e1f9a7d43aa3da044c168b3d0ce9026ca4ec27..4b0d6fff88de5a544e2ef91a8c3f7c5fa1339aa5 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) void xen_smp_intr_free(unsigned int cpu) { + kfree(per_cpu(xen_resched_irq, cpu).name); + per_cpu(xen_resched_irq, cpu).name = NULL; if (per_cpu(xen_resched_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); per_cpu(xen_resched_irq, cpu).irq = -1; - kfree(per_cpu(xen_resched_irq, cpu).name); - per_cpu(xen_resched_irq, cpu).name = NULL; } + kfree(per_cpu(xen_callfunc_irq, cpu).name); + per_cpu(xen_callfunc_irq, cpu).name = NULL; if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); per_cpu(xen_callfunc_irq, cpu).irq = -1; - kfree(per_cpu(xen_callfunc_irq, cpu).name); - per_cpu(xen_callfunc_irq, cpu).name = NULL; } + kfree(per_cpu(xen_debug_irq, cpu).name); + per_cpu(xen_debug_irq, cpu).name = NULL; if (per_cpu(xen_debug_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); per_cpu(xen_debug_irq, cpu).irq = -1; - kfree(per_cpu(xen_debug_irq, cpu).name); - per_cpu(xen_debug_irq, cpu).name = NULL; } + kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); + per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, NULL); per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; - kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); - per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; } } @@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu) char *resched_name, *callfunc_name, *debug_name; resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); + per_cpu(xen_resched_irq, cpu).name = resched_name; rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, xen_reschedule_interrupt, @@ -74,9 +75,9 @@ int 
xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_resched_irq, cpu).irq = rc; - per_cpu(xen_resched_irq, cpu).name = resched_name; callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); + per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu, xen_call_function_interrupt, @@ -86,10 +87,10 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_callfunc_irq, cpu).irq = rc; - per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; if (!xen_fifo_events) { debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); + per_cpu(xen_debug_irq, cpu).name = debug_name; rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, IRQF_PERCPU | IRQF_NOBALANCING, @@ -97,10 +98,10 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_debug_irq, cpu).irq = rc; - per_cpu(xen_debug_irq, cpu).name = debug_name; } callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu, xen_call_function_single_interrupt, @@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; - per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; return 0; diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 480be82e9b7be3c3e6856da908afa6cb8eebc4c8..6175f2c5c8224b212f332f63a7f7fb8c6401bf4c 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -97,18 +97,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void) void xen_smp_intr_free_pv(unsigned int cpu) { + kfree(per_cpu(xen_irq_work, cpu).name); + per_cpu(xen_irq_work, cpu).name = NULL; if (per_cpu(xen_irq_work, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); per_cpu(xen_irq_work, cpu).irq = -1; - kfree(per_cpu(xen_irq_work, cpu).name); - per_cpu(xen_irq_work, cpu).name = NULL; } + kfree(per_cpu(xen_pmu_irq, cpu).name); + per_cpu(xen_pmu_irq, cpu).name = NULL; if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); per_cpu(xen_pmu_irq, cpu).irq = -1; - kfree(per_cpu(xen_pmu_irq, cpu).name); - per_cpu(xen_pmu_irq, cpu).name = NULL; } } @@ -118,6 +118,7 @@ int xen_smp_intr_init_pv(unsigned int cpu) char *callfunc_name, *pmu_name; callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); + per_cpu(xen_irq_work, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu, xen_irq_work_interrupt, @@ -127,10 +128,10 @@ int xen_smp_intr_init_pv(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_irq_work, cpu).irq = rc; - per_cpu(xen_irq_work, cpu).name = callfunc_name; if (is_xen_pmu) { pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); + per_cpu(xen_pmu_irq, cpu).name = pmu_name; rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, xen_pmu_irq_handler, IRQF_PERCPU|IRQF_NOBALANCING, @@ -138,7 +139,6 @@ int xen_smp_intr_init_pv(unsigned int cpu) if (rc < 0) goto fail; per_cpu(xen_pmu_irq, cpu).irq = rc; - per_cpu(xen_pmu_irq, cpu).name = pmu_name; } return 0; diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 043c73dfd2c98388c4adcdb9cb001ce0de0b15dc..5c6fc16e4b925a74fac6620ae53559900d3801c3 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu) cpu, per_cpu(lock_kicker_irq, cpu)); name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); + per_cpu(irq_name, cpu) = name; irq = 
bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, dummy_handler, @@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu) if (irq >= 0) { disable_irq(irq); /* make sure it's never delivered */ per_cpu(lock_kicker_irq, cpu) = irq; - per_cpu(irq_name, cpu) = name; } printk("cpu %d spinlock event irq %d\n", cpu, irq); @@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu) if (!xen_pvspin) return; + kfree(per_cpu(irq_name, cpu)); + per_cpu(irq_name, cpu) = NULL; /* * When booting the kernel with 'mitigations=auto,nosmt', the secondary * CPUs are not activated, and lock_kicker_irq is not initialized. @@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu) unbind_from_irqhandler(irq, NULL); per_cpu(lock_kicker_irq, cpu) = -1; - kfree(per_cpu(irq_name, cpu)); - per_cpu(irq_name, cpu) = NULL; } PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen); diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index b0bc8897c924ff2006d9930b94067052d21dfe2b..2a31b1ab0c9f20ed800b3f2490afe5e33b9bd579 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -62,6 +62,7 @@ extern int __modsi3(int, int); extern int __mulsi3(int, int); extern unsigned int __udivsi3(unsigned int, unsigned int); extern unsigned int __umodsi3(unsigned int, unsigned int); +extern unsigned long long __umulsidi3(unsigned int, unsigned int); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); @@ -71,6 +72,7 @@ EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__mulsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umodsi3); +EXPORT_SYMBOL(__umulsidi3); unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) { diff --git a/arch/xtensa/lib/Makefile b/arch/xtensa/lib/Makefile index d4e9c397e3fdefb35fcd4a57953b04b2d7149007..7ecef0519a27caac48855896aa6042959f315c56 100644 --- a/arch/xtensa/lib/Makefile +++ b/arch/xtensa/lib/Makefile @@ -5,7 +5,7 @@ lib-y += memcopy.o memset.o checksum.o \ ashldi3.o ashrdi3.o lshrdi3.o \ - divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \ + divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \ usercopy.o strncpy_user.o strnlen_user.o lib-$(CONFIG_PCI) += pci-auto.o lib-$(CONFIG_KCSAN) += kcsan-stubs.o diff --git a/arch/xtensa/lib/umulsidi3.S b/arch/xtensa/lib/umulsidi3.S new file mode 100644 index 0000000000000000000000000000000000000000..13608164794274250c02b703edfc7ceb49ef5e97 --- /dev/null +++ b/arch/xtensa/lib/umulsidi3.S @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */ +#include +#include +#include + +#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16 +#define XCHAL_NO_MUL 1 +#endif + +ENTRY(__umulsidi3) + +#ifdef __XTENSA_CALL0_ABI__ + abi_entry(32) + s32i a12, sp, 16 + s32i a13, sp, 20 + s32i a14, sp, 24 + s32i a15, sp, 28 +#elif XCHAL_NO_MUL + /* This is not really a leaf function; allocate enough stack space + to allow CALL12s to a helper function. */ + abi_entry(32) +#else + abi_entry_default +#endif + +#ifdef __XTENSA_EB__ +#define wh a2 +#define wl a3 +#else +#define wh a3 +#define wl a2 +#endif /* __XTENSA_EB__ */ + + /* This code is taken from the mulsf3 routine in ieee754-sf.S. + See more comments there. */ + +#if XCHAL_HAVE_MUL32_HIGH + mull a6, a2, a3 + muluh wh, a2, a3 + mov wl, a6 + +#else /* ! MUL32_HIGH */ + +#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL + /* a0 and a8 will be clobbered by calling the multiply function + but a8 is not used here and need not be saved. 
*/ + s32i a0, sp, 0 +#endif + +#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 + +#define a2h a4 +#define a3h a5 + + /* Get the high halves of the inputs into registers. */ + srli a2h, a2, 16 + srli a3h, a3, 16 + +#define a2l a2 +#define a3l a3 + +#if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16 + /* Clear the high halves of the inputs. This does not matter + for MUL16 because the high bits are ignored. */ + extui a2, a2, 0, 16 + extui a3, a3, 0, 16 +#endif +#endif /* MUL16 || MUL32 */ + + +#if XCHAL_HAVE_MUL16 + +#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ + mul16u dst, xreg ## xhalf, yreg ## yhalf + +#elif XCHAL_HAVE_MUL32 + +#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ + mull dst, xreg ## xhalf, yreg ## yhalf + +#elif XCHAL_HAVE_MAC16 + +/* The preprocessor insists on inserting a space when concatenating after + a period in the definition of do_mul below. These macros are a workaround + using underscores instead of periods when doing the concatenation. */ +#define umul_aa_ll umul.aa.ll +#define umul_aa_lh umul.aa.lh +#define umul_aa_hl umul.aa.hl +#define umul_aa_hh umul.aa.hh + +#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ + umul_aa_ ## xhalf ## yhalf xreg, yreg; \ + rsr dst, ACCLO + +#else /* no multiply hardware */ + +#define set_arg_l(dst, src) \ + extui dst, src, 0, 16 +#define set_arg_h(dst, src) \ + srli dst, src, 16 + +#ifdef __XTENSA_CALL0_ABI__ +#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ + set_arg_ ## xhalf (a13, xreg); \ + set_arg_ ## yhalf (a14, yreg); \ + call0 .Lmul_mulsi3; \ + mov dst, a12 +#else +#define do_mul(dst, xreg, xhalf, yreg, yhalf) \ + set_arg_ ## xhalf (a14, xreg); \ + set_arg_ ## yhalf (a15, yreg); \ + call12 .Lmul_mulsi3; \ + mov dst, a14 +#endif /* __XTENSA_CALL0_ABI__ */ + +#endif /* no multiply hardware */ + + /* Add pp1 and pp2 into a6 with carry-out in a9. */ + do_mul(a6, a2, l, a3, h) /* pp 1 */ + do_mul(a11, a2, h, a3, l) /* pp 2 */ + movi a9, 0 + add a6, a6, a11 + bgeu a6, a11, 1f + addi a9, a9, 1 +1: + /* Shift the high half of a9/a6 into position in a9. Note that + this value can be safely incremented without any carry-outs. */ + ssai 16 + src a9, a9, a6 + + /* Compute the low word into a6. */ + do_mul(a11, a2, l, a3, l) /* pp 0 */ + sll a6, a6 + add a6, a6, a11 + bgeu a6, a11, 1f + addi a9, a9, 1 +1: + /* Compute the high word into wh. */ + do_mul(wh, a2, h, a3, h) /* pp 3 */ + add wh, wh, a9 + mov wl, a6 + +#endif /* !MUL32_HIGH */ + +#if defined(__XTENSA_CALL0_ABI__) && XCHAL_NO_MUL + /* Restore the original return address. */ + l32i a0, sp, 0 +#endif +#ifdef __XTENSA_CALL0_ABI__ + l32i a12, sp, 16 + l32i a13, sp, 20 + l32i a14, sp, 24 + l32i a15, sp, 28 + abi_ret(32) +#else + abi_ret_default +#endif + +#if XCHAL_NO_MUL + + .macro do_addx2 dst, as, at, tmp +#if XCHAL_HAVE_ADDX + addx2 \dst, \as, \at +#else + slli \tmp, \as, 1 + add \dst, \tmp, \at +#endif + .endm + + .macro do_addx4 dst, as, at, tmp +#if XCHAL_HAVE_ADDX + addx4 \dst, \as, \at +#else + slli \tmp, \as, 2 + add \dst, \tmp, \at +#endif + .endm + + .macro do_addx8 dst, as, at, tmp +#if XCHAL_HAVE_ADDX + addx8 \dst, \as, \at +#else + slli \tmp, \as, 3 + add \dst, \tmp, \at +#endif + .endm + + /* For Xtensa processors with no multiply hardware, this simplified + version of _mulsi3 is used for multiplying 16-bit chunks of + the floating-point mantissas. When using CALL0, this function + uses a custom ABI: the inputs are passed in a13 and a14, the + result is returned in a12, and a8 and a15 are clobbered. 
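Illustration (not part of the patch): on cores without a full 32-bit multiplier, the __umulsidi3 routine being added here builds the 64-bit product from four 16x16 partial products (pp0..pp3) and keeps the carry-out of the two middle products, mirroring the do_mul/add/bgeu sequence above. Below is a plain C sketch of the same schoolbook decomposition, checked against the compiler's native 64-bit multiply; the sample values are arbitrary.

#include <assert.h>
#include <stdint.h>

static uint64_t umulsidi3(uint32_t x, uint32_t y)
{
	uint32_t xl = x & 0xffff, xh = x >> 16;
	uint32_t yl = y & 0xffff, yh = y >> 16;

	uint32_t pp0 = xl * yl;     /* contributes to bits  0..31 */
	uint32_t pp1 = xl * yh;     /* contributes to bits 16..47 */
	uint32_t pp2 = xh * yl;     /* contributes to bits 16..47 */
	uint32_t pp3 = xh * yh;     /* contributes to bits 32..63 */

	uint32_t mid = pp1 + pp2;   /* may wrap around ...        */
	uint32_t carry = mid < pp1; /* ... so keep the carry-out  */

	uint64_t lo = (uint64_t)pp0 + ((uint64_t)mid << 16);
	uint64_t hi = (uint64_t)pp3 + ((uint64_t)carry << 16) + (lo >> 32);

	return (hi << 32) | (uint32_t)lo;
}

int main(void)
{
	uint32_t s[] = { 0, 1, 0xffff, 0x10000, 0xdeadbeef, 0xffffffff };
	unsigned int i, j;

	for (i = 0; i < 6; i++)
		for (j = 0; j < 6; j++)
			assert(umulsidi3(s[i], s[j]) == (uint64_t)s[i] * s[j]);
	return 0;
}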
*/ + .align 4 +.Lmul_mulsi3: + abi_entry_default + + .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2 + movi \dst, 0 +1: add \tmp1, \src2, \dst + extui \tmp2, \src1, 0, 1 + movnez \dst, \tmp1, \tmp2 + + do_addx2 \tmp1, \src2, \dst, \tmp1 + extui \tmp2, \src1, 1, 1 + movnez \dst, \tmp1, \tmp2 + + do_addx4 \tmp1, \src2, \dst, \tmp1 + extui \tmp2, \src1, 2, 1 + movnez \dst, \tmp1, \tmp2 + + do_addx8 \tmp1, \src2, \dst, \tmp1 + extui \tmp2, \src1, 3, 1 + movnez \dst, \tmp1, \tmp2 + + srli \src1, \src1, 4 + slli \src2, \src2, 4 + bnez \src1, 1b + .endm + +#ifdef __XTENSA_CALL0_ABI__ + mul_mulsi3_body a12, a13, a14, a15, a8 +#else + /* The result will be written into a2, so save that argument in a4. */ + mov a4, a2 + mul_mulsi3_body a2, a4, a3, a5, a6 +#endif + abi_ret_default +#endif /* XCHAL_NO_MUL */ + +ENDPROC(__umulsidi3) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 7ea427817f7f5fdcff8630cced9e67023d548b19..7b894df32e3208b05e0a0b2409f684cfe8c349de 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq); void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) { + struct bfq_queue *old_bfqq = bic->bfqq[is_sync]; + + /* Clear bic pointer if bfqq is detached from this bic */ + if (old_bfqq && old_bfqq->bic == bic) + old_bfqq->bic = NULL; + /* * If bfqq != NULL, then a non-stable queue merge between * bic->bfqq and bfqq is happening here. This causes troubles @@ -5377,9 +5383,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) unsigned long flags; spin_lock_irqsave(&bfqd->lock, flags); - bfqq->bic = NULL; - bfq_exit_bfqq(bfqd, bfqq); bic_set_bfqq(bic, NULL, is_sync); + bfq_exit_bfqq(bfqd, bfqq); spin_unlock_irqrestore(&bfqd->lock, flags); } } @@ -6784,6 +6789,12 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL); + if (unlikely(bfqq == &bfqd->oom_bfqq)) + bfqq_already_existing = true; + } else + bfqq_already_existing = true; + + if (!bfqq_already_existing) { bfqq->waker_bfqq = old_bfqq->waker_bfqq; bfqq->tentative_waker_bfqq = NULL; @@ -6797,8 +6808,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq) if (bfqq->waker_bfqq) hlist_add_head(&bfqq->woken_list_node, &bfqq->waker_bfqq->woken_list); - } else - bfqq_already_existing = true; + } } } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index ed761c62ad0a72b581d58f44d4be22194e003901..fcf9cf49f5de1f5208ee6302635a4bee99168839 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -33,6 +33,7 @@ #include "blk-cgroup.h" #include "blk-ioprio.h" #include "blk-throttle.h" +#include "blk-rq-qos.h" /* * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation. @@ -1275,6 +1276,7 @@ int blkcg_init_disk(struct gendisk *disk) void blkcg_exit_disk(struct gendisk *disk) { blkg_destroy_all(disk); + rq_qos_exit(disk->queue); blk_throtl_exit(disk); } diff --git a/block/blk-merge.c b/block/blk-merge.c index ff04e9290715a6981cacedd1981af6b2d8ee3792..f46c87ef951df46aa0610ce6fb44409e82c3f799 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -300,6 +300,16 @@ static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim, *segs = nsegs; return NULL; split: + /* + * We can't sanely support splitting for a REQ_NOWAIT bio. End it + * with EAGAIN if splitting is required and return an error pointer. 
+ */ + if (bio->bi_opf & REQ_NOWAIT) { + bio->bi_status = BLK_STS_AGAIN; + bio_endio(bio); + return ERR_PTR(-EAGAIN); + } + *segs = nsegs; /* diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 93997d297d4274fafd0b35944f30a9604127cb62..4515288fbe3518ac07cc1cee473741954084e7f8 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -185,7 +185,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; - int i, ret; + int i, j, ret; if (!hctx->nr_ctx) return 0; @@ -197,9 +197,16 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) hctx_for_each_ctx(hctx, ctx, i) { ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); if (ret) - break; + goto out; } + return 0; +out: + hctx_for_each_ctx(hctx, ctx, j) { + if (j < i) + kobject_del(&ctx->kobj); + } + kobject_del(&hctx->kobj); return ret; } diff --git a/block/blk-mq.c b/block/blk-mq.c index 228a6696d8351894625f196705eba01e376cc29a..0b855e033a8348f0b3748bb358ec2e304a551f5c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1529,7 +1529,13 @@ static void blk_mq_rq_timed_out(struct request *req) blk_add_timer(req); } -static bool blk_mq_req_expired(struct request *rq, unsigned long *next) +struct blk_expired_data { + bool has_timedout_rq; + unsigned long next; + unsigned long timeout_start; +}; + +static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired) { unsigned long deadline; @@ -1539,13 +1545,13 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next) return false; deadline = READ_ONCE(rq->deadline); - if (time_after_eq(jiffies, deadline)) + if (time_after_eq(expired->timeout_start, deadline)) return true; - if (*next == 0) - *next = deadline; - else if (time_after(*next, deadline)) - *next = deadline; + if (expired->next == 0) + expired->next = deadline; + else if (time_after(expired->next, deadline)) + expired->next = deadline; return false; } @@ -1561,7 +1567,7 @@ void blk_mq_put_rq_ref(struct request *rq) static bool blk_mq_check_expired(struct request *rq, void *priv) { - unsigned long *next = priv; + struct blk_expired_data *expired = priv; /* * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot @@ -1570,7 +1576,18 @@ static bool blk_mq_check_expired(struct request *rq, void *priv) * it was completed and reallocated as a new request after returning * from blk_mq_check_expired(). */ - if (blk_mq_req_expired(rq, next)) + if (blk_mq_req_expired(rq, expired)) { + expired->has_timedout_rq = true; + return false; + } + return true; +} + +static bool blk_mq_handle_expired(struct request *rq, void *priv) +{ + struct blk_expired_data *expired = priv; + + if (blk_mq_req_expired(rq, expired)) blk_mq_rq_timed_out(rq); return true; } @@ -1579,7 +1596,9 @@ static void blk_mq_timeout_work(struct work_struct *work) { struct request_queue *q = container_of(work, struct request_queue, timeout_work); - unsigned long next = 0; + struct blk_expired_data expired = { + .timeout_start = jiffies, + }; struct blk_mq_hw_ctx *hctx; unsigned long i; @@ -1599,10 +1618,23 @@ static void blk_mq_timeout_work(struct work_struct *work) if (!percpu_ref_tryget(&q->q_usage_counter)) return; - blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); + /* check if there is any timed-out request */ + blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired); + if (expired.has_timedout_rq) { + /* + * Before walking tags, we must ensure any submit started + * before the current time has finished. 
Since the submit + * uses srcu or rcu, wait for a synchronization point to + * ensure all running submits have finished + */ + blk_mq_wait_quiesce_done(q); + + expired.next = 0; + blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired); + } - if (next != 0) { - mod_timer(&q->timeout, next); + if (expired.next != 0) { + mod_timer(&q->timeout, expired.next); } else { /* * Request timeouts are handled as a forward rolling timer. If diff --git a/block/blk.h b/block/blk.h index a186ea20f39d8a50721a2e4c4b6bd86db6bd6a0d..8b75a95b28d60657603d4f9c56560e694260b5e3 100644 --- a/block/blk.h +++ b/block/blk.h @@ -436,7 +436,7 @@ static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu) } struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu); -int disk_scan_partitions(struct gendisk *disk, fmode_t mode); +int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner); int disk_alloc_events(struct gendisk *disk); void disk_add_events(struct gendisk *disk); diff --git a/block/genhd.c b/block/genhd.c index 0f9769db2de83405352dab021dacbe3f0ca91d0c..c4765681a8b4b04ed6aac9dbad75d481a35e72c8 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -356,7 +356,7 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action) } EXPORT_SYMBOL_GPL(disk_uevent); -int disk_scan_partitions(struct gendisk *disk, fmode_t mode) +int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner) { struct block_device *bdev; @@ -366,6 +366,9 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode) return -EINVAL; if (disk->open_partitions) return -EBUSY; + /* Someone else has bdev exclusively open? */ + if (disk->part0->bd_holder && disk->part0->bd_holder != owner) + return -EBUSY; set_bit(GD_NEED_PART_SCAN, &disk->state); bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL); @@ -500,7 +503,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, bdev_add(disk->part0, ddev->devt); if (get_capacity(disk)) - disk_scan_partitions(disk, FMODE_READ); + disk_scan_partitions(disk, FMODE_READ, NULL); /* * Announce the disk and partitions after all partitions are @@ -530,6 +533,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, rq_qos_exit(disk->queue); out_put_slave_dir: kobject_put(disk->slave_dir); + disk->slave_dir = NULL; out_put_holder_dir: kobject_put(disk->part0->bd_holder_dir); out_del_integrity: @@ -629,6 +633,7 @@ void del_gendisk(struct gendisk *disk) kobject_put(disk->part0->bd_holder_dir); kobject_put(disk->slave_dir); + disk->slave_dir = NULL; part_stat_set_all(disk->part0, 0); disk->part0->bd_stamp = 0; diff --git a/block/ioctl.c b/block/ioctl.c index 60121e89052bcf03b987bcf62ac36e9456dcb6d2..96617512982e57d98a6372400df94114fe4c2447 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -467,9 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode, * user space. Note the separate arg/argp parameters that are needed * to deal with the compat_ptr() conversion. 
*/ -static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long arg, void __user *argp) +static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd, + unsigned long arg, void __user *argp) { + struct block_device *bdev = I_BDEV(file->f_mapping->host); unsigned int max_sectors; switch (cmd) { @@ -527,7 +528,8 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode, return -EACCES; if (bdev_is_partition(bdev)) return -EINVAL; - return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL); + return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL, + file); case BLKTRACESTART: case BLKTRACESTOP: case BLKTRACETEARDOWN: @@ -605,7 +607,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) break; } - ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp); + ret = blkdev_common_ioctl(file, mode, cmd, arg, argp); if (ret != -ENOIOCTLCMD) return ret; @@ -674,7 +676,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) break; } - ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp); + ret = blkdev_common_ioctl(file, mode, cmd, arg, argp); if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl) ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 5639921dfa922a51699c376e000703fd5d0ecc07..6672f1bce3795bbafb2ed92251c6dc59b8ee679f 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -130,6 +130,20 @@ static u8 dd_rq_ioclass(struct request *rq) return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); } +/* + * get the request before `rq' in sector-sorted order + */ +static inline struct request * +deadline_earlier_request(struct request *rq) +{ + struct rb_node *node = rb_prev(&rq->rb_node); + + if (node) + return rb_entry_rq(node); + + return NULL; +} + /* * get the request after `rq' in sector-sorted order */ @@ -277,6 +291,39 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio, return 0; } +/* + * Check if rq has a sequential request preceding it. + */ +static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq) +{ + struct request *prev = deadline_earlier_request(rq); + + if (!prev) + return false; + + return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq); +} + +/* + * Skip all write requests that are sequential from @rq, even if we cross + * a zone boundary. + */ +static struct request *deadline_skip_seq_writes(struct deadline_data *dd, + struct request *rq) +{ + sector_t pos = blk_rq_pos(rq); + sector_t skipped_sectors = 0; + + while (rq) { + if (blk_rq_pos(rq) != pos + skipped_sectors) + break; + skipped_sectors += blk_rq_sectors(rq); + rq = deadline_latter_request(rq); + } + + return rq; +} + /* * For the specified data direction, return the next request to * dispatch using arrival ordered lists. @@ -297,11 +344,16 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio, /* * Look for a write request that can be dispatched, that is one with - * an unlocked target zone. + * an unlocked target zone. For some HDDs, breaking a sequential + * write stream can lead to lower throughput, so make sure to preserve + * sequential write streams, even if that stream crosses into the next + * zones and these zones are unlocked. 
*/ spin_lock_irqsave(&dd->zone_lock, flags); list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) { - if (blk_req_can_dispatch_to_zone(rq)) + if (blk_req_can_dispatch_to_zone(rq) && + (blk_queue_nonrot(rq->q) || + !deadline_is_seq_writes(dd, rq))) goto out; } rq = NULL; @@ -331,13 +383,19 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, /* * Look for a write request that can be dispatched, that is one with - * an unlocked target zone. + * an unlocked target zone. For some HDDs, breaking a sequential + * write stream can lead to lower throughput, so make sure to preserve + * sequential write streams, even if that stream crosses into the next + * zones and these zones are unlocked. */ spin_lock_irqsave(&dd->zone_lock, flags); while (rq) { if (blk_req_can_dispatch_to_zone(rq)) break; - rq = deadline_latter_request(rq); + if (blk_queue_nonrot(rq->q)) + rq = deadline_latter_request(rq); + else + rq = deadline_skip_seq_writes(dd, rq); } spin_unlock_irqrestore(&dd->zone_lock, flags); @@ -789,6 +847,18 @@ static void dd_prepare_request(struct request *rq) rq->elv.priv[0] = NULL; } +static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx) +{ + struct deadline_data *dd = hctx->queue->elevator->elevator_data; + enum dd_prio p; + + for (p = 0; p <= DD_PRIO_MAX; p++) + if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE])) + return true; + + return false; +} + /* * Callback from inside blk_mq_free_request(). * @@ -828,9 +898,10 @@ static void dd_finish_request(struct request *rq) spin_lock_irqsave(&dd->zone_lock, flags); blk_req_zone_write_unlock(rq); - if (!list_empty(&per_prio->fifo_list[DD_WRITE])) - blk_mq_sched_mark_restart_hctx(rq->mq_hctx); spin_unlock_irqrestore(&dd->zone_lock, flags); + + if (dd_has_write_work(rq->mq_hctx)) + blk_mq_sched_mark_restart_hctx(rq->mq_hctx); } } diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 668095eca0fafb52137e33ee70a97c2695b3180b..ca3a40fc7da911816b956e9aa37d6534592c493d 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -68,11 +68,12 @@ struct aead_instance_ctx { struct cryptd_skcipher_ctx { refcount_t refcnt; - struct crypto_sync_skcipher *child; + struct crypto_skcipher *child; }; struct cryptd_skcipher_request_ctx { crypto_completion_t complete; + struct skcipher_request req; }; struct cryptd_hash_ctx { @@ -227,13 +228,13 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_sync_skcipher *child = ctx->child; + struct crypto_skcipher *child = ctx->child; - crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_sync_skcipher_set_flags(child, - crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - return crypto_sync_skcipher_setkey(child, key, keylen); + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(child, + crypto_skcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + return crypto_skcipher_setkey(child, key, keylen); } static void cryptd_skcipher_complete(struct skcipher_request *req, int err) @@ -258,13 +259,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base, struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + struct skcipher_request *subreq = 
&rctx->req; + struct crypto_skcipher *child = ctx->child; if (unlikely(err == -EINPROGRESS)) goto out; - skcipher_request_set_sync_tfm(subreq, child); + skcipher_request_set_tfm(subreq, child); skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, @@ -286,13 +287,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base, struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_sync_skcipher *child = ctx->child; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); + struct skcipher_request *subreq = &rctx->req; + struct crypto_skcipher *child = ctx->child; if (unlikely(err == -EINPROGRESS)) goto out; - skcipher_request_set_sync_tfm(subreq, child); + skcipher_request_set_tfm(subreq, child); skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, @@ -343,9 +344,10 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) if (IS_ERR(cipher)) return PTR_ERR(cipher); - ctx->child = (struct crypto_sync_skcipher *)cipher; + ctx->child = cipher; crypto_skcipher_set_reqsize( - tfm, sizeof(struct cryptd_skcipher_request_ctx)); + tfm, sizeof(struct cryptd_skcipher_request_ctx) + + crypto_skcipher_reqsize(cipher)); return 0; } @@ -353,7 +355,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_free_sync_skcipher(ctx->child); + crypto_free_skcipher(ctx->child); } static void cryptd_skcipher_free(struct skcipher_instance *inst) @@ -931,7 +933,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) { struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); - return &ctx->child->base; + return ctx->child; } EXPORT_SYMBOL_GPL(cryptd_skcipher_child); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index a82679b576bb4381771a0f87ec0e401ef219a68e..b23235d58a122268c4f5cd95f787f9dd6c758f01 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1090,15 +1090,6 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs, goto out_free_tfm; } - - for (i = 0; i < num_mb; ++i) - if (testmgr_alloc_buf(data[i].xbuf)) { - while (i--) - testmgr_free_buf(data[i].xbuf); - goto out_free_tfm; - } - - for (i = 0; i < num_mb; ++i) { data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!data[i].req) { @@ -1471,387 +1462,387 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) } for (i = 1; i < 200; i++) - ret += do_test(NULL, 0, 0, i, num_mb); + ret = min(ret, do_test(NULL, 0, 0, i, num_mb)); break; case 1: - ret += tcrypt_test("md5"); + ret = min(ret, tcrypt_test("md5")); break; case 2: - ret += tcrypt_test("sha1"); + ret = min(ret, tcrypt_test("sha1")); break; case 3: - ret += tcrypt_test("ecb(des)"); - ret += tcrypt_test("cbc(des)"); - ret += tcrypt_test("ctr(des)"); + ret = min(ret, tcrypt_test("ecb(des)")); + ret = min(ret, tcrypt_test("cbc(des)")); + ret = min(ret, tcrypt_test("ctr(des)")); break; case 4: - ret += tcrypt_test("ecb(des3_ede)"); - ret += tcrypt_test("cbc(des3_ede)"); - ret += tcrypt_test("ctr(des3_ede)"); + ret = min(ret, tcrypt_test("ecb(des3_ede)")); + ret = min(ret, tcrypt_test("cbc(des3_ede)")); + ret = min(ret, tcrypt_test("ctr(des3_ede)")); break; case 5: - ret += tcrypt_test("md4"); + ret 
= min(ret, tcrypt_test("md4")); break; case 6: - ret += tcrypt_test("sha256"); + ret = min(ret, tcrypt_test("sha256")); break; case 7: - ret += tcrypt_test("ecb(blowfish)"); - ret += tcrypt_test("cbc(blowfish)"); - ret += tcrypt_test("ctr(blowfish)"); + ret = min(ret, tcrypt_test("ecb(blowfish)")); + ret = min(ret, tcrypt_test("cbc(blowfish)")); + ret = min(ret, tcrypt_test("ctr(blowfish)")); break; case 8: - ret += tcrypt_test("ecb(twofish)"); - ret += tcrypt_test("cbc(twofish)"); - ret += tcrypt_test("ctr(twofish)"); - ret += tcrypt_test("lrw(twofish)"); - ret += tcrypt_test("xts(twofish)"); + ret = min(ret, tcrypt_test("ecb(twofish)")); + ret = min(ret, tcrypt_test("cbc(twofish)")); + ret = min(ret, tcrypt_test("ctr(twofish)")); + ret = min(ret, tcrypt_test("lrw(twofish)")); + ret = min(ret, tcrypt_test("xts(twofish)")); break; case 9: - ret += tcrypt_test("ecb(serpent)"); - ret += tcrypt_test("cbc(serpent)"); - ret += tcrypt_test("ctr(serpent)"); - ret += tcrypt_test("lrw(serpent)"); - ret += tcrypt_test("xts(serpent)"); + ret = min(ret, tcrypt_test("ecb(serpent)")); + ret = min(ret, tcrypt_test("cbc(serpent)")); + ret = min(ret, tcrypt_test("ctr(serpent)")); + ret = min(ret, tcrypt_test("lrw(serpent)")); + ret = min(ret, tcrypt_test("xts(serpent)")); break; case 10: - ret += tcrypt_test("ecb(aes)"); - ret += tcrypt_test("cbc(aes)"); - ret += tcrypt_test("lrw(aes)"); - ret += tcrypt_test("xts(aes)"); - ret += tcrypt_test("ctr(aes)"); - ret += tcrypt_test("rfc3686(ctr(aes))"); - ret += tcrypt_test("ofb(aes)"); - ret += tcrypt_test("cfb(aes)"); - ret += tcrypt_test("xctr(aes)"); + ret = min(ret, tcrypt_test("ecb(aes)")); + ret = min(ret, tcrypt_test("cbc(aes)")); + ret = min(ret, tcrypt_test("lrw(aes)")); + ret = min(ret, tcrypt_test("xts(aes)")); + ret = min(ret, tcrypt_test("ctr(aes)")); + ret = min(ret, tcrypt_test("rfc3686(ctr(aes))")); + ret = min(ret, tcrypt_test("ofb(aes)")); + ret = min(ret, tcrypt_test("cfb(aes)")); + ret = min(ret, tcrypt_test("xctr(aes)")); break; case 11: - ret += tcrypt_test("sha384"); + ret = min(ret, tcrypt_test("sha384")); break; case 12: - ret += tcrypt_test("sha512"); + ret = min(ret, tcrypt_test("sha512")); break; case 13: - ret += tcrypt_test("deflate"); + ret = min(ret, tcrypt_test("deflate")); break; case 14: - ret += tcrypt_test("ecb(cast5)"); - ret += tcrypt_test("cbc(cast5)"); - ret += tcrypt_test("ctr(cast5)"); + ret = min(ret, tcrypt_test("ecb(cast5)")); + ret = min(ret, tcrypt_test("cbc(cast5)")); + ret = min(ret, tcrypt_test("ctr(cast5)")); break; case 15: - ret += tcrypt_test("ecb(cast6)"); - ret += tcrypt_test("cbc(cast6)"); - ret += tcrypt_test("ctr(cast6)"); - ret += tcrypt_test("lrw(cast6)"); - ret += tcrypt_test("xts(cast6)"); + ret = min(ret, tcrypt_test("ecb(cast6)")); + ret = min(ret, tcrypt_test("cbc(cast6)")); + ret = min(ret, tcrypt_test("ctr(cast6)")); + ret = min(ret, tcrypt_test("lrw(cast6)")); + ret = min(ret, tcrypt_test("xts(cast6)")); break; case 16: - ret += tcrypt_test("ecb(arc4)"); + ret = min(ret, tcrypt_test("ecb(arc4)")); break; case 17: - ret += tcrypt_test("michael_mic"); + ret = min(ret, tcrypt_test("michael_mic")); break; case 18: - ret += tcrypt_test("crc32c"); + ret = min(ret, tcrypt_test("crc32c")); break; case 19: - ret += tcrypt_test("ecb(tea)"); + ret = min(ret, tcrypt_test("ecb(tea)")); break; case 20: - ret += tcrypt_test("ecb(xtea)"); + ret = min(ret, tcrypt_test("ecb(xtea)")); break; case 21: - ret += tcrypt_test("ecb(khazad)"); + ret = min(ret, tcrypt_test("ecb(khazad)")); break; case 22: - ret += 
tcrypt_test("wp512"); + ret = min(ret, tcrypt_test("wp512")); break; case 23: - ret += tcrypt_test("wp384"); + ret = min(ret, tcrypt_test("wp384")); break; case 24: - ret += tcrypt_test("wp256"); + ret = min(ret, tcrypt_test("wp256")); break; case 26: - ret += tcrypt_test("ecb(anubis)"); - ret += tcrypt_test("cbc(anubis)"); + ret = min(ret, tcrypt_test("ecb(anubis)")); + ret = min(ret, tcrypt_test("cbc(anubis)")); break; case 30: - ret += tcrypt_test("ecb(xeta)"); + ret = min(ret, tcrypt_test("ecb(xeta)")); break; case 31: - ret += tcrypt_test("pcbc(fcrypt)"); + ret = min(ret, tcrypt_test("pcbc(fcrypt)")); break; case 32: - ret += tcrypt_test("ecb(camellia)"); - ret += tcrypt_test("cbc(camellia)"); - ret += tcrypt_test("ctr(camellia)"); - ret += tcrypt_test("lrw(camellia)"); - ret += tcrypt_test("xts(camellia)"); + ret = min(ret, tcrypt_test("ecb(camellia)")); + ret = min(ret, tcrypt_test("cbc(camellia)")); + ret = min(ret, tcrypt_test("ctr(camellia)")); + ret = min(ret, tcrypt_test("lrw(camellia)")); + ret = min(ret, tcrypt_test("xts(camellia)")); break; case 33: - ret += tcrypt_test("sha224"); + ret = min(ret, tcrypt_test("sha224")); break; case 35: - ret += tcrypt_test("gcm(aes)"); + ret = min(ret, tcrypt_test("gcm(aes)")); break; case 36: - ret += tcrypt_test("lzo"); + ret = min(ret, tcrypt_test("lzo")); break; case 37: - ret += tcrypt_test("ccm(aes)"); + ret = min(ret, tcrypt_test("ccm(aes)")); break; case 38: - ret += tcrypt_test("cts(cbc(aes))"); + ret = min(ret, tcrypt_test("cts(cbc(aes))")); break; case 39: - ret += tcrypt_test("xxhash64"); + ret = min(ret, tcrypt_test("xxhash64")); break; case 40: - ret += tcrypt_test("rmd160"); + ret = min(ret, tcrypt_test("rmd160")); break; case 42: - ret += tcrypt_test("blake2b-512"); + ret = min(ret, tcrypt_test("blake2b-512")); break; case 43: - ret += tcrypt_test("ecb(seed)"); + ret = min(ret, tcrypt_test("ecb(seed)")); break; case 45: - ret += tcrypt_test("rfc4309(ccm(aes))"); + ret = min(ret, tcrypt_test("rfc4309(ccm(aes))")); break; case 46: - ret += tcrypt_test("ghash"); + ret = min(ret, tcrypt_test("ghash")); break; case 47: - ret += tcrypt_test("crct10dif"); + ret = min(ret, tcrypt_test("crct10dif")); break; case 48: - ret += tcrypt_test("sha3-224"); + ret = min(ret, tcrypt_test("sha3-224")); break; case 49: - ret += tcrypt_test("sha3-256"); + ret = min(ret, tcrypt_test("sha3-256")); break; case 50: - ret += tcrypt_test("sha3-384"); + ret = min(ret, tcrypt_test("sha3-384")); break; case 51: - ret += tcrypt_test("sha3-512"); + ret = min(ret, tcrypt_test("sha3-512")); break; case 52: - ret += tcrypt_test("sm3"); + ret = min(ret, tcrypt_test("sm3")); break; case 53: - ret += tcrypt_test("streebog256"); + ret = min(ret, tcrypt_test("streebog256")); break; case 54: - ret += tcrypt_test("streebog512"); + ret = min(ret, tcrypt_test("streebog512")); break; case 55: - ret += tcrypt_test("gcm(sm4)"); + ret = min(ret, tcrypt_test("gcm(sm4)")); break; case 56: - ret += tcrypt_test("ccm(sm4)"); + ret = min(ret, tcrypt_test("ccm(sm4)")); break; case 57: - ret += tcrypt_test("polyval"); + ret = min(ret, tcrypt_test("polyval")); break; case 58: - ret += tcrypt_test("gcm(aria)"); + ret = min(ret, tcrypt_test("gcm(aria)")); break; case 100: - ret += tcrypt_test("hmac(md5)"); + ret = min(ret, tcrypt_test("hmac(md5)")); break; case 101: - ret += tcrypt_test("hmac(sha1)"); + ret = min(ret, tcrypt_test("hmac(sha1)")); break; case 102: - ret += tcrypt_test("hmac(sha256)"); + ret = min(ret, tcrypt_test("hmac(sha256)")); break; case 103: - ret += 
tcrypt_test("hmac(sha384)"); + ret = min(ret, tcrypt_test("hmac(sha384)")); break; case 104: - ret += tcrypt_test("hmac(sha512)"); + ret = min(ret, tcrypt_test("hmac(sha512)")); break; case 105: - ret += tcrypt_test("hmac(sha224)"); + ret = min(ret, tcrypt_test("hmac(sha224)")); break; case 106: - ret += tcrypt_test("xcbc(aes)"); + ret = min(ret, tcrypt_test("xcbc(aes)")); break; case 108: - ret += tcrypt_test("hmac(rmd160)"); + ret = min(ret, tcrypt_test("hmac(rmd160)")); break; case 109: - ret += tcrypt_test("vmac64(aes)"); + ret = min(ret, tcrypt_test("vmac64(aes)")); break; case 111: - ret += tcrypt_test("hmac(sha3-224)"); + ret = min(ret, tcrypt_test("hmac(sha3-224)")); break; case 112: - ret += tcrypt_test("hmac(sha3-256)"); + ret = min(ret, tcrypt_test("hmac(sha3-256)")); break; case 113: - ret += tcrypt_test("hmac(sha3-384)"); + ret = min(ret, tcrypt_test("hmac(sha3-384)")); break; case 114: - ret += tcrypt_test("hmac(sha3-512)"); + ret = min(ret, tcrypt_test("hmac(sha3-512)")); break; case 115: - ret += tcrypt_test("hmac(streebog256)"); + ret = min(ret, tcrypt_test("hmac(streebog256)")); break; case 116: - ret += tcrypt_test("hmac(streebog512)"); + ret = min(ret, tcrypt_test("hmac(streebog512)")); break; case 150: - ret += tcrypt_test("ansi_cprng"); + ret = min(ret, tcrypt_test("ansi_cprng")); break; case 151: - ret += tcrypt_test("rfc4106(gcm(aes))"); + ret = min(ret, tcrypt_test("rfc4106(gcm(aes))")); break; case 152: - ret += tcrypt_test("rfc4543(gcm(aes))"); + ret = min(ret, tcrypt_test("rfc4543(gcm(aes))")); break; case 153: - ret += tcrypt_test("cmac(aes)"); + ret = min(ret, tcrypt_test("cmac(aes)")); break; case 154: - ret += tcrypt_test("cmac(des3_ede)"); + ret = min(ret, tcrypt_test("cmac(des3_ede)")); break; case 155: - ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(aes))")); break; case 156: - ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"); + ret = min(ret, tcrypt_test("authenc(hmac(md5),ecb(cipher_null))")); break; case 157: - ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))")); break; case 158: - ret += tcrypt_test("cbcmac(sm4)"); + ret = min(ret, tcrypt_test("cbcmac(sm4)")); break; case 159: - ret += tcrypt_test("cmac(sm4)"); + ret = min(ret, tcrypt_test("cmac(sm4)")); break; case 181: - ret += tcrypt_test("authenc(hmac(sha1),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des))")); break; case 182: - ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))")); break; case 183: - ret += tcrypt_test("authenc(hmac(sha224),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des))")); break; case 184: - ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))")); break; case 185: - ret += tcrypt_test("authenc(hmac(sha256),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des))")); break; case 186: - ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))")); break; case 187: - ret += tcrypt_test("authenc(hmac(sha384),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des))")); break; case 188: - ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))")); break; case 189: - ret += 
tcrypt_test("authenc(hmac(sha512),cbc(des))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des))")); break; case 190: - ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"); + ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))")); break; case 191: - ret += tcrypt_test("ecb(sm4)"); - ret += tcrypt_test("cbc(sm4)"); - ret += tcrypt_test("cfb(sm4)"); - ret += tcrypt_test("ctr(sm4)"); + ret = min(ret, tcrypt_test("ecb(sm4)")); + ret = min(ret, tcrypt_test("cbc(sm4)")); + ret = min(ret, tcrypt_test("cfb(sm4)")); + ret = min(ret, tcrypt_test("ctr(sm4)")); break; case 192: - ret += tcrypt_test("ecb(aria)"); - ret += tcrypt_test("cbc(aria)"); - ret += tcrypt_test("cfb(aria)"); - ret += tcrypt_test("ctr(aria)"); + ret = min(ret, tcrypt_test("ecb(aria)")); + ret = min(ret, tcrypt_test("cbc(aria)")); + ret = min(ret, tcrypt_test("cfb(aria)")); + ret = min(ret, tcrypt_test("ctr(aria)")); break; case 200: test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 32953646caebf6a57bac1d367ad0d79ec104d822..0c79f463fbfd4bdd7e73b7002febc61756db05ba 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -70,11 +70,7 @@ module_param(device_id_scheme, bool, 0444); static int only_lcd = -1; module_param(only_lcd, int, 0444); -/* - * Display probing is known to take up to 5 seconds, so delay the fallback - * backlight registration by 5 seconds + 3 seconds for some extra margin. - */ -static int register_backlight_delay = 8; +static int register_backlight_delay; module_param(register_backlight_delay, int, 0444); MODULE_PARM_DESC(register_backlight_delay, "Delay in seconds before doing fallback (non GPU driver triggered) " @@ -2178,6 +2174,17 @@ static bool should_check_lcd_flag(void) return false; } +/* + * At least one graphics driver has reported that no LCD is connected + * via the native interface. cancel the registration for fallback acpi_video0. + * If another driver still deems this necessary, it can explicitly register it. 
+ */ +void acpi_video_report_nolcd(void) +{ + cancel_delayed_work(&video_bus_register_backlight_work); +} +EXPORT_SYMBOL(acpi_video_report_nolcd); + int acpi_video_register(void) { int ret = 0; diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index ae2e768830bfc779fae6b928d72d3e6418c980d7..9332bc688713c4dee6f5c00afaaa7071788c5870 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -517,7 +517,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; - goto cleanup; + goto pop_walk_state; } info->parameters = &this_walk_state->operands[0]; @@ -529,7 +529,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, ACPI_FREE(info); if (ACPI_FAILURE(status)) { - goto cleanup; + goto pop_walk_state; } next_walk_state->method_nesting_depth = @@ -575,6 +575,12 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(status); +pop_walk_state: + + /* On error, pop the walk state to be deleted from thread */ + + acpi_ds_pop_walk_state(thread); + cleanup: /* On error, we must terminate the method properly */ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index 400b9e15a709cee579adf9078d53caaf49e7ddb3..63c17f420fb86154281ab27cba84aa9c51304bfa 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -916,13 +916,6 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj, status = acpi_ut_walk_package_tree(source_obj, dest_obj, acpi_ut_copy_ielement_to_ielement, walk_state); - if (ACPI_FAILURE(status)) { - - /* On failure, delete the destination package object */ - - acpi_ut_remove_reference(dest_obj); - } - return_ACPI_STATUS(status); } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 9b42628cf21b328963285e2c04b1e92d912fbb1a..9751b84c1b22193bf75e551a91f018ce8463573a 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1875,6 +1875,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"), }, }, + { + /* + * HP Pavilion Gaming Laptop 15-cx0041ur + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"), + }, + }, { /* * Samsung hardware diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 1cc4647f78b86d70875f84615b1c131e64d7a4b5..c2c786eb95abc1fed2a773cee1ebf0080c8de6c8 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -94,6 +94,7 @@ EXPORT_SYMBOL_GPL(acpi_unregister_gsi); /** * acpi_get_irq_source_fwhandle() - Retrieve fwhandle from IRQ resource source. * @source: acpi_resource_source to use for the lookup. 
+ * @gsi: GSI IRQ number * * Description: * Retrieve the fwhandle of the device referenced by the given IRQ resource @@ -297,8 +298,8 @@ EXPORT_SYMBOL_GPL(acpi_irq_get); /** * acpi_set_irq_model - Setup the GSI irqdomain information * @model: the value assigned to acpi_irq_model - * @fwnode: the irq_domain identifier for mapping and looking up - * GSI interrupts + * @fn: a dispatcher function that will return the domain fwnode + * for a given GSI */ void __init acpi_set_irq_model(enum acpi_irq_model_id model, struct fwnode_handle *(*fn)(u32)) diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c index 9abf350bd7a5a28cc63d8960c395b02a0482b240..27fb6cdad75f978f055e87301c2540c576c8882a 100644 --- a/drivers/acpi/pfr_telemetry.c +++ b/drivers/acpi/pfr_telemetry.c @@ -144,7 +144,7 @@ static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info, ret = 0; free_acpi_buffer: - kfree(out_obj); + ACPI_FREE(out_obj); return ret; } @@ -180,7 +180,7 @@ static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev) ret = -EBUSY; } - kfree(out_obj); + ACPI_FREE(out_obj); return ret; } @@ -218,7 +218,7 @@ static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev) ret = obj->integer.value; free_acpi_buffer: - kfree(out_obj); + ACPI_FREE(out_obj); return ret; } diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c index 6bb0b778b5da527025772478e0f6d3113bac2607..9d2bdc13253a526c4008ec9af33d54de24ac747d 100644 --- a/drivers/acpi/pfr_update.c +++ b/drivers/acpi/pfr_update.c @@ -178,7 +178,7 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr, ret = 0; free_acpi_buffer: - kfree(out_obj); + ACPI_FREE(out_obj); return ret; } @@ -224,7 +224,7 @@ static int query_buffer(struct pfru_com_buf_info *info, ret = 0; free_acpi_buffer: - kfree(out_obj); + ACPI_FREE(out_obj); return ret; } @@ -385,7 +385,7 @@ static int start_update(int action, struct pfru_device *pfru_dev) ret = 0; free_acpi_buffer: - kfree(out_obj); + ACPI_FREE(out_obj); return ret; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index acfabfe07c4fa5f16c7b1b0df90bb3f19b98189a..fc5b5b2c9e8194e6de3ca51724bb847f15102e98 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1134,6 +1134,9 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) status = acpi_get_parent(handle, &pr_ahandle); while (ACPI_SUCCESS(status)) { d = acpi_fetch_acpi_dev(pr_ahandle); + if (!d) + break; + handle = pr_ahandle; if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID)) diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index f27914aedbd5ad9262c7ca3340e136847b72c090..16dcd31d124fe5370f3b2f83bdb8c3a12a3b3760 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -432,10 +432,24 @@ static const struct dmi_system_id asus_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), }, }, + { + .ident = "Asus ExpertBook B2502", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"), + }, + }, { } }; -static const struct dmi_system_id lenovo_82ra[] = { +static const struct dmi_system_id lenovo_laptop[] = { + { + .ident = "LENOVO IdeaPad Flex 5 14ALC7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82R9"), + }, + }, { .ident = "LENOVO IdeaPad Flex 5 16ALC7", .matches = { @@ -446,6 +460,17 @@ static const struct dmi_system_id lenovo_82ra[] = { { } }; +static const struct dmi_system_id schenker_gm_rg[] = { + { + .ident = 
"XMG CORE 15 (M22)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; @@ -458,8 +483,9 @@ struct irq_override_cmp { static const struct irq_override_cmp override_table[] = { { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, - { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, - { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, + { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b2a61628763879a4aac329a8c18448ad8645af9c..76b7e7f8894e750a6aa5509eea3096a397e182d5 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -105,6 +106,26 @@ static bool nvidia_wmi_ec_supported(void) } #endif +static bool apple_gmux_backlight_present(void) +{ + struct acpi_device *adev; + struct device *dev; + + adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1); + if (!adev) + return false; + + dev = acpi_get_first_physical_node(adev); + if (!dev) + return false; + + /* + * drivers/platform/x86/apple-gmux.c only supports old style + * Apple GMUX with an IO-resource. + */ + return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL; +} + /* Force to use vendor driver when the ACPI device is known to be * buggy */ static int video_detect_force_vendor(const struct dmi_system_id *d) @@ -132,6 +153,10 @@ static int video_detect_force_none(const struct dmi_system_id *d) } static const struct dmi_system_id video_detect_dmi_table[] = { + /* + * Models which should use the vendor backlight interface, + * because of broken ACPI video backlight control. + */ { /* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */ .callback = video_detect_force_vendor, @@ -197,14 +222,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"), }, }, - { - .callback = video_detect_force_vendor, - /* GIGABYTE GB-BXBT-2807 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), - DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"), - }, - }, { .callback = video_detect_force_vendor, /* Samsung N150/N210/N220 */ @@ -234,18 +251,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, { .callback = video_detect_force_vendor, - /* Sony VPCEH3U1E */ + /* Xiaomi Mi Pad 2 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"), + DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"), + DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"), }, }, + + /* + * Models which should use the vendor backlight interface, + * because of broken native backlight control. 
+ */ { .callback = video_detect_force_vendor, - /* Xiaomi Mi Pad 2 */ + /* Sony Vaio PCG-FRV35 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"), - DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"), + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"), }, }, @@ -609,6 +631,23 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "N250P"), }, }, + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=202401 */ + .callback = video_detect_force_native, + /* Sony Vaio VPCEH3U1E */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"), + }, + }, + { + .callback = video_detect_force_native, + /* Sony Vaio VPCY11S1E */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCY11S1E"), + }, + }, /* * These Toshibas have a broken acpi-video interface for brightness @@ -671,6 +710,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"), }, }, + { + .callback = video_detect_force_none, + /* GIGABYTE GB-BXBT-2807 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"), + }, + }, { .callback = video_detect_force_none, /* MSI MS-7721 */ @@ -687,6 +734,16 @@ static bool google_cros_ec_present(void) return acpi_dev_found("GOOG0004") || acpi_dev_found("GOOG000C"); } +/* + * Windows 8 and newer no longer use the ACPI video interface, so it often + * does not work. So on win8+ systems prefer native brightness control. + * Chromebooks should always prefer native backlight control. + */ +static bool prefer_native_over_acpi_video(void) +{ + return acpi_osi_is_win8() || google_cros_ec_present(); +} + /* * Determine which type of backlight interface to use on this system, * First check cmdline, then dmi quirks, then do autodetect. @@ -729,31 +786,19 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) if (nvidia_wmi_ec_present) return acpi_backlight_nvidia_wmi_ec; - if (apple_gmux_present()) + if (apple_gmux_backlight_present()) return acpi_backlight_apple_gmux; - /* Chromebooks should always prefer native backlight control. */ - if (google_cros_ec_present() && native_available) - return acpi_backlight_native; + /* Use ACPI video if available, except when native should be preferred. */ + if ((video_caps & ACPI_VIDEO_BACKLIGHT) && + !(native_available && prefer_native_over_acpi_video())) + return acpi_backlight_video; - /* On systems with ACPI video use either native or ACPI video. */ - if (video_caps & ACPI_VIDEO_BACKLIGHT) { - /* - * Windows 8 and newer no longer use the ACPI video interface, - * so it often does not work. If the ACPI tables are written - * for win8 and native brightness ctl is available, use that. - * - * The native check deliberately is inside the if acpi-video - * block on older devices without acpi-video support native - * is usually not the best choice. - */ - if (acpi_osi_is_win8() && native_available) - return acpi_backlight_native; - else - return acpi_backlight_video; - } + /* Use native if available */ + if (native_available) + return acpi_backlight_native; - /* No ACPI video (old hw), use vendor specific fw methods. */ + /* No ACPI video/native (old hw), use vendor specific fw methods. 
*/ return acpi_backlight_vendor; } @@ -765,18 +810,6 @@ EXPORT_SYMBOL(acpi_video_get_backlight_type); bool acpi_video_backlight_use_native(void) { - /* - * Call __acpi_video_get_backlight_type() to let it know that - * a native backlight is available. - */ - __acpi_video_get_backlight_type(true); - - /* - * For now just always return true. There is a whole bunch of laptop - * models where (video_caps & ACPI_VIDEO_BACKLIGHT) is false causing - * __acpi_video_get_backlight_type() to return vendor, while these - * models only have a native backlight control. - */ - return true; + return __acpi_video_get_backlight_type(true) == acpi_backlight_native; } EXPORT_SYMBOL(acpi_video_backlight_use_native); diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index 5350c73564b601fffa1f31ee566ae67a50d74de0..c7afce465a0710d0f57bfc37506857b6a8345830 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly; module_param(sleep_no_lps0, bool, 0644); MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface"); -static bool prefer_microsoft_dsm_guid __read_mostly; -module_param(prefer_microsoft_dsm_guid, bool, 0644); -MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation"); - static const struct acpi_device_id lps0_device_ids[] = { {"PNP0D80", }, {"", }, @@ -369,27 +365,15 @@ static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *d } struct amd_lps0_hid_device_data { - const unsigned int rev_id; const bool check_off_by_one; - const bool prefer_amd_guid; }; static const struct amd_lps0_hid_device_data amd_picasso = { - .rev_id = 0, .check_off_by_one = true, - .prefer_amd_guid = false, }; static const struct amd_lps0_hid_device_data amd_cezanne = { - .rev_id = 0, - .check_off_by_one = false, - .prefer_amd_guid = false, -}; - -static const struct amd_lps0_hid_device_data amd_rembrandt = { - .rev_id = 2, .check_off_by_one = false, - .prefer_amd_guid = true, }; static const struct acpi_device_id amd_hid_ids[] = { @@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = { {"AMD0005", (kernel_ulong_t)&amd_picasso, }, {"AMDI0005", (kernel_ulong_t)&amd_picasso, }, {"AMDI0006", (kernel_ulong_t)&amd_cezanne, }, - {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, }, {} }; -static int lps0_prefer_microsoft(const struct dmi_system_id *id) +static int lps0_prefer_amd(const struct dmi_system_id *id) { - pr_debug("Preferring Microsoft GUID.\n"); - prefer_microsoft_dsm_guid = true; + pr_debug("Using AMD GUID w/ _REV 2.\n"); + rev_id = 2; return 0; } - static const struct dmi_system_id s2idle_dmi_table[] __initconst = { { /* - * ASUS TUF Gaming A17 FA707RE - * https://bugzilla.kernel.org/show_bug.cgi?id=216101 - */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"), - }, - }, - { - /* ASUS ROG Zephyrus G14 (2022) */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"), - }, - }, - { - /* - * Lenovo Yoga Slim 7 Pro X 14ARH7 - * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2 - * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL - */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "82"), - }, - }, - { - /* - * ASUSTeK COMPUTER INC. 
ROG Flow X13 GV301RE_GV301RE - * https://gitlab.freedesktop.org/drm/amd/-/issues/2148 + * AMD Rembrandt based HP EliteBook 835/845/865 G9 + * Contains specialized AML in AMD/_REV 2 path to avoid + * triggering a bug in Qualcomm WLAN firmware. This may be + * removed in the future if that firmware is fixed. */ - .callback = lps0_prefer_microsoft, + .callback = lps0_prefer_amd, .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"), - }, - }, - { - /* - * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW - * https://gitlab.freedesktop.org/drm/amd/-/issues/2148 - */ - .callback = lps0_prefer_microsoft, - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + DMI_MATCH(DMI_BOARD_NAME, "8990"), }, }, {} @@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev, if (dev_id->id[0]) data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data; else - data = &amd_rembrandt; - rev_id = data->rev_id; + data = &amd_cezanne; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid); if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) { lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1; acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n", ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask); - } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid && - !prefer_microsoft_dsm_guid) { + } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) { lps0_dsm_func_mask_microsoft = -EINVAL; acpi_handle_debug(adev->handle, "_DSM Using AMD method\n"); } @@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev, rev_id = 1; lps0_dsm_func_mask = validate_dsm(adev->handle, ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid); - if (!prefer_microsoft_dsm_guid) - lps0_dsm_func_mask_microsoft = -EINVAL; + lps0_dsm_func_mask_microsoft = -EINVAL; } if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0) diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index d7d3f1669d4c0a7a94dc855740c1dabc17f2f8fb..4e816bb402f68cce61dd4272a3b57310515e1155 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -308,7 +308,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), }, { - /* Lenovo Yoga Tablet 1050F/L */ + /* Lenovo Yoga Tablet 2 1050F/L */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), @@ -319,6 +319,27 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), }, + { + /* Lenovo Yoga Tab 3 Pro X90F */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, + { + /* Medion Lifetab S10346 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Way too generic, also match on BIOS data */ + DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + }, { /* Nextbook Ares 8 */ .matches = { @@ -348,6 +369,7 @@ static const struct acpi_device_id 
i2c_acpi_known_good_ids[] = { { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */ { "INT33F4", 0 }, /* X-Powers AXP288 PMIC */ { "INT33FD", 0 }, /* Intel Crystal Cove PMIC */ + { "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */ { "NPCE69A", 0 }, /* Asus Transformer keyboard dock */ {} }; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 639de2d75d63644d2feb09e23e4bf4ce5795321c..53ab2306da009d0f664de0fdaebaa8e5b7859ebd 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -84,6 +84,7 @@ enum board_ids { static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void ahci_remove_one(struct pci_dev *dev); static void ahci_shutdown_one(struct pci_dev *dev); +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv); static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, @@ -677,6 +678,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, ahci_save_initial_config(&pdev->dev, hpriv); } +static int ahci_pci_reset_controller(struct ata_host *host) +{ + struct pci_dev *pdev = to_pci_dev(host->dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_reset_controller(host); + if (rc) + return rc; + + /* + * If platform firmware failed to enable ports, try to enable + * them here. + */ + ahci_intel_pcs_quirk(pdev, hpriv); + + return 0; +} + static void ahci_pci_init_controller(struct ata_host *host) { struct ahci_host_priv *hpriv = host->private_data; @@ -871,7 +891,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev) struct ata_host *host = pci_get_drvdata(pdev); int rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; ahci_pci_init_controller(host); @@ -907,7 +927,7 @@ static int ahci_pci_device_resume(struct device *dev) ahci_mcp89_apple_enable(pdev); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; @@ -1785,12 +1805,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); - /* - * If platform firmware failed to enable ports, try to enable - * them here. 
- */ - ahci_intel_pcs_quirk(pdev, hpriv); - /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; @@ -1900,7 +1914,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - rc = ahci_reset_controller(host); + rc = ahci_pci_reset_controller(host); if (rc) return rc; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 7add8e79912b18170698e0433eaff7320cdb3219..ff8e6ae1c6362f2a26abde461910bd1a388f1655 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -24,6 +24,7 @@ #include #include #include +#include /* Enclosure Management Control */ #define EM_CTRL_MSG_TYPE 0x000f0000 @@ -53,12 +54,12 @@ enum { AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + (AHCI_RX_FIS_SZ * 16), - AHCI_IRQ_ON_SG = (1 << 31), - AHCI_CMD_ATAPI = (1 << 5), - AHCI_CMD_WRITE = (1 << 6), - AHCI_CMD_PREFETCH = (1 << 7), - AHCI_CMD_RESET = (1 << 8), - AHCI_CMD_CLR_BUSY = (1 << 10), + AHCI_IRQ_ON_SG = BIT(31), + AHCI_CMD_ATAPI = BIT(5), + AHCI_CMD_WRITE = BIT(6), + AHCI_CMD_PREFETCH = BIT(7), + AHCI_CMD_RESET = BIT(8), + AHCI_CMD_CLR_BUSY = BIT(10), RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ @@ -76,37 +77,37 @@ enum { HOST_CAP2 = 0x24, /* host capabilities, extended */ /* HOST_CTL bits */ - HOST_RESET = (1 << 0), /* reset controller; self-clear */ - HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ - HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */ - HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ + HOST_RESET = BIT(0), /* reset controller; self-clear */ + HOST_IRQ_EN = BIT(1), /* global IRQ enable */ + HOST_MRSM = BIT(2), /* MSI Revert to Single Message */ + HOST_AHCI_EN = BIT(31), /* AHCI enabled */ /* HOST_CAP bits */ - HOST_CAP_SXS = (1 << 5), /* Supports External SATA */ - HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ - HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */ - HOST_CAP_PART = (1 << 13), /* Partial state capable */ - HOST_CAP_SSC = (1 << 14), /* Slumber state capable */ - HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */ - HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */ - HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ - HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */ - HOST_CAP_CLO = (1 << 24), /* Command List Override support */ - HOST_CAP_LED = (1 << 25), /* Supports activity LED */ - HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ - HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ - HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */ - HOST_CAP_SNTF = (1 << 29), /* SNotification register */ - HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ - HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ + HOST_CAP_SXS = BIT(5), /* Supports External SATA */ + HOST_CAP_EMS = BIT(6), /* Enclosure Management support */ + HOST_CAP_CCC = BIT(7), /* Command Completion Coalescing */ + HOST_CAP_PART = BIT(13), /* Partial state capable */ + HOST_CAP_SSC = BIT(14), /* Slumber state capable */ + HOST_CAP_PIO_MULTI = BIT(15), /* PIO multiple DRQ support */ + HOST_CAP_FBS = BIT(16), /* FIS-based switching support */ + HOST_CAP_PMP = BIT(17), /* Port Multiplier support */ + HOST_CAP_ONLY = BIT(18), /* Supports AHCI mode only */ + HOST_CAP_CLO = BIT(24), /* Command List Override support */ + HOST_CAP_LED = BIT(25), /* Supports activity LED */ + HOST_CAP_ALPM = BIT(26), /* Aggressive Link PM support */ + HOST_CAP_SSS = BIT(27), /* Staggered Spin-up */ 
+ HOST_CAP_MPS = BIT(28), /* Mechanical presence switch */ + HOST_CAP_SNTF = BIT(29), /* SNotification register */ + HOST_CAP_NCQ = BIT(30), /* Native Command Queueing */ + HOST_CAP_64 = BIT(31), /* PCI DAC (64-bit DMA) support */ /* HOST_CAP2 bits */ - HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */ - HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */ - HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */ - HOST_CAP2_SDS = (1 << 3), /* Support device sleep */ - HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */ - HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */ + HOST_CAP2_BOH = BIT(0), /* BIOS/OS handoff supported */ + HOST_CAP2_NVMHCI = BIT(1), /* NVMHCI supported */ + HOST_CAP2_APST = BIT(2), /* Automatic partial to slumber */ + HOST_CAP2_SDS = BIT(3), /* Support device sleep */ + HOST_CAP2_SADM = BIT(4), /* Support aggressive DevSlp */ + HOST_CAP2_DESO = BIT(5), /* DevSlp from slumber only */ /* registers for each SATA port */ PORT_LST_ADDR = 0x00, /* command list DMA addr */ @@ -128,24 +129,24 @@ enum { PORT_DEVSLP = 0x44, /* device sleep */ /* PORT_IRQ_{STAT,MASK} bits */ - PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ - PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ - PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ - PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ - PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ - PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ - PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ - PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ - - PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ - PORT_IRQ_DMPS = (1 << 7), /* mechanical presence status */ - PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ - PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ - PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ - PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ - PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ - PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ - PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ + PORT_IRQ_COLD_PRES = BIT(31), /* cold presence detect */ + PORT_IRQ_TF_ERR = BIT(30), /* task file error */ + PORT_IRQ_HBUS_ERR = BIT(29), /* host bus fatal error */ + PORT_IRQ_HBUS_DATA_ERR = BIT(28), /* host bus data error */ + PORT_IRQ_IF_ERR = BIT(27), /* interface fatal error */ + PORT_IRQ_IF_NONFATAL = BIT(26), /* interface non-fatal error */ + PORT_IRQ_OVERFLOW = BIT(24), /* xfer exhausted available S/G */ + PORT_IRQ_BAD_PMP = BIT(23), /* incorrect port multiplier */ + + PORT_IRQ_PHYRDY = BIT(22), /* PhyRdy changed */ + PORT_IRQ_DMPS = BIT(7), /* mechanical presence status */ + PORT_IRQ_CONNECT = BIT(6), /* port connect change status */ + PORT_IRQ_SG_DONE = BIT(5), /* descriptor processed */ + PORT_IRQ_UNK_FIS = BIT(4), /* unknown FIS rx'd */ + PORT_IRQ_SDB_FIS = BIT(3), /* Set Device Bits FIS rx'd */ + PORT_IRQ_DMAS_FIS = BIT(2), /* DMA Setup FIS rx'd */ + PORT_IRQ_PIOS_FIS = BIT(1), /* PIO Setup FIS rx'd */ + PORT_IRQ_D2H_REG_FIS = BIT(0), /* D2H Register FIS rx'd */ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | @@ -161,27 +162,27 @@ enum { PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, /* PORT_CMD bits */ - PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ - PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ - PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ - PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */ - PORT_CMD_ESP = (1 << 21), 
/* External Sata Port */ - PORT_CMD_CPD = (1 << 20), /* Cold Presence Detection */ - PORT_CMD_MPSP = (1 << 19), /* Mechanical Presence Switch */ - PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */ - PORT_CMD_PMP = (1 << 17), /* PMP attached */ - PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ - PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ - PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */ - PORT_CMD_CLO = (1 << 3), /* Command list override */ - PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ - PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ - PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ - - PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ - PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ - PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ - PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ + PORT_CMD_ASP = BIT(27), /* Aggressive Slumber/Partial */ + PORT_CMD_ALPE = BIT(26), /* Aggressive Link PM enable */ + PORT_CMD_ATAPI = BIT(24), /* Device is ATAPI */ + PORT_CMD_FBSCP = BIT(22), /* FBS Capable Port */ + PORT_CMD_ESP = BIT(21), /* External Sata Port */ + PORT_CMD_CPD = BIT(20), /* Cold Presence Detection */ + PORT_CMD_MPSP = BIT(19), /* Mechanical Presence Switch */ + PORT_CMD_HPCP = BIT(18), /* HotPlug Capable Port */ + PORT_CMD_PMP = BIT(17), /* PMP attached */ + PORT_CMD_LIST_ON = BIT(15), /* cmd list DMA engine running */ + PORT_CMD_FIS_ON = BIT(14), /* FIS DMA engine running */ + PORT_CMD_FIS_RX = BIT(4), /* Enable FIS receive DMA engine */ + PORT_CMD_CLO = BIT(3), /* Command list override */ + PORT_CMD_POWER_ON = BIT(2), /* Power up device */ + PORT_CMD_SPIN_UP = BIT(1), /* Spin up device */ + PORT_CMD_START = BIT(0), /* Enable port DMA engine */ + + PORT_CMD_ICC_MASK = (0xfu << 28), /* i/f ICC state mask */ + PORT_CMD_ICC_ACTIVE = (0x1u << 28), /* Put i/f in active state */ + PORT_CMD_ICC_PARTIAL = (0x2u << 28), /* Put i/f in partial state */ + PORT_CMD_ICC_SLUMBER = (0x6u << 28), /* Put i/f in slumber state */ /* PORT_CMD capabilities mask */ PORT_CMD_CAP = PORT_CMD_HPCP | PORT_CMD_MPSP | @@ -192,9 +193,9 @@ enum { PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */ - PORT_FBS_SDE = (1 << 2), /* FBS single device error */ - PORT_FBS_DEC = (1 << 1), /* FBS device error clear */ - PORT_FBS_EN = (1 << 0), /* Enable FBS */ + PORT_FBS_SDE = BIT(2), /* FBS single device error */ + PORT_FBS_DEC = BIT(1), /* FBS device error clear */ + PORT_FBS_EN = BIT(0), /* Enable FBS */ /* PORT_DEVSLP bits */ PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */ @@ -202,50 +203,50 @@ enum { PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */ PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */ PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */ - PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */ - PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */ + PORT_DEVSLP_DSP = BIT(1), /* DevSlp present */ + PORT_DEVSLP_ADSE = BIT(0), /* Aggressive DevSlp enable */ /* hpriv->flags bits */ #define AHCI_HFLAGS(flags) .private_data = (void *)(flags) - AHCI_HFLAG_NO_NCQ = (1 << 0), - AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ - AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ - AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ - AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ - AHCI_HFLAG_NO_MSI = (1 << 
5), /* no PCI MSI */ - AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ - AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ - AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ - AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ - AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as - link offline */ - AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ - AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */ - AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */ - AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on - port start (wait until - error-handling stage) */ - AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ - AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ + AHCI_HFLAG_NO_NCQ = BIT(0), + AHCI_HFLAG_IGN_IRQ_IF_ERR = BIT(1), /* ignore IRQ_IF_ERR */ + AHCI_HFLAG_IGN_SERR_INTERNAL = BIT(2), /* ignore SERR_INTERNAL */ + AHCI_HFLAG_32BIT_ONLY = BIT(3), /* force 32bit */ + AHCI_HFLAG_MV_PATA = BIT(4), /* PATA port */ + AHCI_HFLAG_NO_MSI = BIT(5), /* no PCI MSI */ + AHCI_HFLAG_NO_PMP = BIT(6), /* no PMP */ + AHCI_HFLAG_SECT255 = BIT(8), /* max 255 sectors */ + AHCI_HFLAG_YES_NCQ = BIT(9), /* force NCQ cap on */ + AHCI_HFLAG_NO_SUSPEND = BIT(10), /* don't suspend */ + AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = BIT(11), /* treat SRST timeout as + link offline */ + AHCI_HFLAG_NO_SNTF = BIT(12), /* no sntf */ + AHCI_HFLAG_NO_FPDMA_AA = BIT(13), /* no FPDMA AA */ + AHCI_HFLAG_YES_FBS = BIT(14), /* force FBS cap on */ + AHCI_HFLAG_DELAY_ENGINE = BIT(15), /* do not start engine on + port start (wait until + error-handling stage) */ + AHCI_HFLAG_NO_DEVSLP = BIT(17), /* no device sleep */ + AHCI_HFLAG_NO_FBS = BIT(18), /* no FBS */ #ifdef CONFIG_PCI_MSI - AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */ + AHCI_HFLAG_MULTI_MSI = BIT(20), /* per-port MSI(-X) */ #else /* compile out MSI infrastructure */ AHCI_HFLAG_MULTI_MSI = 0, #endif - AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ - AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */ - AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read - only registers */ - AHCI_HFLAG_USE_LPM_POLICY = (1 << 25), /* chipset that should use - SATA_MOBILE_LPM_POLICY - as default lpm_policy */ - AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during - suspend/resume */ - AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */ + AHCI_HFLAG_WAKE_BEFORE_STOP = BIT(22), /* wake before DMA stop */ + AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */ + AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read + only registers */ + AHCI_HFLAG_USE_LPM_POLICY = BIT(25), /* chipset that should use + SATA_MOBILE_LPM_POLICY + as default lpm_policy */ + AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during + suspend/resume */ + AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */ /* ap->flags bits */ @@ -261,22 +262,22 @@ enum { EM_MAX_RETRY = 5, /* em_ctl bits */ - EM_CTL_RST = (1 << 9), /* Reset */ - EM_CTL_TM = (1 << 8), /* Transmit Message */ - EM_CTL_MR = (1 << 0), /* Message Received */ - EM_CTL_ALHD = (1 << 26), /* Activity LED */ - EM_CTL_XMT = (1 << 25), /* Transmit Only */ - EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ - EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */ - EM_CTL_SES = (1 << 18), /* SES-2 messages supported */ - EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */ - EM_CTL_LED = (1 << 16), /* LED messages supported */ + EM_CTL_RST = BIT(9), /* Reset */ + EM_CTL_TM = BIT(8), /* Transmit Message */ + EM_CTL_MR = BIT(0), /* Message Received */ + EM_CTL_ALHD = 
BIT(26), /* Activity LED */ + EM_CTL_XMT = BIT(25), /* Transmit Only */ + EM_CTL_SMB = BIT(24), /* Single Message Buffer */ + EM_CTL_SGPIO = BIT(19), /* SGPIO messages supported */ + EM_CTL_SES = BIT(18), /* SES-2 messages supported */ + EM_CTL_SAFTE = BIT(17), /* SAF-TE messages supported */ + EM_CTL_LED = BIT(16), /* LED messages supported */ /* em message type */ - EM_MSG_TYPE_LED = (1 << 0), /* LED */ - EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */ - EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */ - EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */ + EM_MSG_TYPE_LED = BIT(0), /* LED */ + EM_MSG_TYPE_SAFTE = BIT(1), /* SAF-TE */ + EM_MSG_TYPE_SES2 = BIT(2), /* SES-2 */ + EM_MSG_TYPE_SGPIO = BIT(3), /* SGPIO */ }; struct ahci_cmd_hdr { diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index b6806d41a8c508fd4e330e5878d9498f736b543d..fd4dccc253896c1d77053a3d1eb15a8e6d9a1733 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -1392,7 +1392,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev, tf->hob_lbah = buf[10]; tf->nsect = buf[12]; tf->hob_nsect = buf[13]; - if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id)) + if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id) && + (tf->status & ATA_SENSE)) tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; return 0; @@ -1456,8 +1457,12 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) memcpy(&qc->result_tf, &tf, sizeof(tf)); qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; - if (dev->class == ATA_DEV_ZAC && - ((qc->result_tf.status & ATA_SENSE) || qc->result_tf.auxiliary)) { + + /* + * If the device supports NCQ autosense, ata_eh_read_log_10h() will have + * stored the sense data in qc->result_tf.auxiliary. + */ + if (qc->result_tf.auxiliary) { char sense_key, asc, ascq; sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; diff --git a/drivers/base/class.c b/drivers/base/class.c index 64f7b9a0970f7d5d89559cbb4872079021a14fea..8ceafb7d0203b26011c9e5ff84df914f0aba1f45 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -192,6 +192,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) } error = class_add_groups(class_get(cls), cls->class_groups); class_put(cls); + if (error) { + kobject_del(&cp->subsys.kobj); + kfree_const(cp->subsys.kobj.name); + kfree(cp); + } return error; } EXPORT_SYMBOL_GPL(__class_register); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 3dda62503102f9ee77637d396031c5b133482b6f..9ae2b5c4fc496e3bb250e6d4c180fadacbdc8169 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -1162,7 +1162,11 @@ static int __driver_attach(struct device *dev, void *data) return 0; } else if (ret < 0) { dev_dbg(dev, "Bus failed to match device: %d\n", ret); - return ret; + /* + * Driver could not match with device, but may match with + * another device on the bus. 
+ */ + return 0; } /* ret > 0 means positive match */ if (driver_allows_async_probing(drv)) { diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index b52049098d4ee599c4636f4f81acad51a0c92e02..14088b5adb556525fcfeb0c8b77252c753086133 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -484,7 +484,17 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - retval = __rpm_callback(callback, dev); + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = callback(dev); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); dev->power.idle_notification = false; wake_up_all(&dev->power.wait_queue); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 4ef9488d05cde46a5d7165c9081f75ac44c51070..3de89795f5843515cf584e0b6113934f86c1caca 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -722,6 +722,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, int i; int ret = -ENOMEM; int num_type_reg; + int num_regs; u32 reg; if (chip->num_regs <= 0) @@ -796,14 +797,20 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, goto err_alloc; } - num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg; - if (num_type_reg) { - d->type_buf_def = kcalloc(num_type_reg, + /* + * Use num_config_regs if defined, otherwise fall back to num_type_reg + * to maintain backward compatibility. + */ + num_type_reg = chip->num_config_regs ? chip->num_config_regs + : chip->num_type_reg; + num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg; + if (num_regs) { + d->type_buf_def = kcalloc(num_regs, sizeof(*d->type_buf_def), GFP_KERNEL); if (!d->type_buf_def) goto err_alloc; - d->type_buf = kcalloc(num_type_reg, sizeof(*d->type_buf), + d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf), GFP_KERNEL); if (!d->type_buf) goto err_alloc; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8532b839a3435c895ca297efd14ffd421ae513ad..6772402326842f621cdedd1e52d0dd0fdba3f40a 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2217,7 +2217,8 @@ void drbd_destroy_device(struct kref *kref) kref_put(&peer_device->connection->kref, drbd_destroy_connection); kfree(peer_device); } - memset(device, 0xfd, sizeof(*device)); + if (device->submit.wq) + destroy_workqueue(device->submit.wq); kfree(device); kref_put(&resource->kref, drbd_destroy_resource); } @@ -2309,7 +2310,6 @@ void drbd_destroy_resource(struct kref *kref) idr_destroy(&resource->devices); free_cpumask_var(resource->cpu_mask); kfree(resource->name); - memset(resource, 0xf2, sizeof(*resource)); kfree(resource); } @@ -2650,7 +2650,6 @@ void drbd_destroy_connection(struct kref *kref) drbd_free_socket(&connection->data); kfree(connection->int_dig_in); kfree(connection->int_dig_vv); - memset(connection, 0xfc, sizeof(*connection)); kfree(connection); kref_put(&resource->kref, drbd_destroy_resource); } @@ -2774,7 +2773,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig err = add_disk(disk); if (err) - goto out_idr_remove_from_resource; + goto out_destroy_workqueue; /* inherit the connection state */ device->state.conn = first_connection(resource)->cstate; @@ -2788,6 +2787,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_debugfs_device_add(device); 
return NO_ERROR; +out_destroy_workqueue: + destroy_workqueue(device->submit.wq); out_idr_remove_from_resource: for_each_connection_safe(connection, n, resource) { peer_device = idr_remove(&connection->peer_devices, vnr); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 864c98e748757dd615163091f597be66bf6b4219..249eba7d21c28078c3e7bad4c51269e5291475b6 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1210,6 +1210,7 @@ static void decide_on_discard_support(struct drbd_device *device, struct drbd_connection *connection = first_peer_device(device)->connection; struct request_queue *q = device->rq_queue; + unsigned int max_discard_sectors; if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev)) goto not_supported; @@ -1230,15 +1231,14 @@ static void decide_on_discard_support(struct drbd_device *device, * topology on all peers. */ blk_queue_discard_granularity(q, 512); - q->limits.max_discard_sectors = drbd_max_discard_sectors(connection); - q->limits.max_write_zeroes_sectors = - drbd_max_discard_sectors(connection); + max_discard_sectors = drbd_max_discard_sectors(connection); + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); return; not_supported: blk_queue_discard_granularity(q, 0); - q->limits.max_discard_sectors = 0; - q->limits.max_write_zeroes_sectors = 0; + blk_queue_max_discard_sectors(q, 0); } static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index ccad3d7b3ddd9a475c82755ab9e837c9b285f4dd..487840e3564dff0823768ad30fd174e1e3e4351a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4593,8 +4593,10 @@ static int __init do_floppy_init(void) goto out_put_disk; err = floppy_alloc_disk(drive, 0); - if (err) + if (err) { + blk_mq_free_tag_set(&tag_sets[drive]); goto out_put_disk; + } timer_setup(&motor_off_timer[drive], motor_off_callback, 0); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ad92192c7d617368194fa22de2f9c3a2845f8e62..d12d3d171ec4c54957a5fa3c24481b71a554c581 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1773,7 +1773,16 @@ static const struct block_device_operations lo_fops = { /* * And now the modules code and kernel interface. */ -static int max_loop; + +/* + * If max_loop is specified, create that many devices upfront. + * This also becomes a hard limit. If max_loop is not specified, + * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module + * init time. Loop devices can be requested on-demand with the + * /dev/loop-control interface, or be instantiated by accessing + * a 'dead' device node. + */ +static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT; module_param(max_loop, int, 0444); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); module_param(max_part, int, 0444); @@ -2181,7 +2190,7 @@ MODULE_ALIAS("devname:loop-control"); static int __init loop_init(void) { - int i, nr; + int i; int err; part_shift = 0; @@ -2209,19 +2218,6 @@ static int __init loop_init(void) goto err_out; } - /* - * If max_loop is specified, create that many devices upfront. - * This also becomes a hard limit. If max_loop is not specified, - * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module - * init time. Loop devices can be requested on-demand with the - * /dev/loop-control interface, or be instantiated by accessing - * a 'dead' device node. 
- */ - if (max_loop) - nr = max_loop; - else - nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; - err = misc_register(&loop_misc); if (err < 0) goto err_out; @@ -2233,7 +2229,7 @@ static int __init loop_init(void) } /* pre-create number of devices given by config or max_loop */ - for (i = 0; i < nr; i++) + for (i = 0; i < max_loop; i++) loop_add(i); printk(KERN_INFO "loop: module loaded\n"); diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index e9de9d846b730359623d5b451015c169937f9685..17b677b5d3b221fac658268958730dbc71dade52 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1992,6 +1992,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd; int ret = -EINVAL; + if (issue_flags & IO_URING_F_NONBLOCK) + return -EAGAIN; + ublk_ctrl_cmd_dump(cmd); if (!(issue_flags & IO_URING_F_SQE128)) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 19da5defd7348d4c32bd0324062abec7f00f2b08..a7697027ce43b75ae5eac72a0cec2808b3cec3ca 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx) virtqueue_notify(vq->vq); } +static blk_status_t virtblk_fail_to_queue(struct request *req, int rc) +{ + virtblk_cleanup_cmd(req); + switch (rc) { + case -ENOSPC: + return BLK_STS_DEV_RESOURCE; + case -ENOMEM: + return BLK_STS_RESOURCE; + default: + return BLK_STS_IOERR; + } +} + static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx, struct virtio_blk *vblk, struct request *req, struct virtblk_req *vbr) { blk_status_t status; + int num; status = virtblk_setup_cmd(vblk->vdev, req, vbr); if (unlikely(status)) return status; - vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr); - if (unlikely(vbr->sg_table.nents < 0)) { - virtblk_cleanup_cmd(req); - return BLK_STS_RESOURCE; - } + num = virtblk_map_data(hctx, req, vbr); + if (unlikely(num < 0)) + return virtblk_fail_to_queue(req, -ENOMEM); + vbr->sg_table.nents = num; blk_mq_start_request(req); @@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_stop_hw_queue(hctx); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); virtblk_unmap_data(req, vbr); - virtblk_cleanup_cmd(req); - switch (err) { - case -ENOSPC: - return BLK_STS_DEV_RESOURCE; - case -ENOMEM: - return BLK_STS_RESOURCE; - default: - return BLK_STS_IOERR; - } + return virtblk_fail_to_queue(req, err); } if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index a657e9a3e96a55ac9423f1b0f43080e12102940c..f6b4b7a1be4cca24cce443fec11a29a88874182e 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2524,7 +2524,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) */ err = btintel_read_version(hdev, &ver); if (err) - return err; + break; /* Apply the device specific HCI quirks * @@ -2566,7 +2566,8 @@ static int btintel_setup_combined(struct hci_dev *hdev) default: bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); - return -EINVAL; + err = -EINVAL; + break; } exit_error: diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index f05018988a17754a15a471e065827e444bd67315..6beafd62d722618763e0ea75c7dd0a067d4349e1 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -802,13 +802,13 @@ static inline void btusb_free_frags(struct btusb_data *data) 
spin_lock_irqsave(&data->rxlock, flags); - kfree_skb(data->evt_skb); + dev_kfree_skb_irq(data->evt_skb); data->evt_skb = NULL; - kfree_skb(data->acl_skb); + dev_kfree_skb_irq(data->acl_skb); data->acl_skb = NULL; - kfree_skb(data->sco_skb); + dev_kfree_skb_irq(data->sco_skb); data->sco_skb = NULL; spin_unlock_irqrestore(&data->rxlock, flags); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index d7e0b75db8a607f90fdc94a704b67b981d3cf0ee..2b6c0e1922cb3bf3801210e219194728dfdfd4b2 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -53,11 +53,13 @@ * struct bcm_device_data - device specific data * @no_early_set_baudrate: Disallow set baudrate before driver setup() * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it + * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable * @max_autobaud_speed: max baudrate supported by device in autobaud mode */ struct bcm_device_data { bool no_early_set_baudrate; bool drive_rts_on_open; + bool no_uart_clock_set; u32 max_autobaud_speed; }; @@ -100,6 +102,7 @@ struct bcm_device_data { * @is_suspended: whether flow control is currently disabled * @no_early_set_baudrate: don't set_baudrate before setup() * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it + * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable * @pcm_int_params: keep the initial PCM configuration * @use_autobaud_mode: start Bluetooth device in autobaud mode * @max_autobaud_speed: max baudrate supported by device in autobaud mode @@ -140,6 +143,7 @@ struct bcm_device { #endif bool no_early_set_baudrate; bool drive_rts_on_open; + bool no_uart_clock_set; bool use_autobaud_mode; u8 pcm_int_params[5]; u32 max_autobaud_speed; @@ -172,10 +176,11 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct hci_dev *hdev = hu->hdev; + struct bcm_data *bcm = hu->priv; struct sk_buff *skb; struct bcm_update_uart_baud_rate param; - if (speed > 3000000) { + if (speed > 3000000 && !bcm->dev->no_uart_clock_set) { struct bcm_write_uart_clock_setting clock; clock.type = BCM_UART_CLOCK_48MHZ; @@ -1529,6 +1534,7 @@ static int bcm_serdev_probe(struct serdev_device *serdev) bcmdev->max_autobaud_speed = data->max_autobaud_speed; bcmdev->no_early_set_baudrate = data->no_early_set_baudrate; bcmdev->drive_rts_on_open = data->drive_rts_on_open; + bcmdev->no_uart_clock_set = data->no_uart_clock_set; } return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto); @@ -1550,6 +1556,10 @@ static struct bcm_device_data bcm43438_device_data = { .drive_rts_on_open = true, }; +static struct bcm_device_data cyw4373a0_device_data = { + .no_uart_clock_set = true, +}; + static struct bcm_device_data cyw55572_device_data = { .max_autobaud_speed = 921600, }; @@ -1566,6 +1576,7 @@ static const struct of_device_id bcm_bluetooth_of_match[] = { { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data }, { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data }, { .compatible = "brcm,bcm4335a0" }, + { .compatible = "cypress,cyw4373a0-bt", .data = &cyw4373a0_device_data }, { .compatible = "infineon,cyw55572-bt", .data = &cyw55572_device_data }, { }, }; diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index cf4a560958173af884597768cc72c102b1b23804..8055f63603f45fdd1bd39394fa38e93416923549 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c 
@@ -378,7 +378,7 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) i++; __skb_unlink(skb, &bcsp->unack); - kfree_skb(skb); + dev_kfree_skb_irq(skb); } if (skb_queue_empty(&bcsp->unack)) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index c5a0409ef84fd31ff8b9a8fbaf95a0803eab831d..6455bc4fb5bb3f71b02cd01fa4828e5122528d8b 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -313,7 +313,7 @@ static void h5_pkt_cull(struct h5 *h5) break; __skb_unlink(skb, &h5->unack); - kfree_skb(skb); + dev_kfree_skb_irq(skb); } if (skb_queue_empty(&h5->unack)) diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 4eb420a9ed04e4591c2be15368baf88f63c11e1e..5abc01a2acf7219140830732583bdad62650c4ae 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -345,7 +345,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) default: BT_ERR("illegal hcill state: %ld (losing packet)", ll->hcill_state); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 8df11016fd51b2890cbc6d220f582b550a2d7585..bae9b2a408d95911a0a905dc94af019aad0623f7 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -912,7 +912,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) default: BT_ERR("Illegal tx state: %d (losing packet)", qca->tx_ibs_state); - kfree_skb(skb); + dev_kfree_skb_irq(skb); break; } diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 4a42186ff1112ef169b227e31af283d53fdfcc3c..083459028a4b81e35d01179265af95a8711e4835 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -301,7 +301,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) read_lock_irq(&mhi_chan->lock); /* Only ring DB if ring is not empty */ - if (tre_ring->base && tre_ring->wp != tre_ring->rp) + if (tre_ring->base && tre_ring->wp != tre_ring->rp && + mhi_chan->ch_state == MHI_CH_STATE_ENABLED) mhi_ring_chan_db(mhi_cntrl, mhi_chan); read_unlock_irq(&mhi_chan->lock); } diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index c22d4184bb612a61c83762e0b0feaf21f118fe15..0555e3838bce1fbead6536a0f8a841aecbec5207 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c @@ -143,15 +143,19 @@ static int __init amd_rng_mod_init(void) found: err = pci_read_config_dword(pdev, 0x58, &pmbase); if (err) - return err; + goto put_dev; pmbase &= 0x0000FF00; - if (pmbase == 0) - return -EIO; + if (pmbase == 0) { + err = -EIO; + goto put_dev; + } priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + err = -ENOMEM; + goto put_dev; + } if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", @@ -185,6 +189,8 @@ static int __init amd_rng_mod_init(void) release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); out: kfree(priv); +put_dev: + pci_dev_put(pdev); return err; } @@ -200,6 +206,8 @@ static void __exit amd_rng_mod_exit(void) release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); + pci_dev_put(priv->pcidev); + kfree(priv); } diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index 138ce434f86b20b7552ec962cb8267115b97c059..12fbe809183190209c371a3983b1270e2f3871f1 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c @@ -51,6 +51,10 @@ static const struct pci_device_id 
pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, pci_tbl); +struct amd_geode_priv { + struct pci_dev *pcidev; + void __iomem *membase; +}; static int geode_rng_data_read(struct hwrng *rng, u32 *data) { @@ -90,6 +94,7 @@ static int __init geode_rng_init(void) const struct pci_device_id *ent; void __iomem *mem; unsigned long rng_base; + struct amd_geode_priv *priv; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); @@ -97,17 +102,26 @@ static int __init geode_rng_init(void) goto found; } /* Device not found. */ - goto out; + return err; found: + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto put_dev; + } + rng_base = pci_resource_start(pdev, 0); if (rng_base == 0) - goto out; + goto free_priv; err = -ENOMEM; mem = ioremap(rng_base, 0x58); if (!mem) - goto out; - geode_rng.priv = (unsigned long)mem; + goto free_priv; + + geode_rng.priv = (unsigned long)priv; + priv->membase = mem; + priv->pcidev = pdev; pr_info("AMD Geode RNG detected\n"); err = hwrng_register(&geode_rng); @@ -116,20 +130,26 @@ static int __init geode_rng_init(void) err); goto err_unmap; } -out: return err; err_unmap: iounmap(mem); - goto out; +free_priv: + kfree(priv); +put_dev: + pci_dev_put(pdev); + return err; } static void __exit geode_rng_exit(void) { - void __iomem *mem = (void __iomem *)geode_rng.priv; + struct amd_geode_priv *priv; + priv = (struct amd_geode_priv *)geode_rng.priv; hwrng_unregister(&geode_rng); - iounmap(mem); + iounmap(priv->membase); + pci_dev_put(priv->pcidev); + kfree(priv); } module_init(geode_rng_init); diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 49a1707693c9ff6395cd8e880b4c2571418c1cd4..5d403fb5bd9297a0cafc7481f1513d8157a640c7 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -1330,6 +1330,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; + struct module *owner; if (!acquire_ipmi_user(user, &i)) { /* @@ -1392,8 +1393,9 @@ static void _ipmi_destroy_user(struct ipmi_user *user) kfree(rcvr); } + owner = intf->owner; kref_put(&intf->refcount, intf_free); - module_put(intf->owner); + module_put(owner); } int ipmi_destroy_user(struct ipmi_user *user) @@ -3704,12 +3706,16 @@ static void deliver_smi_err_response(struct ipmi_smi *intf, struct ipmi_smi_msg *msg, unsigned char err) { + int rv; msg->rsp[0] = msg->data[0] | 4; msg->rsp[1] = msg->data[1]; msg->rsp[2] = err; msg->rsp_size = 3; - /* It's an error, so it will never requeue, no need to check return. */ - handle_one_recv_msg(intf, msg); + + /* This will never requeue, but it may ask us to free the message. 
*/ + rv = handle_one_recv_msg(intf, msg); + if (rv == 0) + ipmi_free_smi_msg(msg); } static void cleanup_smi_msgs(struct ipmi_smi *intf) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 6e357ad76f2eba3a29513b4d69794d3660ac5b2a..abddd7e43a9a623601a6f2a153e50d938a43d1b8 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2153,6 +2153,20 @@ static int __init init_ipmi_si(void) } module_init(init_ipmi_si); +static void wait_msg_processed(struct smi_info *smi_info) +{ + unsigned long jiffies_now; + long time_diff; + + while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { + jiffies_now = jiffies; + time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) + * SI_USEC_PER_JIFFY); + smi_event_handler(smi_info, time_diff); + schedule_timeout_uninterruptible(1); + } +} + static void shutdown_smi(void *send_info) { struct smi_info *smi_info = send_info; @@ -2187,16 +2201,13 @@ static void shutdown_smi(void *send_info) * in the BMC. Note that timers and CPU interrupts are off, * so no need for locks. */ - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + wait_msg_processed(smi_info); + if (smi_info->handlers) disable_si_irq(smi_info); - while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { - poll(smi_info); - schedule_timeout_uninterruptible(1); - } + + wait_msg_processed(smi_info); + if (smi_info->handlers) smi_info->handlers->cleanup(smi_info->si_sm); diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c index 19c32bf50e0e9c66c6aae6eceaebccf3beace2b9..2dea8cd5a09ac9efc772fefa23e0346e0b4b779c 100644 --- a/drivers/char/ipmi/kcs_bmc_aspeed.c +++ b/drivers/char/ipmi/kcs_bmc_aspeed.c @@ -406,13 +406,31 @@ static void aspeed_kcs_check_obe(struct timer_list *timer) static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) { struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); + int rc; + u8 str; /* We don't have an OBE IRQ, emulate it */ if (mask & KCS_BMC_EVENT_TYPE_OBE) { - if (KCS_BMC_EVENT_TYPE_OBE & state) - mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); - else + if (KCS_BMC_EVENT_TYPE_OBE & state) { + /* + * Given we don't have an OBE IRQ, delay by polling briefly to see if we can + * observe such an event before returning to the caller. This is not + * incorrect because OBF may have already become clear before enabling the + * IRQ if we had one, under which circumstance no event will be propagated + * anyway. + * + * The onus is on the client to perform a race-free check that it hasn't + * missed the event. + */ + rc = read_poll_timeout_atomic(aspeed_kcs_inb, str, + !(str & KCS_BMC_STR_OBF), 1, 100, false, + &priv->kcs_bmc, priv->kcs_bmc.ioreg.str); + /* Time for the slow path? 
*/ + if (rc == -ETIMEDOUT) + mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); + } else { del_timer(&priv->obe.timer); + } } if (mask & KCS_BMC_EVENT_TYPE_IBF) { diff --git a/drivers/char/random.c b/drivers/char/random.c index 69754155300ea7d9c8e17029d9c226385bb700c7..f5868dddbb618cbb891f475207055a67144413d9 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -160,6 +160,9 @@ EXPORT_SYMBOL(wait_for_random_bytes); * u8 get_random_u8() * u16 get_random_u16() * u32 get_random_u32() + * u32 get_random_u32_below(u32 ceil) + * u32 get_random_u32_above(u32 floor) + * u32 get_random_u32_inclusive(u32 floor, u32 ceil) * u64 get_random_u64() * unsigned long get_random_long() * @@ -510,6 +513,41 @@ DEFINE_BATCHED_ENTROPY(u16) DEFINE_BATCHED_ENTROPY(u32) DEFINE_BATCHED_ENTROPY(u64) +u32 __get_random_u32_below(u32 ceil) +{ + /* + * This is the slow path for variable ceil. It is still fast, most of + * the time, by doing traditional reciprocal multiplication and + * opportunistically comparing the lower half to ceil itself, before + * falling back to computing a larger bound, and then rejecting samples + * whose lower half would indicate a range indivisible by ceil. The use + * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable + * in 32-bits. + */ + u32 rand = get_random_u32(); + u64 mult; + + /* + * This function is technically undefined for ceil == 0, and in fact + * for the non-underscored constant version in the header, we build bug + * on that. But for the non-constant case, it's convenient to have that + * evaluate to being a straight call to get_random_u32(), so that + * get_random_u32_inclusive() can work over its whole range without + * undefined behavior. + */ + if (unlikely(!ceil)) + return rand; + + mult = (u64)ceil * rand; + if (unlikely((u32)mult < ceil)) { + u32 bound = -ceil % ceil; + while (unlikely((u32)mult < bound)) + mult = (u64)ceil * get_random_u32(); + } + return mult >> 32; +} +EXPORT_SYMBOL(__get_random_u32_below); + #ifdef CONFIG_SMP /* * This function is called when the CPU is coming up, with entry diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index 1b18ce5ebab1ef4c49348d48a52f7de2fec48627..0913d3eb8d518e7658677d95326a698cadd093e4 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -90,16 +90,21 @@ int tpm_read_log_acpi(struct tpm_chip *chip) return -ENODEV; if (tbl->header.length < - sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) + sizeof(*tbl) + sizeof(struct acpi_tpm2_phy)) { + acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; + } tpm2_phy = (void *)tbl + sizeof(*tbl); len = tpm2_phy->log_area_minimum_length; start = tpm2_phy->log_area_start_address; - if (!start || !len) + if (!start || !len) { + acpi_put_table((struct acpi_table_header *)tbl); return -ENODEV; + } + acpi_put_table((struct acpi_table_header *)tbl); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2; } else { /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ @@ -120,8 +125,10 @@ int tpm_read_log_acpi(struct tpm_chip *chip) break; } + acpi_put_table((struct acpi_table_header *)buff); format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2; } + if (!len) { dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__); return -EIO; @@ -156,5 +163,4 @@ int tpm_read_log_acpi(struct tpm_chip *chip) kfree(log->bios_event_log); log->bios_event_log = NULL; return ret; - } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 
d69905233aff2da31bfd55d6f53a62e5a5cb90b4..7e513b7718320c0392385a7d322e97b05e868f11 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -412,7 +412,9 @@ int tpm_pm_suspend(struct device *dev) } suspended: - return rc; + if (rc) + dev_err(dev, "Ignoring error %d while suspending\n", rc); + return 0; } EXPORT_SYMBOL_GPL(tpm_pm_suspend); diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 18606651d1aa43231755e8ec468c9191ec2e0c6b..16fc481d60950f3dd3ef2662d45fd16f71274a37 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -252,7 +252,7 @@ static int __crb_relinquish_locality(struct device *dev, iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, TPM2_TIMEOUT_C)) { - dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); + dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n"); return -ETIME; } @@ -676,12 +676,16 @@ static int crb_acpi_add(struct acpi_device *device) /* Should the FIFO driver handle this? */ sm = buf->start_method; - if (sm == ACPI_TPM2_MEMORY_MAPPED) - return -ENODEV; + if (sm == ACPI_TPM2_MEMORY_MAPPED) { + rc = -ENODEV; + goto out; + } priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + if (!priv) { + rc = -ENOMEM; + goto out; + } if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) { @@ -689,7 +693,8 @@ static int crb_acpi_add(struct acpi_device *device) FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n", buf->header.length, ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); - return -EINVAL; + rc = -EINVAL; + goto out; } crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf)); priv->smc_func_id = crb_smc->smc_func_id; @@ -700,17 +705,23 @@ static int crb_acpi_add(struct acpi_device *device) rc = crb_map_io(device, priv, buf); if (rc) - return rc; + goto out; chip = tpmm_chip_alloc(dev, &tpm_crb); - if (IS_ERR(chip)) - return PTR_ERR(chip); + if (IS_ERR(chip)) { + rc = PTR_ERR(chip); + goto out; + } dev_set_drvdata(&chip->dev, priv); chip->acpi_dev_handle = device->handle; chip->flags = TPM_CHIP_FLAG_TPM2; - return tpm_chip_register(chip); + rc = tpm_chip_register(chip); + +out: + acpi_put_table((struct acpi_table_header *)buf); + return rc; } static int crb_acpi_remove(struct acpi_device *device) diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c index 5c233423c56fa99a1e4572a75f298abcb72541d6..deff23bb54bf1769ef402076b7a17300ea8c55a4 100644 --- a/drivers/char/tpm/tpm_ftpm_tee.c +++ b/drivers/char/tpm/tpm_ftpm_tee.c @@ -397,7 +397,13 @@ static int __init ftpm_mod_init(void) if (rc) return rc; - return driver_register(&ftpm_tee_driver.driver); + rc = driver_register(&ftpm_tee_driver.driver); + if (rc) { + platform_driver_unregister(&ftpm_tee_plat_driver); + return rc; + } + + return 0; } static void __exit ftpm_mod_exit(void) diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index bcff6429e0b4f2b0e2f748a545d95a7408846473..ed5dabd3c72d62708a56d813c7713d7bc2c98c0f 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -125,6 +125,7 @@ static int check_acpi_tpm2(struct device *dev) const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev); struct acpi_table_tpm2 *tbl; acpi_status st; + int ret = 0; if (!aid || aid->driver_data != DEVICE_IS_TPM2) return 0; @@ -132,8 +133,7 @@ static int check_acpi_tpm2(struct device *dev) /* 
If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2 * table is mandatory */ - st = - acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); + st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl); if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) { dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n"); return -EINVAL; @@ -141,9 +141,10 @@ static int check_acpi_tpm2(struct device *dev) /* The tpm2_crb driver handles this device */ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED) - return -ENODEV; + ret = -ENODEV; - return 0; + acpi_put_table((struct acpi_table_header *)tbl); + return ret; } #else static int check_acpi_tpm2(struct device *dev) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 757623bacfd502889ec4e0f0020469a1213b9f2b..3f98e587b3e849aa35346cc8110c17c7c1987cbe 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -682,15 +682,19 @@ static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); - switch (priv->manufacturer_id) { - case TPM_VID_WINBOND: - return ((status == TPM_STS_VALID) || - (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); - case TPM_VID_STM: - return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); - default: - return (status == TPM_STS_COMMAND_READY); + if (!test_bit(TPM_TIS_DEFAULT_CANCELLATION, &priv->flags)) { + switch (priv->manufacturer_id) { + case TPM_VID_WINBOND: + return ((status == TPM_STS_VALID) || + (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY))); + case TPM_VID_STM: + return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)); + default: + break; + } } + + return status == TPM_STS_COMMAND_READY; } static irqreturn_t tis_int_handler(int dummy, void *dev_id) diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 66a5a13cd1df2939fdf910e4bebf28448bacef60..b68479e0de10f396ea294a0c68988c9a479763d9 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -86,6 +86,7 @@ enum tis_defaults { enum tpm_tis_flags { TPM_TIS_ITPM_WORKAROUND = BIT(0), TPM_TIS_INVALID_STATUS = BIT(1), + TPM_TIS_DEFAULT_CANCELLATION = BIT(2), }; struct tpm_tis_data { diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c index 0692510dfcab9b3b6bb8b004912364b3ff70f652..f3a7251c8e38fbe9d19d972d7adde1bf5e16f48f 100644 --- a/drivers/char/tpm/tpm_tis_i2c.c +++ b/drivers/char/tpm/tpm_tis_i2c.c @@ -49,7 +49,7 @@ /* Masks with bits that must be read zero */ #define TPM_ACCESS_READ_ZERO 0x48 -#define TPM_INT_ENABLE_ZERO 0x7FFFFF6 +#define TPM_INT_ENABLE_ZERO 0x7FFFFF60 #define TPM_STS_READ_ZERO 0x23 #define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000 #define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000 @@ -329,6 +329,7 @@ static int tpm_tis_i2c_probe(struct i2c_client *dev, if (!phy->io_buf) return -ENOMEM; + set_bit(TPM_TIS_DEFAULT_CANCELLATION, &phy->priv.flags); phy->i2c_client = dev; /* must precede all communication with the tpm */ diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index d37c45b676abe4da6b7fcf76a0b024543d7dc70d..2afea905f7f3c80b9d0e6a13e94fbb99f7eb4bcb 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -27,10 +27,10 @@ static u32 share_count_nand; static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", }; static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; static const char * const 
audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", }; -static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; +static const char * const video_pll_bypass_sels[] = {"video_pll", "video_pll_ref_sel", }; static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", }; static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; -static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; +static const char * const m7_alt_pll_bypass_sels[] = {"m7_alt_pll", "m7_alt_pll_ref_sel", }; static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", }; @@ -40,24 +40,24 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; -static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out", - "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; +static const char * const imx8mn_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "m7_alt_pll_out", + "sys_pll1_800m", "audio_pll1_out", "video_pll_out", "sys_pll3_out", }; static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out", - "video_pll1_out", "audio_pll2_out", }; + "video_pll_out", "audio_pll2_out", }; static const char * const imx8mn_gpu_shader_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out", - "video_pll1_out", "audio_pll2_out", }; + "video_pll_out", "audio_pll2_out", }; static const char * const imx8mn_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m", "sys_pll2_250m", "sys_pll2_1000m", "audio_pll1_out", - "video_pll1_out", "sys_pll1_100m",}; + "video_pll_out", "sys_pll1_100m",}; static const char * const imx8mn_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_250m", "sys_pll2_200m", "audio_pll1_out", - "video_pll1_out", "sys_pll3_out", }; + "video_pll_out", "sys_pll3_out", }; static const char * const imx8mn_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_200m", "sys_pll1_133m", "sys_pll3_out", @@ -77,23 +77,23 @@ static const char * const imx8mn_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", " static const char * const imx8mn_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out", - "video_pll1_out", "audio_pll2_out", }; + "video_pll_out", "audio_pll2_out", }; static const char * const imx8mn_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out", - "video_pll1_out", "audio_pll2_out", }; + "video_pll_out", "audio_pll2_out", }; static const char * const imx8mn_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_500m", "audio_pll1_out", - "video_pll1_out", "audio_pll2_out", }; + "video_pll_out", "audio_pll2_out", }; static const char * const imx8mn_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m", "sys_pll1_400m", "sys_pll2_125m", "sys_pll3_out", - "audio_pll1_out", "video_pll1_out", }; + "audio_pll1_out", "video_pll_out", }; static const char * const imx8mn_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll2_166m", "sys_pll3_out", - "audio_pll1_out", 
"video_pll1_out", }; + "audio_pll1_out", "video_pll_out", }; static const char * const imx8mn_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m", "sys_pll2_500m", "sys_pll2_1000m", "sys_pll3_out", @@ -103,49 +103,49 @@ static const char * const imx8mn_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", }; -static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", +static const char * const imx8mn_disp_pixel_sels[] = {"osc_24m", "video_pll_out", "audio_pll2_out", "audio_pll1_out", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", }; static const char * const imx8mn_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", - "clk_ext3", "clk_ext4", }; + "video_pll_out", "sys_pll1_133m", "dummy", + "clk_ext2", "clk_ext3", }; static const char * const imx8mn_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll_out", "sys_pll1_133m", "dummy", "clk_ext2", "clk_ext3", }; static const char * const imx8mn_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_sai7_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll_out", "sys_pll1_133m", "dummy", "clk_ext3", "clk_ext4", }; static const char * const imx8mn_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", - "video_pll1_out", "sys_pll1_133m", "osc_hdmi", + "video_pll_out", "sys_pll1_133m", "dummy", "clk_ext2", "clk_ext3", }; static const char * const imx8mn_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", "sys_pll2_100m", "sys_pll1_160m", "audio_pll1_out", - "video_pll1_out", "clk_ext4", }; + "video_pll_out", "clk_ext4", }; static const char * const imx8mn_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "clk_ext1", "clk_ext2", "clk_ext3", - "clk_ext4", "video_pll1_out", }; + "clk_ext4", "video_pll_out", }; static const char * const imx8mn_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", - "sys_pll2_200m", "sys_pll2_500m", "video_pll1_out", - "audio_pll2_out", }; + "sys_pll2_200m", "sys_pll2_500m", "audio_pll1_out", + "video_pll_out", "audio_pll2_out", }; static const char * const imx8mn_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m", "audio_pll2_out", "sys_pll3_out", - "sys_pll2_250m", "video_pll1_out", }; + "sys_pll2_250m", "video_pll_out", }; static const char * const imx8mn_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll2_333m", "sys_pll2_500m", "audio_pll2_out", "sys_pll1_266m", @@ -160,19 +160,19 @@ static const char * const imx8mn_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "s "audio_pll2_out", "sys_pll1_100m", }; static const char * const imx8mn_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", - "sys_pll3_out", "audio_pll1_out", "video_pll1_out", + "sys_pll3_out", "audio_pll1_out", "video_pll_out", "audio_pll2_out", "sys_pll1_133m", }; static const char * const imx8mn_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", - 
"sys_pll3_out", "audio_pll1_out", "video_pll1_out", + "sys_pll3_out", "audio_pll1_out", "video_pll_out", "audio_pll2_out", "sys_pll1_133m", }; static const char * const imx8mn_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", - "sys_pll3_out", "audio_pll1_out", "video_pll1_out", + "sys_pll3_out", "audio_pll1_out", "video_pll_out", "audio_pll2_out", "sys_pll1_133m", }; static const char * const imx8mn_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", - "sys_pll3_out", "audio_pll1_out", "video_pll1_out", + "sys_pll3_out", "audio_pll1_out", "video_pll_out", "audio_pll2_out", "sys_pll1_133m", }; static const char * const imx8mn_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", @@ -213,63 +213,63 @@ static const char * const imx8mn_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "s static const char * const imx8mn_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", "sys_pll3_out", "clk_ext1", - "sys_pll1_80m", "video_pll1_out", }; + "sys_pll1_80m", "video_pll_out", }; static const char * const imx8mn_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", "sys_pll3_out", "clk_ext1", - "sys_pll1_80m", "video_pll1_out", }; + "sys_pll1_80m", "video_pll_out", }; static const char * const imx8mn_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", "sys_pll3_out", "clk_ext2", - "sys_pll1_80m", "video_pll1_out", }; + "sys_pll1_80m", "video_pll_out", }; static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", "sys_pll3_out", "clk_ext2", - "sys_pll1_80m", "video_pll1_out", }; + "sys_pll1_80m", "video_pll_out", }; static const char * const imx8mn_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", - "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "sys_pll1_40m", "video_pll_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1", }; static const char * const imx8mn_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", - "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "sys_pll1_40m", "video_pll_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1", }; static const char * const imx8mn_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", - "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "sys_pll1_40m", "video_pll_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1", }; static const char * const imx8mn_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", - "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "sys_pll1_40m", "video_pll_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1", }; static const char * const imx8mn_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", - "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "sys_pll1_40m", "video_pll_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1", }; static const char * const imx8mn_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", - "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "sys_pll1_40m", "video_pll_out", "sys_pll1_80m", "audio_pll1_out", "clk_ext1", }; static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", - "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out", + "m7_alt_pll_out", "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", }; -static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", +static const char * const imx8mn_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "m7_alt_pll_out", "sys_pll3_out", "sys_pll2_200m", "sys_pll1_266m", "sys_pll2_500m", "sys_pll1_100m", }; 
static const char * const imx8mn_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", - "audio_pll2_out", "video_pll1_out", }; + "audio_pll2_out", "video_pll_out", }; static const char * const imx8mn_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m", "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2", - "audio_pll2_out", "video_pll1_out", }; + "audio_pll2_out", "video_pll_out", }; static const char * const imx8mn_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", - "audio_pll2_out", "video_pll1_out", }; + "audio_pll2_out", "video_pll_out", }; static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", "sys_pll3_out", "sys_pll1_266m", @@ -277,15 +277,15 @@ static const char * const imx8mn_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "s static const char * const imx8mn_camera_pixel_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", - "audio_pll2_out", "video_pll1_out", }; + "audio_pll2_out", "video_pll_out", }; static const char * const imx8mn_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2", - "audio_pll2_out", "video_pll1_out", }; + "audio_pll2_out", "video_pll_out", }; static const char * const imx8mn_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m", "sys_pll2_1000m", "clk_ext2", - "audio_pll2_out", "video_pll1_out", }; + "audio_pll2_out", "video_pll_out", }; static const char * const imx8mn_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", @@ -306,9 +306,9 @@ static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "du "dummy", "sys_pll1_80m", }; static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", - "video_pll1_out", "osc_32k", }; + "video_pll_out", "osc_32k", }; -static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out", +static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll_out", "dummy", "dummy", "gpu_pll_out", "dummy", "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3", "dummy", "dummy", "osc_24m", "dummy", "osc_32k"}; @@ -349,19 +349,19 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_AUDIO_PLL1_REF_SEL] = imx_clk_hw_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_AUDIO_PLL2_REF_SEL] = imx_clk_hw_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - hws[IMX8MN_VIDEO_PLL1_REF_SEL] = imx_clk_hw_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); + hws[IMX8MN_VIDEO_PLL_REF_SEL] = imx_clk_hw_mux("video_pll_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_DRAM_PLL_REF_SEL] = imx_clk_hw_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_hw_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); - hws[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_hw_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); + hws[IMX8MN_M7_ALT_PLL_REF_SEL] = imx_clk_hw_mux("m7_alt_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); 
hws[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_hw_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_hw_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); hws[IMX8MN_AUDIO_PLL1] = imx_clk_hw_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll); hws[IMX8MN_AUDIO_PLL2] = imx_clk_hw_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll); - hws[IMX8MN_VIDEO_PLL1] = imx_clk_hw_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll); + hws[IMX8MN_VIDEO_PLL] = imx_clk_hw_pll14xx("video_pll", "video_pll_ref_sel", base + 0x28, &imx_1443x_pll); hws[IMX8MN_DRAM_PLL] = imx_clk_hw_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_dram_pll); hws[IMX8MN_GPU_PLL] = imx_clk_hw_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll); - hws[IMX8MN_VPU_PLL] = imx_clk_hw_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll); + hws[IMX8MN_M7_ALT_PLL] = imx_clk_hw_pll14xx("m7_alt_pll", "m7_alt_pll_ref_sel", base + 0x74, &imx_1416x_pll); hws[IMX8MN_ARM_PLL] = imx_clk_hw_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll); hws[IMX8MN_SYS_PLL1] = imx_clk_hw_fixed("sys_pll1", 800000000); hws[IMX8MN_SYS_PLL2] = imx_clk_hw_fixed("sys_pll2", 1000000000); @@ -370,20 +370,20 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) /* PLL bypass out */ hws[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MN_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MN_VIDEO_PLL_BYPASS] = imx_clk_hw_mux_flags("video_pll_bypass", base + 0x28, 16, 1, video_pll_bypass_sels, ARRAY_SIZE(video_pll_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); - hws[IMX8MN_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX8MN_M7_ALT_PLL_BYPASS] = imx_clk_hw_mux_flags("m7_alt_pll_bypass", base + 0x74, 28, 1, m7_alt_pll_bypass_sels, ARRAY_SIZE(m7_alt_pll_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); hws[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); /* PLL out gate */ hws[IMX8MN_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", base, 13); hws[IMX8MN_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13); - hws[IMX8MN_VIDEO_PLL1_OUT] = imx_clk_hw_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13); + hws[IMX8MN_VIDEO_PLL_OUT] = imx_clk_hw_gate("video_pll_out", "video_pll_bypass", 
base + 0x28, 13); hws[IMX8MN_DRAM_PLL_OUT] = imx_clk_hw_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13); hws[IMX8MN_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11); - hws[IMX8MN_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11); + hws[IMX8MN_M7_ALT_PLL_OUT] = imx_clk_hw_gate("m7_alt_pll_out", "m7_alt_pll_bypass", base + 0x74, 11); hws[IMX8MN_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11); hws[IMX8MN_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11); diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 652ae58c2735fffb51c9c20b82d4c1a64c467f23..5d68d975b4eb1f7192f85a6bab0cadec4e432148 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -17,6 +17,7 @@ static u32 share_count_nand; static u32 share_count_media; +static u32 share_count_usb; static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", }; static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; @@ -673,7 +674,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0); hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0); hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0); - hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0); + hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate2_shared2("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0, &share_count_usb); + hws[IMX8MP_CLK_USB_SUSP] = imx_clk_hw_gate2_shared2("usb_suspend_clk", "osc_32k", ccm_base + 0x44d0, 0, &share_count_usb); hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0); hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0); hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0); diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c index 99cff1fd108b5b7c68d25c86b39e152770ac5872..02d6a9894521d4c5003d498bd371b033b875cb46 100644 --- a/drivers/clk/imx/clk-imx93.c +++ b/drivers/clk/imx/clk-imx93.c @@ -170,7 +170,7 @@ static const struct imx93_clk_ccgr { { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub }, { IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, }, { IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, }, - { IMX93_CLK_FLEXSPI1_GATE, "flexspi", "flexspi_root", 0x8640, }, + { IMX93_CLK_FLEXSPI1_GATE, "flexspi1", "flexspi1_root", 0x8640, }, { IMX93_CLK_GPIO1_GATE, "gpio1", "m33_root", 0x8880, }, { IMX93_CLK_GPIO2_GATE, "gpio2", "bus_wakeup_root", 0x88c0, }, { IMX93_CLK_GPIO3_GATE, "gpio3", "bus_wakeup_root", 0x8900, }, @@ -240,7 +240,7 @@ static const struct imx93_clk_ccgr { { IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, }, { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, }, { IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, }, - { IMX93_CLK_ENET1_GATE, "enet1", "enet_root", 0x9e00, }, + { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, }, { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, }, { IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, }, { IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, }, @@ -258,7 +258,7 @@ static int 
imx93_clocks_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; const struct imx93_clk_root *root; const struct imx93_clk_ccgr *ccgr; - void __iomem *base = NULL; + void __iomem *base, *anatop_base; int i, ret; clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, @@ -285,20 +285,22 @@ static int imx93_clocks_probe(struct platform_device *pdev) "sys_pll_pfd2", 1, 2); np = of_find_compatible_node(NULL, NULL, "fsl,imx93-anatop"); - base = of_iomap(np, 0); + anatop_base = of_iomap(np, 0); of_node_put(np); - if (WARN_ON(!base)) + if (WARN_ON(!anatop_base)) return -ENOMEM; - clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", base + 0x1200, + clks[IMX93_CLK_AUDIO_PLL] = imx_clk_fracn_gppll("audio_pll", "osc_24m", anatop_base + 0x1200, &imx_fracn_gppll); - clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", base + 0x1400, + clks[IMX93_CLK_VIDEO_PLL] = imx_clk_fracn_gppll("video_pll", "osc_24m", anatop_base + 0x1400, &imx_fracn_gppll); np = dev->of_node; base = devm_platform_ioremap_resource(pdev, 0); - if (WARN_ON(IS_ERR(base))) + if (WARN_ON(IS_ERR(base))) { + iounmap(anatop_base); return PTR_ERR(base); + } for (i = 0; i < ARRAY_SIZE(root_array); i++) { root = &root_array[i]; @@ -327,6 +329,7 @@ static int imx93_clocks_probe(struct platform_device *pdev) unregister_hws: imx_unregister_hw_clocks(clks, IMX93_CLK_END); + iounmap(anatop_base); return ret; } diff --git a/drivers/clk/imx/clk-imxrt1050.c b/drivers/clk/imx/clk-imxrt1050.c index 9539d35588ee938fa5642a073a71c2ff75ed99dc..26108e9f7e67ad5dad6501a94fb4bf65c7431fc9 100644 --- a/drivers/clk/imx/clk-imxrt1050.c +++ b/drivers/clk/imx/clk-imxrt1050.c @@ -140,7 +140,7 @@ static int imxrt1050_clocks_probe(struct platform_device *pdev) hws[IMXRT1050_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", ccm_base + 0x80, 2); hws[IMXRT1050_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", ccm_base + 0x80, 4); hws[IMXRT1050_CLK_LPUART1] = imx_clk_hw_gate2("lpuart1", "lpuart_podf", ccm_base + 0x7c, 24); - hws[IMXRT1050_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif", "lcdif_podf", ccm_base + 0x74, 10); + hws[IMXRT1050_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif", "lcdif_podf", ccm_base + 0x70, 28); hws[IMXRT1050_CLK_DMA] = imx_clk_hw_gate("dma", "ipg", ccm_base + 0x7C, 6); hws[IMXRT1050_CLK_DMA_MUX] = imx_clk_hw_gate("dmamux0", "ipg", ccm_base + 0x7C, 7); imx_check_clk_hws(hws, IMXRT1050_CLK_END); diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c index d90727a53283c70e7ac1a627c1387b96c36688e0..49666047bf0ed53f5340762131c4b5a5b6746425 100644 --- a/drivers/clk/mediatek/clk-mt7986-infracfg.c +++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c @@ -153,7 +153,7 @@ static const struct mtk_gate infra_clks[] = { 18), GATE_INFRA1(CLK_INFRA_MSDC_66M_CK, "infra_msdc_66m", "infra_sysaxi_d2", 19), - GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "csw_f26m_sel", 20), + GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "infra_adc_frc", 20), GATE_INFRA1(CLK_INFRA_ADC_FRC_CK, "infra_adc_frc", "csw_f26m_sel", 21), GATE_INFRA1(CLK_INFRA_FBIST2FPC_CK, "infra_fbist2fpc", "nfi1x_sel", 23), /* INFRA2 */ diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c index 7be028dced63d352c039e40128f6da89d6b31d28..32aae880a14f3b1c582ce7031dc4bb8bc032eb8e 100644 --- a/drivers/clk/microchip/clk-mpfs-ccc.c +++ b/drivers/clk/microchip/clk-mpfs-ccc.c @@ -166,6 +166,9 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct 
mpfs_ccc_out_hw_ struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i]; char *name = devm_kzalloc(dev, 23, GFP_KERNEL); + if (!name) + return -ENOMEM; + snprintf(name, 23, "%s_out%u", parent->name, i); out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0); out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] + @@ -200,6 +203,9 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i]; char *name = devm_kzalloc(dev, 18, GFP_KERNEL); + if (!name) + return -ENOMEM; + pll_hw->base = data->pll_base[i]; snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i); pll_hw->name = (const char *)name; diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c index 45da736bd5f4cb079d0f043a7c5c29889ed77529..293a9dfa7151aabd484f2a0d1791a7b9404d6259 100644 --- a/drivers/clk/qcom/clk-krait.c +++ b/drivers/clk/qcom/clk-krait.c @@ -114,6 +114,8 @@ static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, if (d->lpl) mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; + else + mask <<= d->shift; spin_lock_irqsave(&krait_clock_reg_lock, flags); val = krait_get_l2_indirect_reg(d->offset); diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c index 0c3c2e26ede90ba2fe0c10df1894d4b5731ce0e2..ea6f54ed846ece18b9c493cf26b2bce88e98a21f 100644 --- a/drivers/clk/qcom/dispcc-sm6350.c +++ b/drivers/clk/qcom/dispcc-sm6350.c @@ -306,7 +306,7 @@ static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = { .name = "disp_cc_mdss_pclk0_clk_src", .parent_data = disp_cc_parent_data_5, .num_parents = ARRAY_SIZE(disp_cc_parent_data_5), - .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE | CLK_OPS_PARENT_ENABLE, .ops = &clk_pixel_ops, }, }; @@ -385,7 +385,7 @@ static struct clk_branch disp_cc_mdss_byte0_clk = { &disp_cc_mdss_byte0_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE | CLK_OPS_PARENT_ENABLE, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index 718de17a1e60055c74f3ea22378be4e299fde0a7..6447f3e81b555195b1fd0611a24583aaaf437385 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -79,7 +79,9 @@ static struct clk_regmap pll4_vote = { .enable_mask = BIT(4), .hw.init = &(struct clk_init_data){ .name = "pll4_vote", - .parent_names = (const char *[]){ "pll4" }, + .parent_data = &(const struct clk_parent_data){ + .fw_name = "pll4", .name = "pll4", + }, .num_parents = 1, .ops = &clk_pll_vote_ops, }, diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c index 9755ef4888c193754c63ba243d073bcc64a39d25..a0ba37656b07b78c30282c0564833796ebaed9ec 100644 --- a/drivers/clk/qcom/gcc-sm8250.c +++ b/drivers/clk/qcom/gcc-sm8250.c @@ -3267,7 +3267,7 @@ static struct gdsc usb30_prim_gdsc = { .pd = { .name = "usb30_prim_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, }; static struct gdsc usb30_sec_gdsc = { @@ -3275,7 +3275,7 @@ static struct gdsc usb30_sec_gdsc = { .pd = { .name = "usb30_sec_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, }; static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c index 
063e0365f311928e2bb3342f943a84c85eb854de..1339f9211a149bb08a896eb037dbb414c5c016c1 100644 --- a/drivers/clk/qcom/lpassaudiocc-sc7280.c +++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c @@ -722,33 +722,17 @@ static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = { }; MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table); -static void lpassaudio_pm_runtime_disable(void *data) -{ - pm_runtime_disable(data); -} - -static void lpassaudio_pm_clk_destroy(void *data) -{ - pm_clk_destroy(data); -} - -static int lpassaudio_create_pm_clks(struct platform_device *pdev) +static int lpass_audio_setup_runtime_pm(struct platform_device *pdev) { int ret; pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, 50); - pm_runtime_enable(&pdev->dev); - - ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_runtime_disable, &pdev->dev); - if (ret) - return ret; - - ret = pm_clk_create(&pdev->dev); + ret = devm_pm_runtime_enable(&pdev->dev); if (ret) return ret; - ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_clk_destroy, &pdev->dev); + ret = devm_pm_clk_create(&pdev->dev); if (ret) return ret; @@ -756,7 +740,7 @@ static int lpassaudio_create_pm_clks(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "failed to acquire iface clock\n"); - return ret; + return pm_runtime_resume_and_get(&pdev->dev); } static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev) @@ -765,7 +749,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev) struct regmap *regmap; int ret; - ret = lpassaudio_create_pm_clks(pdev); + ret = lpass_audio_setup_runtime_pm(pdev); if (ret) return ret; @@ -775,8 +759,8 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev) regmap = qcom_cc_map(pdev, desc); if (IS_ERR(regmap)) { - pm_runtime_disable(&pdev->dev); - return PTR_ERR(regmap); + ret = PTR_ERR(regmap); + goto exit; } clk_zonda_pll_configure(&lpass_audio_cc_pll, regmap, &lpass_audio_cc_pll_config); @@ -788,20 +772,18 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev) ret = qcom_cc_really_probe(pdev, &lpass_audio_cc_sc7280_desc, regmap); if (ret) { dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n"); - pm_runtime_disable(&pdev->dev); - return ret; + goto exit; } ret = qcom_cc_probe_by_index(pdev, 1, &lpass_audio_cc_reset_sc7280_desc); if (ret) { dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC Resets\n"); - pm_runtime_disable(&pdev->dev); - return ret; + goto exit; } pm_runtime_mark_last_busy(&pdev->dev); +exit: pm_runtime_put_autosuspend(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); return ret; } @@ -839,14 +821,15 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev) struct regmap *regmap; int ret; - ret = lpassaudio_create_pm_clks(pdev); + ret = lpass_audio_setup_runtime_pm(pdev); if (ret) return ret; if (of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) { lpass_audio_cc_sc7280_regmap_config.name = "cc"; desc = &lpass_cc_sc7280_desc; - return qcom_cc_probe(pdev, desc); + ret = qcom_cc_probe(pdev, desc); + goto exit; } lpass_audio_cc_sc7280_regmap_config.name = "lpasscc_aon"; @@ -854,18 +837,22 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev) desc = &lpass_aon_cc_sc7280_desc; regmap = qcom_cc_map(pdev, desc); - if (IS_ERR(regmap)) - return PTR_ERR(regmap); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto exit; + } clk_lucid_pll_configure(&lpass_aon_cc_pll, regmap, &lpass_aon_cc_pll_config); ret = qcom_cc_really_probe(pdev, 
&lpass_aon_cc_sc7280_desc, regmap); - if (ret) + if (ret) { dev_err(&pdev->dev, "Failed to register LPASS AON CC clocks\n"); + goto exit; + } pm_runtime_mark_last_busy(&pdev->dev); +exit: pm_runtime_put_autosuspend(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); return ret; } diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c index ac09b7b840abaa13515f08951126e317aff411ed..a5731994cbed1b639afb81da3ea2648e5fb7c7b2 100644 --- a/drivers/clk/qcom/lpasscorecc-sc7180.c +++ b/drivers/clk/qcom/lpasscorecc-sc7180.c @@ -356,7 +356,7 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = { .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs), }; -static int lpass_create_pm_clks(struct platform_device *pdev) +static int lpass_setup_runtime_pm(struct platform_device *pdev) { int ret; @@ -375,7 +375,7 @@ static int lpass_create_pm_clks(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "failed to acquire iface clock\n"); - return ret; + return pm_runtime_resume_and_get(&pdev->dev); } static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) @@ -384,7 +384,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) struct regmap *regmap; int ret; - ret = lpass_create_pm_clks(pdev); + ret = lpass_setup_runtime_pm(pdev); if (ret) return ret; @@ -392,12 +392,14 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) desc = &lpass_audio_hm_sc7180_desc; ret = qcom_cc_probe_by_index(pdev, 1, desc); if (ret) - return ret; + goto exit; lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc"; regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc); - if (IS_ERR(regmap)) - return PTR_ERR(regmap); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto exit; + } /* * Keep the CLK always-ON @@ -415,6 +417,7 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap); pm_runtime_mark_last_busy(&pdev->dev); +exit: pm_runtime_put_autosuspend(&pdev->dev); return ret; @@ -425,14 +428,19 @@ static int lpass_hm_core_probe(struct platform_device *pdev) const struct qcom_cc_desc *desc; int ret; - ret = lpass_create_pm_clks(pdev); + ret = lpass_setup_runtime_pm(pdev); if (ret) return ret; lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core"; desc = &lpass_core_hm_sc7180_desc; - return qcom_cc_probe_by_index(pdev, 0, desc); + ret = qcom_cc_probe_by_index(pdev, 0, desc); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return ret; } static const struct of_device_id lpass_hm_sc7180_match_table[] = { diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c index d74d46833012ffcff91dc50141d66f21de9ddc85..e02542ca24a06285b8c164b35d072cb08c9a47ba 100644 --- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c @@ -116,7 +116,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cl16mck", R8A779A0_CLK_CL16MCK, CLK_PLL1_DIV2, 64, 1), - DEF_GEN4_SDH("sdh0", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870), + DEF_GEN4_SDH("sd0h", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870), DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870), DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c index 4baf355e26d88b0acdb2673f754cb0bb4d21a71a..27b668def357fa44d753650b8522c6d6621b0789 
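
The lpassaudiocc/lpasscorecc rework above trades hand-rolled devm_add_action_or_reset() teardown for the managed helpers, and finishes setup by taking a runtime-PM reference so every later error path can funnel through a single put. A condensed sketch of that probe-time pattern for a hypothetical device (the real drivers add an "iface" pm_clk just like this):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

static int demo_setup_runtime_pm(struct platform_device *pdev)
{
	int ret;

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);

	/* Managed variants: disabled/destroyed automatically on unbind. */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	ret = devm_pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	ret = pm_clk_add(&pdev->dev, "iface");
	if (ret < 0)
		dev_err(&pdev->dev, "failed to acquire iface clock\n");

	/* Hold a reference while probe touches registers; dropped at the end. */
	return pm_runtime_resume_and_get(&pdev->dev);
}

Every probe exit after this, success or failure, then balances the reference with pm_runtime_mark_last_busy() followed by pm_runtime_put_autosuspend(), which is what the added exit: labels converge on.
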
100644 --- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c @@ -113,7 +113,7 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = { DEF_FIXED("sasyncperd2", R8A779F0_CLK_SASYNCPERD2, R8A779F0_CLK_SASYNCPERD1, 2, 1), DEF_FIXED("sasyncperd4", R8A779F0_CLK_SASYNCPERD4, R8A779F0_CLK_SASYNCPERD1, 4, 1), - DEF_GEN4_SDH("sdh0", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870), + DEF_GEN4_SDH("sd0h", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870), DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, 0x870), DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), @@ -126,10 +126,10 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = { }; static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = { - DEF_MOD("hscif0", 514, R8A779F0_CLK_S0D3), - DEF_MOD("hscif1", 515, R8A779F0_CLK_S0D3), - DEF_MOD("hscif2", 516, R8A779F0_CLK_S0D3), - DEF_MOD("hscif3", 517, R8A779F0_CLK_S0D3), + DEF_MOD("hscif0", 514, R8A779F0_CLK_SASYNCPERD1), + DEF_MOD("hscif1", 515, R8A779F0_CLK_SASYNCPERD1), + DEF_MOD("hscif2", 516, R8A779F0_CLK_SASYNCPERD1), + DEF_MOD("hscif3", 517, R8A779F0_CLK_SASYNCPERD1), DEF_MOD("i2c0", 518, R8A779F0_CLK_S0D6_PER), DEF_MOD("i2c1", 519, R8A779F0_CLK_S0D6_PER), DEF_MOD("i2c2", 520, R8A779F0_CLK_S0D6_PER), @@ -142,10 +142,10 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = { DEF_MOD("msiof3", 621, R8A779F0_CLK_MSO), DEF_MOD("pcie0", 624, R8A779F0_CLK_S0D2), DEF_MOD("pcie1", 625, R8A779F0_CLK_S0D2), - DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER), - DEF_MOD("scif1", 703, R8A779F0_CLK_S0D12_PER), - DEF_MOD("scif3", 704, R8A779F0_CLK_S0D12_PER), - DEF_MOD("scif4", 705, R8A779F0_CLK_S0D12_PER), + DEF_MOD("scif0", 702, R8A779F0_CLK_SASYNCPERD4), + DEF_MOD("scif1", 703, R8A779F0_CLK_SASYNCPERD4), + DEF_MOD("scif3", 704, R8A779F0_CLK_SASYNCPERD4), + DEF_MOD("scif4", 705, R8A779F0_CLK_SASYNCPERD4), DEF_MOD("sdhi0", 706, R8A779F0_CLK_SD0), DEF_MOD("sys-dmac0", 709, R8A779F0_CLK_S0D3_PER), DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER), diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 1488c9d6e63943f6990b794585ad75ad48e0f84d..983faa5707b9cf4d192ba4c53564f8cd74e13f38 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -412,7 +412,7 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, int error; int index; - while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, + while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i++, &clkspec)) { if (clkspec.np != pd->dev.of_node) continue; @@ -425,7 +425,6 @@ static int r9a06g032_attach_dev(struct generic_pm_domain *pd, if (error) return error; } - i++; } return 0; diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index f7827b3b7fc1cb77856347c1e643d9fe200423dd..6e5e502be44a63afdcf1044766a1f75d6ab953b3 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -981,6 +981,7 @@ struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, return mux_clk; err_pll: + kfree(pll->rate_table); clk_unregister(mux_clk); mux_clk = pll_clk; err_mux: diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index fe383471c5f0aee47a31aef33696e1127a42c7b1..0ff28938943f01ace0e8612ba4b217298d9a4c0e 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -1583,6 +1583,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, 
if (ret) { pr_err("%s: failed to register pll clock %s : %d\n", __func__, pll_clk->name, ret); + kfree(pll->rate_table); kfree(pll); return; } diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 53d6e3ec4309f2b8d5dc32f82b912bac4b7ce609..c94b59b80dd43c29c76ce8c02b48e878a5c37f06 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -188,8 +188,10 @@ void __init socfpga_gate_init(struct device_node *node) return; ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL); - if (WARN_ON(!ops)) + if (WARN_ON(!ops)) { + kfree(socfpga_clk); return; + } rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); if (rc) @@ -243,6 +245,7 @@ void __init socfpga_gate_init(struct device_node *node) err = clk_hw_register(NULL, hw_clk); if (err) { + kfree(ops); kfree(socfpga_clk); return; } diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index d820292a381d0a78c818775b9b316c17f2d79e78..40df1db102a77bbe5144d42f6e05361b66634e9a 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -1020,9 +1020,10 @@ static void __init st_of_quadfs_setup(struct device_node *np, clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data, reg, lock); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + kfree(lock); goto err_exit; - else + } else pr_debug("%s: parent %s rate %u\n", __clk_get_name(clk), __clk_get_name(clk_get_parent(clk)), diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c index a484cb945d67bacd6cf418143b81914020b74ad8..1f3234f22667446557a2e8d36843378a2841ac73 100644 --- a/drivers/clk/visconti/pll.c +++ b/drivers/clk/visconti/pll.c @@ -277,6 +277,7 @@ static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx, ret = clk_hw_register(NULL, &pll->hw); if (ret) { pr_err("failed to register pll clock %s : %d\n", name, ret); + kfree(pll->rate_table); kfree(pll); pll_hw_clk = ERR_PTR(ret); } diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 64dcb082d4cf641e2c0cb05900bb27976d3e3106..7b952aa52c0b99ea88a283af115bd5606b52e2e6 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -116,6 +117,7 @@ struct sh_cmt_device { void __iomem *mapbase; struct clk *clk; unsigned long rate; + unsigned int reg_delay; raw_spinlock_t lock; /* Protect the shared start/stop register */ @@ -247,10 +249,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch) static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value) { - if (ch->iostart) - ch->cmt->info->write_control(ch->iostart, 0, value); - else - ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); + u32 old_value = sh_cmt_read_cmstr(ch); + + if (value != old_value) { + if (ch->iostart) { + ch->cmt->info->write_control(ch->iostart, 0, value); + udelay(ch->cmt->reg_delay); + } else { + ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); + udelay(ch->cmt->reg_delay); + } + } } static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) @@ -260,7 +269,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value) { - ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); + u32 old_value = sh_cmt_read_cmcsr(ch); + + if (value != old_value) { + ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); + udelay(ch->cmt->reg_delay); + } } static inline u32 sh_cmt_read_cmcnt(struct 
sh_cmt_channel *ch) @@ -268,14 +282,33 @@ static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) return ch->cmt->info->read_count(ch->ioctrl, CMCNT); } -static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) +static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value) { + /* Tests showed that we need to wait 3 clocks here */ + unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2); + u32 reg; + + if (ch->cmt->info->model > SH_CMT_16BIT) { + int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg, + !(reg & SH_CMT32_CMCSR_WRFLG), + 1, cmcnt_delay, false, ch); + if (ret < 0) + return ret; + } + ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); + udelay(cmcnt_delay); + return 0; } static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value) { - ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); + u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR); + + if (value != old_value) { + ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); + udelay(ch->cmt->reg_delay); + } } static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped) @@ -319,7 +352,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) static int sh_cmt_enable(struct sh_cmt_channel *ch) { - int k, ret; + int ret; dev_pm_syscore_device(&ch->cmt->pdev->dev, true); @@ -347,26 +380,9 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch) } sh_cmt_write_cmcor(ch, 0xffffffff); - sh_cmt_write_cmcnt(ch, 0); - - /* - * According to the sh73a0 user's manual, as CMCNT can be operated - * only by the RCLK (Pseudo 32 kHz), there's one restriction on - * modifying CMCNT register; two RCLK cycles are necessary before - * this register is either read or any modification of the value - * it holds is reflected in the LSI's actual operation. - * - * While at it, we're supposed to clear out the CMCNT as of this - * moment, so make sure it's processed properly here. This will - * take RCLKx2 at maximum. - */ - for (k = 0; k < 100; k++) { - if (!sh_cmt_read_cmcnt(ch)) - break; - udelay(1); - } + ret = sh_cmt_write_cmcnt(ch, 0); - if (sh_cmt_read_cmcnt(ch)) { + if (ret || sh_cmt_read_cmcnt(ch)) { dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", ch->index); ret = -ETIMEDOUT; @@ -995,8 +1011,8 @@ MODULE_DEVICE_TABLE(of, sh_cmt_of_table); static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) { - unsigned int mask; - unsigned int i; + unsigned int mask, i; + unsigned long rate; int ret; cmt->pdev = pdev; @@ -1032,10 +1048,16 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) if (ret < 0) goto err_clk_unprepare; - if (cmt->info->width == 16) - cmt->rate = clk_get_rate(cmt->clk) / 512; - else - cmt->rate = clk_get_rate(cmt->clk) / 8; + rate = clk_get_rate(cmt->clk); + if (!rate) { + ret = -EINVAL; + goto err_clk_disable; + } + + /* We shall wait 2 input clks after register writes */ + if (cmt->info->model >= SH_CMT_48BIT) + cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate); + cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8); /* Map the memory resource(s). 
*/ ret = sh_cmt_map_memory(cmt); diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 2737407ff0698033daa955a41abd65cba914af6f..632523c1232f6513c56de7eb36f77f604de386f8 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -345,8 +345,10 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, return error; r = clk_get_rate(clock); - if (!r) + if (!r) { + clk_disable_unprepare(clock); return -ENODEV; + } if (is_ick) t->ick = clock; diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index cad29ded3a48fd182334e8a218acca55795e1697..00af1a8e34fbd5db7b29e6cf450f30a3ebc5d0a1 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -1258,7 +1258,7 @@ static struct platform_driver omap_dm_timer_driver = { .remove = omap_dm_timer_remove, .driver = { .name = "omap_timer", - .of_match_table = of_match_ptr(omap_timer_match), + .of_match_table = omap_timer_match, .pm = &omap_dm_timer_pm_ops, }, }; diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index d6b80b6dfc28796a1a710c93f97689defef65b4d..8439755559b2195d82196b0c98b32d32320c89a3 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -69,7 +69,7 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, - (val & STM32_LPTIM_CMPOK_ARROK), + (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) return ret; diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index 6448e03bcf488e9a5bd4e9ef939cfc55a11848a9..59b19b9975e8cfae08aac8f5b0e2da9268300229 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -125,6 +125,8 @@ static int __init amd_freq_sensitivity_init(void) if (!pcidev) { if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK)) return -ENODEV; + } else { + pci_dev_put(pcidev); } if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 69b3d61852ac61268453f7144a74b552943444ee..7e56a42750ea59eb3211856811f35c4b67942280 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1207,6 +1207,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) goto err_free_rcpumask; + init_completion(&policy->kobj_unregister); ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, cpufreq_global_kobject, "policy%u", cpu); if (ret) { @@ -1245,7 +1246,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) init_rwsem(&policy->rwsem); spin_lock_init(&policy->transition_lock); init_waitqueue_head(&policy->transition_wait); - init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); policy->cpu = cpu; diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 833589bc95e40d5e749a17652d310a8120d985f4..3c623a0bc147f855da54bcc5c300c9f6634767b7 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -125,7 +125,35 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy, return 0; } +static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) +{ + unsigned int lval; 
+ + if (data->soc_data->reg_current_vote) + lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff; + else + lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff; + + return lval * xo_rate; +} + +/* Get the current frequency of the CPU (after throttling) */ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) +{ + struct qcom_cpufreq_data *data; + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get_raw(cpu); + if (!policy) + return 0; + + data = policy->driver_data; + + return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ; +} + +/* Get the frequency requested by the cpufreq core for the CPU */ +static unsigned int qcom_cpufreq_get_freq(unsigned int cpu) { struct qcom_cpufreq_data *data; const struct qcom_cpufreq_soc_data *soc_data; @@ -193,6 +221,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, } } else if (ret != -ENODEV) { dev_err(cpu_dev, "Invalid opp table in device tree\n"); + kfree(table); return ret; } else { policy->fast_switch_possible = true; @@ -286,18 +315,6 @@ static void qcom_get_related_cpus(int index, struct cpumask *m) } } -static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data) -{ - unsigned int lval; - - if (data->soc_data->reg_current_vote) - lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff; - else - lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff; - - return lval * xo_rate; -} - static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) { struct cpufreq_policy *policy = data->policy; @@ -341,7 +358,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) * If h/w throttled frequency is higher than what cpufreq has requested * for, then stop polling and switch back to interrupt mechanism. */ - if (throttled_freq >= qcom_cpufreq_hw_get(cpu)) + if (throttled_freq >= qcom_cpufreq_get_freq(cpu)) enable_irq(data->throttle_irq); else mod_delayed_work(system_highpri_wq, &data->throttle_work, diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index 252f2a9686a6270f1c3a8f51c31d46bf8b7f0b27..448bc796b0b40567d5ad82f4c7e7460ac506de69 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -223,6 +223,6 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, * also be 0 on platforms with missing DT idle states or legacy DT * configuration predating the DT idle states bindings. */ - return i; + return state_idx - start_idx; } EXPORT_SYMBOL_GPL(dt_init_idle_driver); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 55e75fbb658ee1b5fd46a10fef0dde1e6dd3125d..4a618d80e106f89ea74265713b697bdab877a365 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -669,7 +669,12 @@ config CRYPTO_DEV_IMGTEC_HASH config CRYPTO_DEV_ROCKCHIP tristate "Rockchip's Cryptographic Engine driver" depends on OF && ARCH_ROCKCHIP + depends on PM + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_DES select CRYPTO_AES + select CRYPTO_ENGINE select CRYPTO_LIB_DES select CRYPTO_MD5 select CRYPTO_SHA1 @@ -785,8 +790,8 @@ config CRYPTO_DEV_CCREE select CRYPTO_ECB select CRYPTO_CTR select CRYPTO_XTS - select CRYPTO_SM4 - select CRYPTO_SM3 + select CRYPTO_SM4_GENERIC + select CRYPTO_SM3_GENERIC help Say 'Y' to enable a driver for the REE interface of the Arm TrustZone CryptoCell family of processors. 
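
For the qcom-cpufreq-hw change a little earlier, the point of splitting qcom_cpufreq_hw_get() from qcom_cpufreq_get_freq() is that the LMh polling loop must compare the hardware's throttled frequency against what the cpufreq core requested; comparing the throttled value against itself would re-enable the interrupt immediately. A stripped-down sketch of that decision, with the hardware reads abstracted into plain parameters:

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_lmh_data {
	int throttle_irq;
	struct delayed_work throttle_work;
};

static void demo_lmh_poll(struct demo_lmh_data *d, unsigned int throttled_khz,
			  unsigned int requested_khz)
{
	/*
	 * Hardware already grants at least what was asked for: switch back to
	 * interrupt mode. Otherwise keep polling the LMh status.
	 */
	if (throttled_khz >= requested_khz)
		enable_irq(d->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &d->throttle_work,
				 msecs_to_jiffies(10));
}
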
Currently the diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 910d6751644cf673ea218cb346d2cf6e7ba6ac8a..902f6be057ec6cd7db03b433d2301dae542aa2ab 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -124,7 +124,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq) unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; int i = 0; - u32 a; + dma_addr_t a; int err; rctx->ivlen = ivsize; diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 6e7ae896717cd2a21600f4282fc27145c3e18da3..937187027ad574acd8eeaf62f34680e587b60e02 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -237,7 +237,6 @@ static int meson_crypto_probe(struct platform_device *pdev) return err; } - mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); for (i = 0; i < MAXFLOW; i++) { mc->irqs[i] = platform_get_irq(pdev, i); if (mc->irqs[i] < 0) diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h index dc0f142324a3c042c5076fc438c900b1221dbac8..8c0746a1d6d43c30e34bb2718e6eb0e938247d86 100644 --- a/drivers/crypto/amlogic/amlogic-gxl.h +++ b/drivers/crypto/amlogic/amlogic-gxl.h @@ -95,7 +95,7 @@ struct meson_dev { struct device *dev; struct meson_flow *chanlist; atomic_t flow; - int *irqs; + int irqs[MAXFLOW]; #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG struct dentry *dbgfs_dir; #endif diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c index 9e7308e39b304c80d3c67dca856c836d1bf229dd..d4e06999af9b7e3ff6d5b5d80d82e92e277e8212 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c @@ -195,6 +195,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev) ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); if (!ndev->iov.pf2vf_wq) { kfree(ndev->iov.vfdev); + ndev->iov.vfdev = NULL; return -ENOMEM; } /* enable pf2vf mailbox interrupts */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 792d6da7f0c07aea57f8b169ca6ce398813c090b..084d052fddccbd63514efc15d7a3b95ceffba139 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -381,6 +381,15 @@ static const struct psp_vdata pspv3 = { .inten_reg = 0x10690, .intsts_reg = 0x10694, }; + +static const struct psp_vdata pspv4 = { + .sev = &sevv2, + .tee = &teev1, + .feature_reg = 0x109fc, + .inten_reg = 0x10690, + .intsts_reg = 0x10694, +}; + #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -426,7 +435,7 @@ static const struct sp_dev_vdata dev_vdata[] = { { /* 5 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP - .psp_vdata = &pspv2, + .psp_vdata = &pspv4, #endif }, { /* 6 */ diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 7083767602fcf98ad36051aa4279792aa9a0426c..8f008f024f8f1bee62b53339d1aa3327741940f7 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -55,7 +55,7 @@ void __init cc_debugfs_global_init(void) cc_debugfs_dir = debugfs_create_dir("ccree", NULL); } -void __exit cc_debugfs_global_fini(void) +void cc_debugfs_global_fini(void) { debugfs_remove(cc_debugfs_dir); } diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 
cadead18b59e8d6cf0862b49b074ab32cafcbc9f..d489c6f808925796655d5838aa010d7000dbbdaa 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -651,9 +651,17 @@ static struct platform_driver ccree_driver = { static int __init ccree_init(void) { + int rc; + cc_debugfs_global_init(); - return platform_driver_register(&ccree_driver); + rc = platform_driver_register(&ccree_driver); + if (rc) { + cc_debugfs_global_fini(); + return rc; + } + + return 0; } module_init(ccree_init); diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 27e1fa91206390d62f4b65a52d75980f5001b279..743ce4fc3158c68b46a6b0ded41b8f21b9345dcb 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -26,7 +26,7 @@ config CRYPTO_DEV_HISI_SEC2 select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 - select CRYPTO_SM4 + select CRYPTO_SM4_GENERIC depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 471e5ca720f5781551cffe39744deb0c800d15ac..baf1faec7046f73916c30ed5632cb817a3f4ae13 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1437,18 +1437,12 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void hpre_remove(struct pci_dev *pdev) { struct hisi_qm *qm = pci_get_drvdata(pdev); - int ret; hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &hpre_devices); hisi_qm_alg_unregister(qm, &hpre_devices); - if (qm->fun_type == QM_HW_PF && qm->vfs_num) { - ret = hisi_qm_sriov_disable(pdev, true); - if (ret) { - pci_err(pdev, "Disable SRIOV fail!\n"); - return; - } - } + if (qm->fun_type == QM_HW_PF && qm->vfs_num) + hisi_qm_sriov_disable(pdev, true); hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 8b387de69d229b7cb6059a6bc5094e277a4be16b..07e1e39a5e378a160a55729357f81a342237c4ac 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -250,7 +250,6 @@ #define QM_QOS_MIN_CIR_B 100 #define QM_QOS_MAX_CIR_U 6 #define QM_QOS_MAX_CIR_S 11 -#define QM_QOS_VAL_MAX_LEN 32 #define QM_DFX_BASE 0x0100000 #define QM_DFX_STATE1 0x0104000 #define QM_DFX_STATE2 0x01040C8 @@ -359,7 +358,7 @@ static const struct hisi_qm_cap_info qm_cap_info_vf[] = { static const struct hisi_qm_cap_info qm_basic_info[] = { {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400}, {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, - {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800}, + {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800}, {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000}, {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001}, @@ -909,8 +908,8 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, u32 depth; depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); - *high_bits = depth & QM_XQ_DEPTH_MASK; - *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; + *low_bits = depth & QM_XQ_DEPTH_MASK; + *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; } static u32 qm_get_irq_num(struct hisi_qm *qm) @@ -4614,7 +4613,7 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, 
const char *buf, unsigned int *fun_index) { char tbuf_bdf[QM_DBG_READ_LEN] = {0}; - char val_buf[QM_QOS_VAL_MAX_LEN] = {0}; + char val_buf[QM_DBG_READ_LEN] = {0}; u32 tmp1, device, function; int ret, bus; @@ -5725,6 +5724,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm) cmd = QM_VF_START_FAIL; } + qm_cmd_init(qm); ret = qm_ping_pf(qm, cmd); if (ret) dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); @@ -5786,7 +5786,6 @@ static void qm_pf_reset_vf_process(struct hisi_qm *qm, goto err_get_status; qm_pf_reset_vf_done(qm); - qm_cmd_init(qm); dev_info(dev, "device reset done.\n"); diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index d8e82d69745d8c95af1101c652a524e173edf540..9629e98bd68b7096af551610e63ca4fa2f139022 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -358,12 +358,16 @@ static int img_hash_dma_init(struct img_hash_dev *hdev) static void img_hash_dma_task(unsigned long d) { struct img_hash_dev *hdev = (struct img_hash_dev *)d; - struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); + struct img_hash_request_ctx *ctx; u8 *addr; size_t nbytes, bleft, wsend, len, tbc; struct scatterlist tsg; - if (!hdev->req || !ctx->sg) + if (!hdev->req) + return; + + ctx = ahash_request_ctx(hdev->req); + if (!ctx->sg) return; addr = sg_virt(ctx->sg); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 31e24df18877f52a07b6d55c36a2b1d78da1cd2d..20d0dcd50344b3335448adb83c0ef32c36e5596c 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1229,6 +1229,7 @@ struct n2_hash_tmpl { const u8 *hash_init; u8 hw_op_hashsz; u8 digest_size; + u8 statesize; u8 block_size; u8 auth_type; u8 hmac_type; @@ -1260,6 +1261,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_MD5, .hw_op_hashsz = MD5_DIGEST_SIZE, .digest_size = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), .block_size = MD5_HMAC_BLOCK_SIZE }, { .name = "sha1", .hash_zero = sha1_zero_message_hash, @@ -1268,6 +1270,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA1, .hw_op_hashsz = SHA1_DIGEST_SIZE, .digest_size = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), .block_size = SHA1_BLOCK_SIZE }, { .name = "sha256", .hash_zero = sha256_zero_message_hash, @@ -1276,6 +1279,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_HMAC_SHA256, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = SHA256_BLOCK_SIZE }, { .name = "sha224", .hash_zero = sha224_zero_message_hash, @@ -1284,6 +1288,7 @@ static const struct n2_hash_tmpl hash_tmpls[] = { .hmac_type = AUTH_TYPE_RESERVED, .hw_op_hashsz = SHA256_DIGEST_SIZE, .digest_size = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), .block_size = SHA224_BLOCK_SIZE }, }; #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) @@ -1424,6 +1429,7 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) halg = &ahash->halg; halg->digestsize = tmpl->digest_size; + halg->statesize = tmpl->statesize; base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 655a7f5a406a17d80ebb3103acdd0650a111fdec..cbeda59c6b1911e7cd17e7eb180770b32936fc1f 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -2114,7 +2114,7 @@ static int omap_sham_probe(struct platform_device *pdev) pm_runtime_enable(dev); - 
err = pm_runtime_get_sync(dev); + err = pm_runtime_resume_and_get(dev); if (err < 0) { dev_err(dev, "failed to get sync: %d\n", err); goto err_pm; diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c index 2f212561acc47b8c95e52c076ae7cc92980e06c0..670a58b25cb166a9479824af9dbe5462332482e4 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c @@ -261,6 +261,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); if (!hw_data->accel_capabilities_mask) { dev_err(&pdev->dev, "Failed to get capabilities mask.\n"); + ret = -EINVAL; goto out_err; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index 35d73061d1569b292de14d6a206d832f800e8007..14a0aef18ab1367b672c2a2df7110f34068c291b 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -65,186 +65,24 @@ static void rk_crypto_disable_clk(struct rk_crypto_info *dev) clk_disable_unprepare(dev->sclk); } -static int check_alignment(struct scatterlist *sg_src, - struct scatterlist *sg_dst, - int align_mask) -{ - int in, out, align; - - in = IS_ALIGNED((uint32_t)sg_src->offset, 4) && - IS_ALIGNED((uint32_t)sg_src->length, align_mask); - if (!sg_dst) - return in; - out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) && - IS_ALIGNED((uint32_t)sg_dst->length, align_mask); - align = in && out; - - return (align && (sg_src->length == sg_dst->length)); -} - -static int rk_load_data(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst) -{ - unsigned int count; - - dev->aligned = dev->aligned ? - check_alignment(sg_src, sg_dst, dev->align_size) : - dev->aligned; - if (dev->aligned) { - count = min(dev->left_bytes, sg_src->length); - dev->left_bytes -= count; - - if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n", - __func__, __LINE__); - return -EINVAL; - } - dev->addr_in = sg_dma_address(sg_src); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(dst) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, sg_src, 1, - DMA_TO_DEVICE); - return -EINVAL; - } - dev->addr_out = sg_dma_address(sg_dst); - } - } else { - count = (dev->left_bytes > PAGE_SIZE) ? - PAGE_SIZE : dev->left_bytes; - - if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, - dev->addr_vir, count, - dev->total - dev->left_bytes)) { - dev_err(dev->dev, "[%s:%d] pcopy err\n", - __func__, __LINE__); - return -EINVAL; - } - dev->left_bytes -= count; - sg_init_one(&dev->sg_tmp, dev->addr_vir, count); - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) { - dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - return -ENOMEM; - } - dev->addr_in = sg_dma_address(&dev->sg_tmp); - - if (sg_dst) { - if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, - DMA_FROM_DEVICE)) { - dev_err(dev->dev, - "[%s:%d] dma_map_sg(sg_tmp) error\n", - __func__, __LINE__); - dma_unmap_sg(dev->dev, &dev->sg_tmp, 1, - DMA_TO_DEVICE); - return -ENOMEM; - } - dev->addr_out = sg_dma_address(&dev->sg_tmp); - } - } - dev->count = count; - return 0; -} - -static void rk_unload_data(struct rk_crypto_info *dev) -{ - struct scatterlist *sg_in, *sg_out; - - sg_in = dev->aligned ? 
dev->sg_src : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE); - - if (dev->sg_dst) { - sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp; - dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE); - } -} - static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) { struct rk_crypto_info *dev = platform_get_drvdata(dev_id); u32 interrupt_status; - spin_lock(&dev->lock); interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); + dev->status = 1; if (interrupt_status & 0x0a) { dev_warn(dev->dev, "DMA Error\n"); - dev->err = -EFAULT; + dev->status = 0; } - tasklet_schedule(&dev->done_task); + complete(&dev->complete); - spin_unlock(&dev->lock); return IRQ_HANDLED; } -static int rk_crypto_enqueue(struct rk_crypto_info *dev, - struct crypto_async_request *async_req) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&dev->lock, flags); - ret = crypto_enqueue_request(&dev->queue, async_req); - if (dev->busy) { - spin_unlock_irqrestore(&dev->lock, flags); - return ret; - } - dev->busy = true; - spin_unlock_irqrestore(&dev->lock, flags); - tasklet_schedule(&dev->queue_task); - - return ret; -} - -static void rk_crypto_queue_task_cb(unsigned long data) -{ - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - struct crypto_async_request *async_req, *backlog; - unsigned long flags; - int err = 0; - - dev->err = 0; - spin_lock_irqsave(&dev->lock, flags); - backlog = crypto_get_backlog(&dev->queue); - async_req = crypto_dequeue_request(&dev->queue); - - if (!async_req) { - dev->busy = false; - spin_unlock_irqrestore(&dev->lock, flags); - return; - } - spin_unlock_irqrestore(&dev->lock, flags); - - if (backlog) { - backlog->complete(backlog, -EINPROGRESS); - backlog = NULL; - } - - dev->async_req = async_req; - err = dev->start(dev); - if (err) - dev->complete(dev->async_req, err); -} - -static void rk_crypto_done_task_cb(unsigned long data) -{ - struct rk_crypto_info *dev = (struct rk_crypto_info *)data; - - if (dev->err) { - dev->complete(dev->async_req, dev->err); - return; - } - - dev->err = dev->update(dev); - if (dev->err) - dev->complete(dev->async_req, dev->err); -} - static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_ecb_aes_alg, &rk_cbc_aes_alg, @@ -337,8 +175,6 @@ static int rk_crypto_probe(struct platform_device *pdev) if (err) goto err_crypto; - spin_lock_init(&crypto_info->lock); - crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crypto_info->reg)) { err = PTR_ERR(crypto_info->reg); @@ -389,18 +225,11 @@ static int rk_crypto_probe(struct platform_device *pdev) crypto_info->dev = &pdev->dev; platform_set_drvdata(pdev, crypto_info); - tasklet_init(&crypto_info->queue_task, - rk_crypto_queue_task_cb, (unsigned long)crypto_info); - tasklet_init(&crypto_info->done_task, - rk_crypto_done_task_cb, (unsigned long)crypto_info); - crypto_init_queue(&crypto_info->queue, 50); + crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); + crypto_engine_start(crypto_info->engine); + init_completion(&crypto_info->complete); - crypto_info->enable_clk = rk_crypto_enable_clk; - crypto_info->disable_clk = rk_crypto_disable_clk; - crypto_info->load_data = rk_load_data; - crypto_info->unload_data = rk_unload_data; - crypto_info->enqueue = rk_crypto_enqueue; - crypto_info->busy = false; + rk_crypto_enable_clk(crypto_info); err = rk_crypto_register(crypto_info); if (err) { @@ -412,9 +241,9 @@ static int rk_crypto_probe(struct platform_device *pdev) return 0; err_register_alg: - 
tasklet_kill(&crypto_info->queue_task); - tasklet_kill(&crypto_info->done_task); + crypto_engine_exit(crypto_info->engine); err_crypto: + dev_err(dev, "Crypto Accelerator not successfully registered\n"); return err; } @@ -423,8 +252,8 @@ static int rk_crypto_remove(struct platform_device *pdev) struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); rk_crypto_unregister(); - tasklet_kill(&crypto_tmp->done_task); - tasklet_kill(&crypto_tmp->queue_task); + rk_crypto_disable_clk(crypto_tmp); + crypto_engine_exit(crypto_tmp->engine); return 0; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index 97278c2574ff93a93c2c7f01467a6a1895f14264..045e811b4af8484871ec186ba98758277e08e409 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -5,9 +5,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -193,45 +195,15 @@ struct rk_crypto_info { struct reset_control *rst; void __iomem *reg; int irq; - struct crypto_queue queue; - struct tasklet_struct queue_task; - struct tasklet_struct done_task; - struct crypto_async_request *async_req; - int err; - /* device lock */ - spinlock_t lock; - - /* the public variable */ - struct scatterlist *sg_src; - struct scatterlist *sg_dst; - struct scatterlist sg_tmp; - struct scatterlist *first; - unsigned int left_bytes; - void *addr_vir; - int aligned; - int align_size; - size_t src_nents; - size_t dst_nents; - unsigned int total; - unsigned int count; - dma_addr_t addr_in; - dma_addr_t addr_out; - bool busy; - int (*start)(struct rk_crypto_info *dev); - int (*update)(struct rk_crypto_info *dev); - void (*complete)(struct crypto_async_request *base, int err); - int (*enable_clk)(struct rk_crypto_info *dev); - void (*disable_clk)(struct rk_crypto_info *dev); - int (*load_data)(struct rk_crypto_info *dev, - struct scatterlist *sg_src, - struct scatterlist *sg_dst); - void (*unload_data)(struct rk_crypto_info *dev); - int (*enqueue)(struct rk_crypto_info *dev, - struct crypto_async_request *async_req); + + struct crypto_engine *engine; + struct completion complete; + int status; }; /* the private variable of hash */ struct rk_ahash_ctx { + struct crypto_engine_ctx enginectx; struct rk_crypto_info *dev; /* for fallback */ struct crypto_ahash *fallback_tfm; @@ -241,14 +213,23 @@ struct rk_ahash_ctx { struct rk_ahash_rctx { struct ahash_request fallback_req; u32 mode; + int nrsg; }; /* the private variable of cipher */ struct rk_cipher_ctx { + struct crypto_engine_ctx enginectx; struct rk_crypto_info *dev; unsigned int keylen; - u32 mode; + u8 key[AES_MAX_KEY_SIZE]; u8 iv[AES_BLOCK_SIZE]; + struct crypto_skcipher *fallback_tfm; +}; + +struct rk_cipher_rctx { + u8 backup_iv[AES_BLOCK_SIZE]; + u32 mode; + struct skcipher_request fallback_req; // keep at the end }; enum alg_type { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index ed03058497bc2863e1c6333fb42f025efd85291b..edd40e16a3f0a765a78b8662a8a9166932097940 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -9,6 +9,7 @@ * Some ideas are from marvell/cesa.c and s5p-sss.c driver. */ #include +#include #include "rk3288_crypto.h" /* @@ -16,6 +17,40 @@ * so we put the fixed hash out when met zero message. 
*/ +static bool rk_ahash_need_fallback(struct ahash_request *req) +{ + struct scatterlist *sg; + + sg = req->src; + while (sg) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + return true; + } + if (sg->length % 4) { + return true; + } + sg = sg_next(sg); + } + return false; +} + +static int rk_ahash_digest_fb(struct ahash_request *areq) +{ + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); + + ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); + rctx->fallback_req.base.flags = areq->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP; + + rctx->fallback_req.nbytes = areq->nbytes; + rctx->fallback_req.src = areq->src; + rctx->fallback_req.result = areq->result; + + return crypto_ahash_digest(&rctx->fallback_req); +} + static int zero_message_process(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -38,16 +73,12 @@ static int zero_message_process(struct ahash_request *req) return 0; } -static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err) -{ - if (base->complete) - base->complete(base, err); -} - -static void rk_ahash_reg_init(struct rk_crypto_info *dev) +static void rk_ahash_reg_init(struct ahash_request *req) { - struct ahash_request *req = ahash_request_cast(dev->async_req); struct rk_ahash_rctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + struct rk_crypto_info *dev = tctx->dev; int reg_status; reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | @@ -74,7 +105,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev) RK_CRYPTO_BYTESWAP_BRFIFO | RK_CRYPTO_BYTESWAP_BTFIFO); - CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total); + CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); } static int rk_ahash_init(struct ahash_request *req) @@ -167,48 +198,64 @@ static int rk_ahash_digest(struct ahash_request *req) struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct rk_crypto_info *dev = tctx->dev; + if (rk_ahash_need_fallback(req)) + return rk_ahash_digest_fb(req); + if (!req->nbytes) return zero_message_process(req); - else - return dev->enqueue(dev, &req->base); + + return crypto_transfer_hash_request_to_engine(dev->engine, req); } -static void crypto_ahash_dma_start(struct rk_crypto_info *dev) +static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) { - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); + CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | (RK_CRYPTO_HASH_START << 16)); } -static int rk_ahash_set_data_start(struct rk_crypto_info *dev) +static int rk_hash_prepare(struct crypto_engine *engine, void *breq) +{ + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + int ret; + + ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + if (ret <= 0) + return -EINVAL; + + rctx->nrsg = ret; + + return 0; +} + +static int rk_hash_unprepare(struct crypto_engine *engine, void *breq) { - int err; + struct ahash_request *areq = container_of(breq, struct ahash_request, 
base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); - err = dev->load_data(dev, dev->sg_src, NULL); - if (!err) - crypto_ahash_dma_start(dev); - return err; + dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); + return 0; } -static int rk_ahash_start(struct rk_crypto_info *dev) +static int rk_hash_run(struct crypto_engine *engine, void *breq) { - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - struct rk_ahash_rctx *rctx; - - dev->total = req->nbytes; - dev->left_bytes = req->nbytes; - dev->aligned = 0; - dev->align_size = 4; - dev->sg_dst = NULL; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - rctx = ahash_request_ctx(req); + struct ahash_request *areq = container_of(breq, struct ahash_request, base); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); + struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); + struct scatterlist *sg = areq->src; + int err = 0; + int i; + u32 v; + rctx->mode = 0; - tfm = crypto_ahash_reqtfm(req); switch (crypto_ahash_digestsize(tfm)) { case SHA1_DIGEST_SIZE: rctx->mode = RK_CRYPTO_HASH_SHA1; @@ -220,32 +267,26 @@ static int rk_ahash_start(struct rk_crypto_info *dev) rctx->mode = RK_CRYPTO_HASH_MD5; break; default: - return -EINVAL; + err = -EINVAL; + goto theend; } - rk_ahash_reg_init(dev); - return rk_ahash_set_data_start(dev); -} - -static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct ahash_request *req = ahash_request_cast(dev->async_req); - struct crypto_ahash *tfm; - - dev->unload_data(dev); - if (dev->left_bytes) { - if (dev->aligned) { - if (sg_is_last(dev->sg_src)) { - dev_warn(dev->dev, "[%s:%d], Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; - } - dev->sg_src = sg_next(dev->sg_src); + rk_ahash_reg_init(areq); + + while (sg) { + reinit_completion(&tctx->dev->complete); + tctx->dev->status = 0; + crypto_ahash_dma_start(tctx->dev, sg); + wait_for_completion_interruptible_timeout(&tctx->dev->complete, + msecs_to_jiffies(2000)); + if (!tctx->dev->status) { + dev_err(tctx->dev->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; } - err = rk_ahash_set_data_start(dev); - } else { + sg = sg_next(sg); + } + /* * it will take some time to process date after last dma * transmission. @@ -256,18 +297,20 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev) * efficiency, and make it response quickly when dma * complete. 
*/ - while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS)) - udelay(10); - - tfm = crypto_ahash_reqtfm(req); - memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0, - crypto_ahash_digestsize(tfm)); - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS)) + udelay(10); + + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { + v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); + put_unaligned_le32(v, areq->result + i * 4); } -out_rx: - return err; +theend: + local_bh_disable(); + crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); + + return 0; } static int rk_cra_hash_init(struct crypto_tfm *tfm) @@ -281,14 +324,6 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) algt = container_of(alg, struct rk_crypto_tmp, alg.hash); tctx->dev = algt->dev; - tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL); - if (!tctx->dev->addr_vir) { - dev_err(tctx->dev->dev, "failed to kmalloc for addr_vir\n"); - return -ENOMEM; - } - tctx->dev->start = rk_ahash_start; - tctx->dev->update = rk_ahash_crypto_rx; - tctx->dev->complete = rk_ahash_crypto_complete; /* for fallback */ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, @@ -297,19 +332,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm) dev_err(tctx->dev->dev, "Could not load fallback driver.\n"); return PTR_ERR(tctx->fallback_tfm); } + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct rk_ahash_rctx) + crypto_ahash_reqsize(tctx->fallback_tfm)); - return tctx->dev->enable_clk(tctx->dev); + tctx->enginectx.op.do_one_request = rk_hash_run; + tctx->enginectx.op.prepare_request = rk_hash_prepare; + tctx->enginectx.op.unprepare_request = rk_hash_unprepare; + + return 0; } static void rk_cra_hash_exit(struct crypto_tfm *tfm) { struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm); - free_page((unsigned long)tctx->dev->addr_vir); - return tctx->dev->disable_clk(tctx->dev); + crypto_free_ahash(tctx->fallback_tfm); } struct rk_crypto_tmp rk_ahash_sha1 = { diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 5bbf0d2722e11cffef500ad42a27cb7d5e65980a..67a7e05d5ae31ef5bcb70eec2e1ceb449f9978dc 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -9,23 +9,77 @@ * Some ideas are from marvell-cesa.c and s5p-sss.c driver. 
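With the hash path converted, the request life cycle is the standard crypto_engine one: the tfm init hook fills in the crypto_engine_ctx callbacks, ->digest() merely queues the request, the engine thread calls do_one_request, and the driver reports the result through crypto_finalize_hash_request(). A compressed sketch of that flow, with hypothetical demo_* names and the hardware access elided:

#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <linux/bottom_half.h>
#include <linux/kernel.h>

struct demo_tctx {
	struct crypto_engine_ctx enginectx;	/* must be the first member */
	struct crypto_engine *engine;		/* from crypto_engine_alloc_init() */
};

static int demo_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	int err = 0;

	/* program the hardware for this request here, set err on failure */

	local_bh_disable();
	crypto_finalize_hash_request(engine, areq, err);
	local_bh_enable();
	return 0;
}

static int demo_digest(struct ahash_request *areq)
{
	struct demo_tctx *tctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

	/* only queue it; the engine thread will call demo_hash_run() later */
	return crypto_transfer_hash_request_to_engine(tctx->engine, areq);
}

static void demo_register_ops(struct demo_tctx *tctx)
{
	tctx->enginectx.op.do_one_request = demo_hash_run;
	tctx->enginectx.op.prepare_request = NULL;
	tctx->enginectx.op.unprepare_request = NULL;
}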
*/ #include +#include #include "rk3288_crypto.h" #define RK_CRYPTO_DEC BIT(0) -static void rk_crypto_complete(struct crypto_async_request *base, int err) +static int rk_cipher_need_fallback(struct skcipher_request *req) { - if (base->complete) - base->complete(base, err); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + unsigned int bs = crypto_skcipher_blocksize(tfm); + struct scatterlist *sgs, *sgd; + unsigned int stodo, dtodo, len; + + if (!req->cryptlen) + return true; + + len = req->cryptlen; + sgs = req->src; + sgd = req->dst; + while (sgs && sgd) { + if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { + return true; + } + if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { + return true; + } + stodo = min(len, sgs->length); + if (stodo % bs) { + return true; + } + dtodo = min(len, sgd->length); + if (dtodo % bs) { + return true; + } + if (stodo != dtodo) { + return true; + } + len -= stodo; + sgs = sg_next(sgs); + sgd = sg_next(sgd); + } + return false; +} + +static int rk_cipher_fallback(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + int err; + + skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + if (rctx->mode & RK_CRYPTO_DEC) + err = crypto_skcipher_decrypt(&rctx->fallback_req); + else + err = crypto_skcipher_encrypt(&rctx->fallback_req); + return err; } static int rk_handle_req(struct rk_crypto_info *dev, struct skcipher_request *req) { - if (!IS_ALIGNED(req->cryptlen, dev->align_size)) - return -EINVAL; - else - return dev->enqueue(dev, &req->base); + struct crypto_engine *engine = dev->engine; + + if (rk_cipher_need_fallback(req)) + return rk_cipher_fallback(req); + + return crypto_transfer_skcipher_request_to_engine(engine, req); } static int rk_aes_setkey(struct crypto_skcipher *cipher, @@ -38,8 +92,9 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher, keylen != AES_KEYSIZE_256) return -EINVAL; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_des_setkey(struct crypto_skcipher *cipher, @@ -53,8 +108,9 @@ static int rk_des_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_tdes_setkey(struct crypto_skcipher *cipher, @@ -68,17 +124,19 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher, return err; ctx->keylen = keylen; - memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen); - return 0; + memcpy(ctx->key, key, keylen); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_aes_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_ECB_MODE; + rctx->mode = RK_CRYPTO_AES_ECB_MODE; return rk_handle_req(dev, req); } @@ -86,9 +144,10 @@ 
static int rk_aes_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -96,9 +155,10 @@ static int rk_aes_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_CBC_MODE; + rctx->mode = RK_CRYPTO_AES_CBC_MODE; return rk_handle_req(dev, req); } @@ -106,9 +166,10 @@ static int rk_aes_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -116,9 +177,10 @@ static int rk_des_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = 0; + rctx->mode = 0; return rk_handle_req(dev, req); } @@ -126,9 +188,10 @@ static int rk_des_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -136,9 +199,10 @@ static int rk_des_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -146,9 +210,10 @@ static int rk_des_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -156,9 +221,10 @@ static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT; + rctx->mode = RK_CRYPTO_TDES_SELECT; return rk_handle_req(dev, req); } @@ -166,9 +232,10 @@ static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct 
rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } @@ -176,9 +243,10 @@ static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_handle_req(dev, req); } @@ -186,43 +254,42 @@ static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *dev = ctx->dev; - ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | + rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_handle_req(dev, req); } -static void rk_ablk_hw_init(struct rk_crypto_info *dev) +static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) { - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); - u32 ivsize, block, conf_reg = 0; + u32 block, conf_reg = 0; block = crypto_tfm_alg_blocksize(tfm); - ivsize = crypto_skcipher_ivsize(cipher); if (block == DES_BLOCK_SIZE) { - ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | RK_CRYPTO_TDES_BYTESWAP_KEY | RK_CRYPTO_TDES_BYTESWAP_IV; - CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize); + CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); conf_reg = RK_CRYPTO_DESSEL; } else { - ctx->mode |= RK_CRYPTO_AES_FIFO_MODE | + rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | RK_CRYPTO_AES_KEY_CHANGE | RK_CRYPTO_AES_BYTESWAP_KEY | RK_CRYPTO_AES_BYTESWAP_IV; if (ctx->keylen == AES_KEYSIZE_192) - ctx->mode |= RK_CRYPTO_AES_192BIT_key; + rctx->mode |= RK_CRYPTO_AES_192BIT_key; else if (ctx->keylen == AES_KEYSIZE_256) - ctx->mode |= RK_CRYPTO_AES_256BIT_key; - CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode); - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize); + rctx->mode |= RK_CRYPTO_AES_256BIT_key; + CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); } conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | RK_CRYPTO_BYTESWAP_BRFIFO; @@ -231,146 +298,138 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev) RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); } -static void crypto_dma_start(struct rk_crypto_info *dev) +static void crypto_dma_start(struct rk_crypto_info *dev, + struct scatterlist *sgs, + struct scatterlist *sgd, unsigned int todo) { - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in); - CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4); - CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); + CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); + CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, 
sg_dma_address(sgd)); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | _SBF(RK_CRYPTO_BLOCK_START, 16)); } -static int rk_set_data_start(struct rk_crypto_info *dev) +static int rk_cipher_run(struct crypto_engine *engine, void *async_req) { - int err; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + - dev->sg_src->offset + dev->sg_src->length - ivsize; - - /* Store the iv that need to be updated in chain mode. - * And update the IV buffer to contain the next IV for decryption mode. - */ - if (ctx->mode & RK_CRYPTO_DEC) { - memcpy(ctx->iv, src_last_blk, ivsize); - sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv, - ivsize, dev->total - ivsize); - } - - err = dev->load_data(dev, dev->sg_src, dev->sg_dst); - if (!err) - crypto_dma_start(dev); - return err; -} - -static int rk_ablk_start(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - unsigned long flags; + struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sgs, *sgd; int err = 0; + int ivsize = crypto_skcipher_ivsize(tfm); + int offset; + u8 iv[AES_BLOCK_SIZE]; + u8 biv[AES_BLOCK_SIZE]; + u8 *ivtouse = areq->iv; + unsigned int len = areq->cryptlen; + unsigned int todo; + + ivsize = crypto_skcipher_ivsize(tfm); + if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { + if (rctx->mode & RK_CRYPTO_DEC) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(rctx->backup_iv, areq->src, + offset, ivsize, 0); + } + } - dev->left_bytes = req->cryptlen; - dev->total = req->cryptlen; - dev->sg_src = req->src; - dev->first = req->src; - dev->src_nents = sg_nents(req->src); - dev->sg_dst = req->dst; - dev->dst_nents = sg_nents(req->dst); - dev->aligned = 1; - - spin_lock_irqsave(&dev->lock, flags); - rk_ablk_hw_init(dev); - err = rk_set_data_start(dev); - spin_unlock_irqrestore(&dev->lock, flags); - return err; -} + sgs = areq->src; + sgd = areq->dst; -static void rk_iv_copyback(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - - /* Update the IV buffer to contain the next IV for encryption mode. 
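The trickiest part of the skcipher conversion is CBC IV chaining across scatterlist entries: the next IV is always the last ciphertext block, so for encryption it is read back from the end of the destination once the hardware finishes, while for decryption the last block of the source must be saved before it can be overwritten by an in-place operation. A small sketch of the two helpers this boils down to, with hypothetical demo_* names:

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/*
 * Decryption: remember the last ciphertext block of this source entry
 * before the hardware may overwrite it (in-place operation).
 */
static void demo_save_next_iv(struct scatterlist *src, unsigned int ivsize,
			      u8 *backup)
{
	scatterwalk_map_and_copy(backup, src, src->length - ivsize, ivsize, 0);
}

/* Encryption: the next IV is the last ciphertext block just written out. */
static void demo_load_next_iv(struct scatterlist *dst, unsigned int ivsize,
			      u8 *iv)
{
	scatterwalk_map_and_copy(iv, dst, dst->length - ivsize, ivsize, 0);
}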
*/ - if (!(ctx->mode & RK_CRYPTO_DEC)) { - if (dev->aligned) { - memcpy(req->iv, sg_virt(dev->sg_dst) + - dev->sg_dst->length - ivsize, ivsize); + while (sgs && sgd && len) { + if (!sgs->length) { + sgs = sg_next(sgs); + sgd = sg_next(sgd); + continue; + } + if (rctx->mode & RK_CRYPTO_DEC) { + /* we backup last block of source to be used as IV at next step */ + offset = sgs->length - ivsize; + scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); + } + if (sgs == sgd) { + err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + } else { + err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_iv; + } + err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); + if (err <= 0) { + err = -EINVAL; + goto theend_sgs; + } + } + err = 0; + rk_ablk_hw_init(ctx->dev, areq); + if (ivsize) { + if (ivsize == DES_BLOCK_SIZE) + memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); + else + memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); + } + reinit_completion(&ctx->dev->complete); + ctx->dev->status = 0; + + todo = min(sg_dma_len(sgs), len); + len -= todo; + crypto_dma_start(ctx->dev, sgs, sgd, todo / 4); + wait_for_completion_interruptible_timeout(&ctx->dev->complete, + msecs_to_jiffies(2000)); + if (!ctx->dev->status) { + dev_err(ctx->dev->dev, "DMA timeout\n"); + err = -EFAULT; + goto theend; + } + if (sgs == sgd) { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); + } + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(iv, biv, ivsize); + ivtouse = iv; } else { - memcpy(req->iv, dev->addr_vir + - dev->count - ivsize, ivsize); + offset = sgd->length - ivsize; + scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); + ivtouse = iv; } + sgs = sg_next(sgs); + sgd = sg_next(sgd); } -} - -static void rk_update_iv(struct rk_crypto_info *dev) -{ - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - u32 ivsize = crypto_skcipher_ivsize(tfm); - u8 *new_iv = NULL; - if (ctx->mode & RK_CRYPTO_DEC) { - new_iv = ctx->iv; - } else { - new_iv = page_address(sg_page(dev->sg_dst)) + - dev->sg_dst->offset + dev->sg_dst->length - ivsize; + if (areq->iv && ivsize > 0) { + offset = areq->cryptlen - ivsize; + if (rctx->mode & RK_CRYPTO_DEC) { + memcpy(areq->iv, rctx->backup_iv, ivsize); + memzero_explicit(rctx->backup_iv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); + } } - if (ivsize == DES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); - else if (ivsize == AES_BLOCK_SIZE) - memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); -} +theend: + local_bh_disable(); + crypto_finalize_skcipher_request(engine, areq, err); + local_bh_enable(); + return 0; -/* return: - * true some err was occurred - * fault no err, continue - */ -static int rk_ablk_rx(struct rk_crypto_info *dev) -{ - int err = 0; - struct skcipher_request *req = - skcipher_request_cast(dev->async_req); - - dev->unload_data(dev); - if (!dev->aligned) { - if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, - dev->addr_vir, dev->count, - dev->total - dev->left_bytes - - dev->count)) { - err = -EINVAL; - goto out_rx; - } - } - if (dev->left_bytes) { - rk_update_iv(dev); - if (dev->aligned) { 
- if (sg_is_last(dev->sg_src)) { - dev_err(dev->dev, "[%s:%d] Lack of data\n", - __func__, __LINE__); - err = -ENOMEM; - goto out_rx; - } - dev->sg_src = sg_next(dev->sg_src); - dev->sg_dst = sg_next(dev->sg_dst); - } - err = rk_set_data_start(dev); +theend_sgs: + if (sgs == sgd) { + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL); } else { - rk_iv_copyback(dev); - /* here show the calculation is over without any err */ - dev->complete(dev->async_req, 0); - tasklet_schedule(&dev->queue_task); + dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE); + dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE); } -out_rx: +theend_iv: return err; } @@ -378,26 +437,34 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); struct rk_crypto_tmp *algt; algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher); ctx->dev = algt->dev; - ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1; - ctx->dev->start = rk_ablk_start; - ctx->dev->update = rk_ablk_rx; - ctx->dev->complete = rk_crypto_complete; - ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL); - return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM; + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { + dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(ctx->fallback_tfm)); + return PTR_ERR(ctx->fallback_tfm); + } + + tfm->reqsize = sizeof(struct rk_cipher_rctx) + + crypto_skcipher_reqsize(ctx->fallback_tfm); + + ctx->enginectx.op.do_one_request = rk_cipher_run; + + return 0; } static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); - free_page((unsigned long)ctx->dev->addr_vir); - ctx->dev->disable_clk(ctx->dev); + memzero_explicit(ctx->key, ctx->keylen); + crypto_free_skcipher(ctx->fallback_tfm); } struct rk_crypto_tmp rk_ecb_aes_alg = { @@ -406,7 +473,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, @@ -428,7 +495,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, @@ -451,7 +518,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, @@ -473,7 +540,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), 
.base.cra_alignmask = 0x07, @@ -496,7 +563,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3-ede-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, @@ -518,7 +585,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3-ede-rk", .base.cra_priority = 300, - .base.cra_flags = CRYPTO_ALG_ASYNC, + .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index e553ccadbcbc859a38ce112ca6ef8df2407f10a6..e5876286828b8e53e0dba6fa705090b299dffc66 100644 --- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session( pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n", ctrl_status->status, destroy_session->session_id); - return -EINVAL; + err = -EINVAL; + goto out; } err = 0; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index f9ae5ad284ffb0c8ac3e16a84d8896052006dc51..c4f32c32dfd5084ad462d8909407cd105c147b7c 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1226,7 +1226,7 @@ static int cxl_region_attach(struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled_target; struct cxl_memdev *cxlmd_target; - cxled_target = p->targets[pos]; + cxled_target = p->targets[i]; if (!cxled_target) continue; @@ -1923,6 +1923,9 @@ static int cxl_region_probe(struct device *dev) */ up_read(&cxl_region_rwsem); + if (rc) + return rc; + switch (cxlr->mode) { case CXL_DECODER_PMEM: return devm_cxl_add_pmem_region(cxlr); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 63347a5ae5999af2053b0802d71dcae1c551d0e2..8c5f6f7fca112266ab59abaf9dba090fd4d0052a 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -776,8 +776,7 @@ static void remove_sysfs_files(struct devfreq *devfreq, * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. - * @data: private data for the governor. The devfreq framework does not - * touch this value. + * @data: devfreq driver pass to governors, governor should not change it. */ struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, @@ -1011,8 +1010,7 @@ static void devm_devfreq_dev_release(struct device *dev, void *res) * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. - * @data: private data for the governor. The devfreq framework does not - * touch this value. + * @data: devfreq driver pass to governors, governor should not change it. 
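The devfreq hunks here separate two pointers that used to share df->data: @data remains the blob the devfreq driver passes in at devfreq_add_device() time, while governors now keep their own bookkeeping in the governor_data member this series introduces. A minimal sketch of the convention from a governor's point of view, with hypothetical demo_* names (the real governor hooks live in the private drivers/devfreq/governor.h):

#include <linux/devfreq.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_gov_data {
	unsigned long user_frequency;
	bool valid;
};

/* governor start: allocate private state and hang it off governor_data */
static int demo_gov_start(struct devfreq *df)
{
	struct demo_gov_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	df->governor_data = data;	/* df->data still belongs to the driver */
	return 0;
}

/* frequency decision: read only our own governor_data, never df->data */
static int demo_gov_get_target(struct devfreq *df, unsigned long *freq)
{
	struct demo_gov_data *data = df->governor_data;

	*freq = data->valid ? data->user_frequency : DEVFREQ_MIN_FREQ;
	return 0;
}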
* * This function manages automatically the memory of devfreq device using device * resource management and simplify the free operation for memory of devfreq diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index ab9db7adb3adec069134f5abe8d20a04325f48e7..d69672ccacc49b142a337dc5048759aba0f8cc89 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -21,7 +21,7 @@ struct userspace_data { static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq) { - struct userspace_data *data = df->data; + struct userspace_data *data = df->governor_data; if (data->valid) *freq = data->user_frequency; @@ -40,7 +40,7 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr, int err = 0; mutex_lock(&devfreq->lock); - data = devfreq->data; + data = devfreq->governor_data; sscanf(buf, "%lu", &wanted); data->user_frequency = wanted; @@ -60,7 +60,7 @@ static ssize_t set_freq_show(struct device *dev, int err = 0; mutex_lock(&devfreq->lock); - data = devfreq->data; + data = devfreq->governor_data; if (data->valid) err = sprintf(buf, "%lu\n", data->user_frequency); @@ -91,7 +91,7 @@ static int userspace_init(struct devfreq *devfreq) goto out; } data->valid = false; - devfreq->data = data; + devfreq->governor_data = data; err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); out: @@ -107,8 +107,8 @@ static void userspace_exit(struct devfreq *devfreq) if (devfreq->dev.kobj.sd) sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); - kfree(devfreq->data); - devfreq->data = NULL; + kfree(devfreq->governor_data); + devfreq->governor_data = NULL; } static int devfreq_userspace_handler(struct devfreq *devfreq, diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 0e5a5662d5a40c21d1aa2c9e38eb200af70324e1..0a051d65688002cce5f80b52a9d3c6feaba0a644 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -109,6 +109,12 @@ static char dio_no_name[] = { 0 }; #endif /* CONFIG_DIO_CONSTANTS */ +static void dio_dev_release(struct device *dev) +{ + struct dio_dev *ddev = container_of(dev, typeof(struct dio_dev), dev); + kfree(ddev); +} + int __init dio_find(int deviceid) { /* Called to find a DIO device before the full bus scan has run. @@ -225,6 +231,7 @@ static int __init dio_init(void) dev->bus = &dio_bus; dev->dev.parent = &dio_bus.dev; dev->dev.bus = &dio_bus_type; + dev->dev.release = dio_dev_release; dev->scode = scode; dev->resource.start = pa; dev->resource.end = pa + DIO_SIZE(scode, va); @@ -252,6 +259,7 @@ static int __init dio_init(void) if (error) { pr_err("DIO: Error registering device %s\n", dev->name); + put_device(&dev->dev); continue; } error = dio_create_sysfs_dev_files(dev); diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index a2cc520225d32850db821ff0a8b7633a86343ddb..90f28bda29c8bd41e19b340e779c351634d29283 100644 --- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -21,6 +21,12 @@ #define NCHANNELS_MAX 64 #define IRQ_NOUTPUTS 4 +/* + * For allocation purposes we split the cache + * memory into blocks of fixed size (given in bytes). 
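The comment above introduces the new ADMAC SRAM handling: instead of a fixed per-channel buffer, each channel gets a carveout allocated from the TX or RX cache in fixed 2 KiB blocks, tracked in a bitmask and packed into the base/size fields of the carveout register. A compressed sketch of that bookkeeping, with hypothetical demo_* names and with the mutex that serialises real allocations left out:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_BLOCK		2048
#define DEMO_CARVEOUT_SIZE	GENMASK(31, 16)
#define DEMO_CARVEOUT_BASE	GENMASK(15, 0)

struct demo_sram {
	u32 size;	/* total SRAM size in bytes, as reported by the hardware */
	u32 allocated;	/* one bit per DEMO_BLOCK-sized block */
};

static int demo_alloc_block(struct demo_sram *sram, u32 *carveout)
{
	int i, nblocks = sram->size / DEMO_BLOCK;

	for (i = 0; i < nblocks; i++) {
		if (sram->allocated & BIT(i))
			continue;
		sram->allocated |= BIT(i);
		/* pack byte offset and size into the register layout */
		*carveout = FIELD_PREP(DEMO_CARVEOUT_BASE, i * DEMO_BLOCK) |
			    FIELD_PREP(DEMO_CARVEOUT_SIZE, DEMO_BLOCK);
		return 0;
	}
	return -EBUSY;
}

static void demo_free_block(struct demo_sram *sram, u32 carveout)
{
	u32 base = FIELD_GET(DEMO_CARVEOUT_BASE, carveout);

	sram->allocated &= ~BIT(base / DEMO_BLOCK);
}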
+ */ +#define SRAM_BLOCK 2048 + #define RING_WRITE_SLOT GENMASK(1, 0) #define RING_READ_SLOT GENMASK(5, 4) #define RING_FULL BIT(9) @@ -36,6 +42,9 @@ #define REG_TX_STOP 0x0004 #define REG_RX_START 0x0008 #define REG_RX_STOP 0x000c +#define REG_IMPRINT 0x0090 +#define REG_TX_SRAM_SIZE 0x0094 +#define REG_RX_SRAM_SIZE 0x0098 #define REG_CHAN_CTL(ch) (0x8000 + (ch) * 0x200) #define REG_CHAN_CTL_RST_RINGS BIT(0) @@ -53,7 +62,9 @@ #define BUS_WIDTH_FRAME_2_WORDS 0x10 #define BUS_WIDTH_FRAME_4_WORDS 0x20 -#define CHAN_BUFSIZE 0x8000 +#define REG_CHAN_SRAM_CARVEOUT(ch) (0x8050 + (ch) * 0x200) +#define CHAN_SRAM_CARVEOUT_SIZE GENMASK(31, 16) +#define CHAN_SRAM_CARVEOUT_BASE GENMASK(15, 0) #define REG_CHAN_FIFOCTL(ch) (0x8054 + (ch) * 0x200) #define CHAN_FIFOCTL_LIMIT GENMASK(31, 16) @@ -76,6 +87,8 @@ struct admac_chan { struct dma_chan chan; struct tasklet_struct tasklet; + u32 carveout; + spinlock_t lock; struct admac_tx *current_tx; int nperiod_acks; @@ -92,12 +105,24 @@ struct admac_chan { struct list_head to_free; }; +struct admac_sram { + u32 size; + /* + * SRAM_CARVEOUT has 16-bit fields, so the SRAM cannot be larger than + * 64K and a 32-bit bitfield over 2K blocks covers it. + */ + u32 allocated; +}; + struct admac_data { struct dma_device dma; struct device *dev; __iomem void *base; struct reset_control *rstc; + struct mutex cache_alloc_lock; + struct admac_sram txcache, rxcache; + int irq; int irq_index; int nchannels; @@ -118,6 +143,60 @@ struct admac_tx { struct list_head node; }; +static int admac_alloc_sram_carveout(struct admac_data *ad, + enum dma_transfer_direction dir, + u32 *out) +{ + struct admac_sram *sram; + int i, ret = 0, nblocks; + + if (dir == DMA_MEM_TO_DEV) + sram = &ad->txcache; + else + sram = &ad->rxcache; + + mutex_lock(&ad->cache_alloc_lock); + + nblocks = sram->size / SRAM_BLOCK; + for (i = 0; i < nblocks; i++) + if (!(sram->allocated & BIT(i))) + break; + + if (i < nblocks) { + *out = FIELD_PREP(CHAN_SRAM_CARVEOUT_BASE, i * SRAM_BLOCK) | + FIELD_PREP(CHAN_SRAM_CARVEOUT_SIZE, SRAM_BLOCK); + sram->allocated |= BIT(i); + } else { + ret = -EBUSY; + } + + mutex_unlock(&ad->cache_alloc_lock); + + return ret; +} + +static void admac_free_sram_carveout(struct admac_data *ad, + enum dma_transfer_direction dir, + u32 carveout) +{ + struct admac_sram *sram; + u32 base = FIELD_GET(CHAN_SRAM_CARVEOUT_BASE, carveout); + int i; + + if (dir == DMA_MEM_TO_DEV) + sram = &ad->txcache; + else + sram = &ad->rxcache; + + if (WARN_ON(base >= sram->size)) + return; + + mutex_lock(&ad->cache_alloc_lock); + i = base / SRAM_BLOCK; + sram->allocated &= ~BIT(i); + mutex_unlock(&ad->cache_alloc_lock); +} + static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val) { void __iomem *addr = ad->base + reg; @@ -466,15 +545,28 @@ static void admac_synchronize(struct dma_chan *chan) static int admac_alloc_chan_resources(struct dma_chan *chan) { struct admac_chan *adchan = to_admac_chan(chan); + struct admac_data *ad = adchan->host; + int ret; dma_cookie_init(&adchan->chan); + ret = admac_alloc_sram_carveout(ad, admac_chan_direction(adchan->no), + &adchan->carveout); + if (ret < 0) + return ret; + + writel_relaxed(adchan->carveout, + ad->base + REG_CHAN_SRAM_CARVEOUT(adchan->no)); return 0; } static void admac_free_chan_resources(struct dma_chan *chan) { + struct admac_chan *adchan = to_admac_chan(chan); + admac_terminate_all(chan); admac_synchronize(chan); + admac_free_sram_carveout(adchan->host, admac_chan_direction(adchan->no), + adchan->carveout); } static struct dma_chan 
*admac_dma_of_xlate(struct of_phandle_args *dma_spec, @@ -712,6 +804,7 @@ static int admac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ad); ad->dev = &pdev->dev; ad->nchannels = nchannels; + mutex_init(&ad->cache_alloc_lock); /* * The controller has 4 IRQ outputs. Try them all until @@ -801,6 +894,13 @@ static int admac_probe(struct platform_device *pdev) goto free_irq; } + ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE); + ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE); + + dev_info(&pdev->dev, "Audio DMA Controller\n"); + dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n", + readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size); + return 0; free_irq: diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 7269bd54554f60cb5b8915c9a1ed7f2a36ba66bf..3229dfc78650784733a261fa539e6de64bf1979d 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -528,6 +528,22 @@ static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr, !idxd->hw.group_cap.progress_limit; } +static bool idxd_group_attr_read_buffers_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* + * Intel IAA does not support Read Buffer allocation control, + * make these attributes invisible. + */ + return (attr == &dev_attr_group_use_token_limit.attr || + attr == &dev_attr_group_use_read_buffer_limit.attr || + attr == &dev_attr_group_tokens_allowed.attr || + attr == &dev_attr_group_read_buffers_allowed.attr || + attr == &dev_attr_group_tokens_reserved.attr || + attr == &dev_attr_group_read_buffers_reserved.attr) && + idxd->data->type == IDXD_TYPE_IAX; +} + static umode_t idxd_group_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -538,6 +554,9 @@ static umode_t idxd_group_attr_visible(struct kobject *kobj, if (idxd_group_attr_progress_limit_invisible(attr, idxd)) return 0; + if (idxd_group_attr_read_buffers_invisible(attr, idxd)) + return 0; + return attr->mode; } @@ -1233,6 +1252,14 @@ static bool idxd_wq_attr_op_config_invisible(struct attribute *attr, !idxd->hw.wq_cap.op_config; } +static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* Intel IAA does not support batch processing, make it invisible */ + return attr == &dev_attr_wq_max_batch_size.attr && + idxd->data->type == IDXD_TYPE_IAX; +} + static umode_t idxd_wq_attr_visible(struct kobject *kobj, struct attribute *attr, int n) { @@ -1243,6 +1270,9 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj, if (idxd_wq_attr_op_config_invisible(attr, idxd)) return 0; + if (idxd_wq_attr_max_batch_size_invisible(attr, idxd)) + return 0; + return attr->mode; } @@ -1533,6 +1563,43 @@ static ssize_t cmd_status_store(struct device *dev, struct device_attribute *att } static DEVICE_ATTR_RW(cmd_status); +static bool idxd_device_attr_max_batch_size_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* Intel IAA does not support batch processing, make it invisible */ + return attr == &dev_attr_max_batch_size.attr && + idxd->data->type == IDXD_TYPE_IAX; +} + +static bool idxd_device_attr_read_buffers_invisible(struct attribute *attr, + struct idxd_device *idxd) +{ + /* + * Intel IAA does not support Read Buffer allocation control, + * make these attributes invisible. 
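The idxd changes implement the "not visible when the device does not support ..." notes from the ABI documentation with sysfs .is_visible callbacks: returning 0 from the callback hides the attribute entirely on devices lacking the capability, rather than registering a file that would have to fail at read time. A stripped-down sketch of the mechanism, with hypothetical demo_* names and a plain bool standing in for the capability bit:

#include <linux/device.h>
#include <linux/sysfs.h>

static bool demo_supports_batch;	/* stand-in for a hardware capability bit */

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", 32);
}
static DEVICE_ATTR_RO(max_batch_size);

static umode_t demo_attr_visible(struct kobject *kobj, struct attribute *attr,
				 int n)
{
	/* hide the file completely when the capability is missing */
	if (attr == &dev_attr_max_batch_size.attr && !demo_supports_batch)
		return 0;

	return attr->mode;
}

static struct attribute *demo_attrs[] = {
	&dev_attr_max_batch_size.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_visible,
};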
+ */ + return (attr == &dev_attr_max_tokens.attr || + attr == &dev_attr_max_read_buffers.attr || + attr == &dev_attr_token_limit.attr || + attr == &dev_attr_read_buffer_limit.attr) && + idxd->data->type == IDXD_TYPE_IAX; +} + +static umode_t idxd_device_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd_device_attr_max_batch_size_invisible(attr, idxd)) + return 0; + + if (idxd_device_attr_read_buffers_invisible(attr, idxd)) + return 0; + + return attr->mode; +} + static struct attribute *idxd_device_attributes[] = { &dev_attr_version.attr, &dev_attr_max_groups.attr, @@ -1560,6 +1627,7 @@ static struct attribute *idxd_device_attributes[] = { static const struct attribute_group idxd_device_attribute_group = { .attrs = idxd_device_attributes, + .is_visible = idxd_device_attr_visible, }; static const struct attribute_group *idxd_attribute_groups[] = { diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 0a638c97702a5b2c4dfffd8df944e679d215496d..15f63452a9bec807df36b09c2773e74998127da8 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -298,6 +298,14 @@ DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 6); DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 7); +DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 8); +DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 9); +DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 10); +DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 11); /* Total possible dynamic DIMM Label attribute file table */ static struct attribute *dynamic_csrow_dimm_attr[] = { @@ -309,6 +317,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { &dev_attr_legacy_ch5_dimm_label.attr.attr, &dev_attr_legacy_ch6_dimm_label.attr.attr, &dev_attr_legacy_ch7_dimm_label.attr.attr, + &dev_attr_legacy_ch8_dimm_label.attr.attr, + &dev_attr_legacy_ch9_dimm_label.attr.attr, + &dev_attr_legacy_ch10_dimm_label.attr.attr, + &dev_attr_legacy_ch11_dimm_label.attr.attr, NULL }; @@ -329,6 +341,14 @@ DEVICE_CHANNEL(ch6_ce_count, S_IRUGO, channel_ce_count_show, NULL, 6); DEVICE_CHANNEL(ch7_ce_count, S_IRUGO, channel_ce_count_show, NULL, 7); +DEVICE_CHANNEL(ch8_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 8); +DEVICE_CHANNEL(ch9_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 9); +DEVICE_CHANNEL(ch10_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 10); +DEVICE_CHANNEL(ch11_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 11); /* Total possible dynamic ce_count attribute file table */ static struct attribute *dynamic_csrow_ce_count_attr[] = { @@ -340,6 +360,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { &dev_attr_legacy_ch5_ce_count.attr.attr, &dev_attr_legacy_ch6_ce_count.attr.attr, &dev_attr_legacy_ch7_ce_count.attr.attr, + &dev_attr_legacy_ch8_ce_count.attr.attr, + &dev_attr_legacy_ch9_ce_count.attr.attr, + &dev_attr_legacy_ch10_ce_count.attr.attr, + &dev_attr_legacy_ch11_ce_count.attr.attr, NULL }; diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index a22ea053f8e1cdbd92d507128258edc450fcb409..8af4d2523194a760f4fc95acebaf2a73134c2b02 100644 
--- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -304,11 +304,10 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus, if (unlikely(pci_enable_device(pdev) < 0)) { edac_dbg(2, "Failed to enable device %02x:%02x.%x\n", bus, dev, fun); + pci_dev_put(pdev); return NULL; } - pci_dev_get(pdev); - return pdev; } diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c index 2a120d8d3c2723525efdaf9a3989997fbb5d058e..9dfa545427ca15ff4f5158753ee3cf7c4f6ee4a0 100644 --- a/drivers/extcon/extcon-usbc-tusb320.c +++ b/drivers/extcon/extcon-usbc-tusb320.c @@ -313,9 +313,9 @@ static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9) typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB); } -static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) +static irqreturn_t tusb320_state_update_handler(struct tusb320_priv *priv, + bool force_update) { - struct tusb320_priv *priv = dev_id; unsigned int reg; if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { @@ -323,7 +323,7 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) return IRQ_NONE; } - if (!(reg & TUSB320_REG9_INTERRUPT_STATUS)) + if (!force_update && !(reg & TUSB320_REG9_INTERRUPT_STATUS)) return IRQ_NONE; tusb320_extcon_irq_handler(priv, reg); @@ -340,6 +340,13 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) +{ + struct tusb320_priv *priv = dev_id; + + return tusb320_state_update_handler(priv, false); +} + static const struct regmap_config tusb320_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -466,7 +473,7 @@ static int tusb320_probe(struct i2c_client *client, return ret; /* update initial state */ - tusb320_irq_handler(client->irq, priv); + tusb320_state_update_handler(priv, true); /* Reset chip to its default state */ ret = tusb320_reset(priv); @@ -477,7 +484,7 @@ static int tusb320_probe(struct i2c_client *client, * State and polarity might change after a reset, so update * them again and make sure the interrupt status bit is cleared. 
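The comment above is handled by splitting the TUSB320 interrupt handler into a shared state-update helper with a force flag: the real IRQ path still bails out when the interrupt status bit is clear, while probe calls the helper with force_update set, once before and once after the chip reset, to synchronise the initial state. A bare-bones sketch of that shape, with hypothetical demo_* names and register values:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

#define DEMO_REG_STATUS		0x09
#define DEMO_STATUS_IRQ		BIT(4)

struct demo_priv {
	struct regmap *regmap;
	/* extcon/typec handles would live here */
};

static irqreturn_t demo_update_state(struct demo_priv *priv, bool force)
{
	unsigned int reg;

	if (regmap_read(priv->regmap, DEMO_REG_STATUS, &reg))
		return IRQ_NONE;

	/* at probe time no interrupt may be pending, so allow forcing a sync */
	if (!force && !(reg & DEMO_STATUS_IRQ))
		return IRQ_NONE;

	/* decode the connector state and notify consumers here */
	return IRQ_HANDLED;
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	return demo_update_state(dev_id, false);
}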
*/ - tusb320_irq_handler(client->irq, priv); + tusb320_state_update_handler(priv, true); ret = devm_request_threaded_irq(priv->dev, client->irq, NULL, tusb320_irq_handler, diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index a46df5d1d0942751635276f6f1dc2710305ffae1..f12cc29bd4b845603690442dc729728a80446dbc 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -611,7 +611,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed)); if (seed != NULL) { - size = min(seed->size, EFI_RANDOM_SEED_SIZE); + size = min_t(u32, seed->size, SZ_1K); // sanity check early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); @@ -620,8 +620,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed) + size); if (seed != NULL) { - pr_notice("seeding entropy pool\n"); add_bootloader_randomness(seed->bits, size); + memzero_explicit(seed->bits, size); early_memunmap(seed, sizeof(*seed) + size); } else { pr_err("Could not map UEFI random seed!\n"); diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index eb03d5a9aac88e84f60a0753f7f51ded5141c4d3..900df67a20785be7ebcbc46a5954e43224738fc9 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -882,6 +882,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out); efi_status_t efi_random_alloc(unsigned long size, unsigned long align, unsigned long *addr, unsigned long random_seed); +efi_status_t efi_random_get_seed(void); + efi_status_t check_platform_features(void); void *get_efi_config_table(efi_guid_t guid); diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 33ab567695951d6c3c9df4217d067fb53367a373..f85d2c066877772c256b1f2cd282de1f6c37bf4c 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -67,27 +67,43 @@ efi_status_t efi_random_get_seed(void) efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; + struct linux_efi_random_seed *prev_seed, *seed = NULL; + int prev_seed_size = 0, seed_size = EFI_RANDOM_SEED_SIZE; efi_rng_protocol_t *rng = NULL; - struct linux_efi_random_seed *seed = NULL; efi_status_t status; status = efi_bs_call(locate_protocol, &rng_proto, NULL, (void **)&rng); if (status != EFI_SUCCESS) return status; + /* + * Check whether a seed was provided by a prior boot stage. In that + * case, instead of overwriting it, let's create a new buffer that can + * hold both, and concatenate the existing and the new seeds. + * Note that we should read the seed size with caution, in case the + * table got corrupted in memory somehow. 
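Both EFI hunks treat the seed table's self-reported size as untrusted: the core caps it before remapping, and the stub sizes the replacement table with struct_size() over the flexible bits[] array before concatenating old and new seed material. A small sketch of that allocation pattern, using a hypothetical demo_seed structure, an arbitrary 512-byte cap, and ordinary kernel allocators rather than EFI boot services for brevity:

#include <linux/minmax.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_seed {
	u32 size;
	u8 bits[];	/* flexible array, sized at allocation time */
};

static struct demo_seed *demo_clone_seed(const struct demo_seed *prev)
{
	struct demo_seed *seed;
	u32 size = min_t(u32, prev->size, 512U);	/* distrust the header */

	seed = kzalloc(struct_size(seed, bits, size), GFP_KERNEL);
	if (!seed)
		return NULL;

	seed->size = size;
	memcpy(seed->bits, prev->bits, size);
	return seed;
}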
+ */ + prev_seed = get_efi_config_table(LINUX_EFI_RANDOM_SEED_TABLE_GUID); + if (prev_seed && prev_seed->size <= 512U) { + prev_seed_size = prev_seed->size; + seed_size += prev_seed_size; + } + /* * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the * allocation will survive a kexec reboot (although we refresh the seed * beforehand) */ status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, - sizeof(*seed) + EFI_RANDOM_SEED_SIZE, + struct_size(seed, bits, seed_size), (void **)&seed); - if (status != EFI_SUCCESS) - return status; + if (status != EFI_SUCCESS) { + efi_warn("Failed to allocate memory for RNG seed.\n"); + goto err_warn; + } status = efi_call_proto(rng, get_rng, &rng_algo_raw, - EFI_RANDOM_SEED_SIZE, seed->bits); + EFI_RANDOM_SEED_SIZE, seed->bits); if (status == EFI_UNSUPPORTED) /* @@ -100,14 +116,28 @@ efi_status_t efi_random_get_seed(void) if (status != EFI_SUCCESS) goto err_freepool; - seed->size = EFI_RANDOM_SEED_SIZE; + seed->size = seed_size; + if (prev_seed_size) + memcpy(seed->bits + EFI_RANDOM_SEED_SIZE, prev_seed->bits, + prev_seed_size); + status = efi_bs_call(install_configuration_table, &rng_table_guid, seed); if (status != EFI_SUCCESS) goto err_freepool; + if (prev_seed_size) { + /* wipe and free the old seed if we managed to install the new one */ + memzero_explicit(prev_seed->bits, prev_seed_size); + efi_bs_call(free_pool, prev_seed); + } return EFI_SUCCESS; err_freepool: + memzero_explicit(seed, struct_size(seed, bits, seed_size)); efi_bs_call(free_pool, seed); + efi_warn("Failed to obtain seed from EFI_RNG_PROTOCOL\n"); +err_warn: + if (prev_seed) + efi_warn("Retaining bootloader-supplied seed only"); return status; } diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 4b8978b254f9a91ce50ab45c1e008b8f21e45c86..dba315f675bc79134bb4a9b88d93ce9a4175293e 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -272,6 +272,7 @@ static int rpi_firmware_probe(struct platform_device *pdev) int ret = PTR_ERR(fw->chan); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get mbox channel: %d\n", ret); + kfree(fw); return ret; } diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index ebc32bbd9b83380ed22c03e3101c6d5faf999476..6281e7153b47565debaadef346415af72492c240 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -429,15 +429,14 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info, * during noirq phase, so we must manually poll the completion. 
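The ti_sci fix below is about the condition argument of read_poll_timeout_atomic(): the macro stores the polled value in val and keeps looping until cond becomes true, so passing a constant true made it exit immediately; the condition has to test the value actually read back. A minimal sketch of correct usage against a hypothetical status register with a DONE bit in bit 0:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define DEMO_REG_STATUS		0x04
#define DEMO_STATUS_DONE	BIT(0)

/* poll every 1 us, give up after 1000 us; returns 0 or -ETIMEDOUT */
static int demo_wait_done(void __iomem *base)
{
	u32 val;

	return read_poll_timeout_atomic(readl, val, val & DEMO_STATUS_DONE,
					1, 1000, false, base + DEMO_REG_STATUS);
}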
*/ ret = read_poll_timeout_atomic(try_wait_for_completion, done_state, - true, 1, + done_state, 1, info->desc->max_rx_timeout_ms * 1000, false, &xfer->done); } - if (ret == -ETIMEDOUT || !done_state) { + if (ret == -ETIMEDOUT) dev_err(dev, "Mbox timedout in resp(caller: %pS)\n", (void *)_RET_IP_); - } /* * NOTE: we might prefer not to need the mailbox ticker to manage the diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index ebe1943b85dd99f77a7c8883c2759ef666abc703..bf21803a00363e3e3a75b01175db0b93d6fb192b 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -473,6 +473,9 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off) case PCAL6524_DEBOUNCE: pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c; break; + default: + pinctrl = 0; + break; } return pinctrl + addr + (off / BANK_SZ); diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index 238f3210970cfa0ed3d38d43b141afc0e03135e8..bc5660f61c570d770a2d5cb44ee6bd2768aa6189 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -215,6 +215,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) return -ENODEV; } parent = irq_find_host(irq_parent); + of_node_put(irq_parent); if (!parent) { dev_err(dev, "no IRQ parent domain\n"); return -ENODEV; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 0cb6b468f364f99c6ff4ed0e68b2718c52e74201..6ab1cf489d0359f85536bd8c4499d99dcaf1070b 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -55,6 +55,50 @@ static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8)); * interface to gpiolib GPIOs via ioctl()s. */ +typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *); +typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long); +typedef ssize_t (*read_fn)(struct file *, char __user *, + size_t count, loff_t *); + +static __poll_t call_poll_locked(struct file *file, + struct poll_table_struct *wait, + struct gpio_device *gdev, poll_fn func) +{ + __poll_t ret; + + down_read(&gdev->sem); + ret = func(file, wait); + up_read(&gdev->sem); + + return ret; +} + +static long call_ioctl_locked(struct file *file, unsigned int cmd, + unsigned long arg, struct gpio_device *gdev, + ioctl_fn func) +{ + long ret; + + down_read(&gdev->sem); + ret = func(file, cmd, arg); + up_read(&gdev->sem); + + return ret; +} + +static ssize_t call_read_locked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps, + struct gpio_device *gdev, read_fn func) +{ + ssize_t ret; + + down_read(&gdev->sem); + ret = func(file, buf, count, f_ps); + up_read(&gdev->sem); + + return ret; +} + /* * GPIO line handle management */ @@ -191,8 +235,8 @@ static long linehandle_set_config(struct linehandle_state *lh, return 0; } -static long linehandle_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct linehandle_state *lh = file->private_data; void __user *ip = (void __user *)arg; @@ -201,6 +245,9 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, unsigned int i; int ret; + if (!lh->gdev->chip) + return -ENODEV; + switch (cmd) { case GPIOHANDLE_GET_LINE_VALUES_IOCTL: /* NOTE: It's okay to read values of output lines */ @@ -247,6 +294,15 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd, } } +static long linehandle_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct 
linehandle_state *lh = file->private_data; + + return call_ioctl_locked(file, cmd, arg, lh->gdev, + linehandle_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long linehandle_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) @@ -1378,12 +1434,15 @@ static long linereq_set_config(struct linereq *lr, void __user *ip) return ret; } -static long linereq_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct linereq *lr = file->private_data; void __user *ip = (void __user *)arg; + if (!lr->gdev->chip) + return -ENODEV; + switch (cmd) { case GPIO_V2_LINE_GET_VALUES_IOCTL: return linereq_get_values(lr, ip); @@ -1396,6 +1455,15 @@ static long linereq_ioctl(struct file *file, unsigned int cmd, } } +static long linereq_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct linereq *lr = file->private_data; + + return call_ioctl_locked(file, cmd, arg, lr->gdev, + linereq_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long linereq_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) @@ -1404,12 +1472,15 @@ static long linereq_ioctl_compat(struct file *file, unsigned int cmd, } #endif -static __poll_t linereq_poll(struct file *file, - struct poll_table_struct *wait) +static __poll_t linereq_poll_unlocked(struct file *file, + struct poll_table_struct *wait) { struct linereq *lr = file->private_data; __poll_t events = 0; + if (!lr->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &lr->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events, @@ -1419,16 +1490,25 @@ static __poll_t linereq_poll(struct file *file, return events; } -static ssize_t linereq_read(struct file *file, - char __user *buf, - size_t count, - loff_t *f_ps) +static __poll_t linereq_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct linereq *lr = file->private_data; + + return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked); +} + +static ssize_t linereq_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) { struct linereq *lr = file->private_data; struct gpio_v2_line_event le; ssize_t bytes_read = 0; int ret; + if (!lr->gdev->chip) + return -ENODEV; + if (count < sizeof(le)) return -EINVAL; @@ -1473,6 +1553,15 @@ static ssize_t linereq_read(struct file *file, return bytes_read; } +static ssize_t linereq_read(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) +{ + struct linereq *lr = file->private_data; + + return call_read_locked(file, buf, count, f_ps, lr->gdev, + linereq_read_unlocked); +} + static void linereq_free(struct linereq *lr) { unsigned int i; @@ -1710,12 +1799,15 @@ struct lineevent_state { (GPIOEVENT_REQUEST_RISING_EDGE | \ GPIOEVENT_REQUEST_FALLING_EDGE) -static __poll_t lineevent_poll(struct file *file, - struct poll_table_struct *wait) +static __poll_t lineevent_poll_unlocked(struct file *file, + struct poll_table_struct *wait) { struct lineevent_state *le = file->private_data; __poll_t events = 0; + if (!le->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &le->wait, wait); if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) @@ -1724,15 +1816,21 @@ static __poll_t lineevent_poll(struct file *file, return events; } +static __poll_t lineevent_poll(struct file *file, + struct poll_table_struct *wait) +{ + struct lineevent_state *le = file->private_data; + + return call_poll_locked(file, wait, le->gdev, 
lineevent_poll_unlocked); +} + struct compat_gpioeevent_data { compat_u64 timestamp; u32 id; }; -static ssize_t lineevent_read(struct file *file, - char __user *buf, - size_t count, - loff_t *f_ps) +static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) { struct lineevent_state *le = file->private_data; struct gpioevent_data ge; @@ -1740,6 +1838,9 @@ static ssize_t lineevent_read(struct file *file, ssize_t ge_size; int ret; + if (!le->gdev->chip) + return -ENODEV; + /* * When compatible system call is being used the struct gpioevent_data, * in case of at least ia32, has different size due to the alignment @@ -1797,6 +1898,15 @@ static ssize_t lineevent_read(struct file *file, return bytes_read; } +static ssize_t lineevent_read(struct file *file, char __user *buf, + size_t count, loff_t *f_ps) +{ + struct lineevent_state *le = file->private_data; + + return call_read_locked(file, buf, count, f_ps, le->gdev, + lineevent_read_unlocked); +} + static void lineevent_free(struct lineevent_state *le) { if (le->irq) @@ -1814,13 +1924,16 @@ static int lineevent_release(struct inode *inode, struct file *file) return 0; } -static long lineevent_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd, + unsigned long arg) { struct lineevent_state *le = file->private_data; void __user *ip = (void __user *)arg; struct gpiohandle_data ghd; + if (!le->gdev->chip) + return -ENODEV; + /* * We can get the value for an event line but not set it, * because it is input by definition. @@ -1843,6 +1956,15 @@ static long lineevent_ioctl(struct file *file, unsigned int cmd, return -EINVAL; } +static long lineevent_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct lineevent_state *le = file->private_data; + + return call_ioctl_locked(file, cmd, arg, le->gdev, + lineevent_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long lineevent_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) @@ -2401,12 +2523,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb, return NOTIFY_OK; } -static __poll_t lineinfo_watch_poll(struct file *file, - struct poll_table_struct *pollt) +static __poll_t lineinfo_watch_poll_unlocked(struct file *file, + struct poll_table_struct *pollt) { struct gpio_chardev_data *cdev = file->private_data; __poll_t events = 0; + if (!cdev->gdev->chip) + return EPOLLHUP | EPOLLERR; + poll_wait(file, &cdev->wait, pollt); if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events, @@ -2416,8 +2541,17 @@ static __poll_t lineinfo_watch_poll(struct file *file, return events; } -static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, - size_t count, loff_t *off) +static __poll_t lineinfo_watch_poll(struct file *file, + struct poll_table_struct *pollt) +{ + struct gpio_chardev_data *cdev = file->private_data; + + return call_poll_locked(file, pollt, cdev->gdev, + lineinfo_watch_poll_unlocked); +} + +static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf, + size_t count, loff_t *off) { struct gpio_chardev_data *cdev = file->private_data; struct gpio_v2_line_info_changed event; @@ -2425,6 +2559,9 @@ static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, int ret; size_t event_size; + if (!cdev->gdev->chip) + return -ENODEV; + #ifndef CONFIG_GPIO_CDEV_V1 event_size = sizeof(struct gpio_v2_line_info_changed); if (count < event_size) @@ -2492,6 +2629,15 @@ static ssize_t 
lineinfo_watch_read(struct file *file, char __user *buf, return bytes_read; } +static ssize_t lineinfo_watch_read(struct file *file, char __user *buf, + size_t count, loff_t *off) +{ + struct gpio_chardev_data *cdev = file->private_data; + + return call_read_locked(file, buf, count, off, cdev->gdev, + lineinfo_watch_read_unlocked); +} + /** * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev @@ -2505,13 +2651,17 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) struct gpio_chardev_data *cdev; int ret = -ENOMEM; + down_read(&gdev->sem); + /* Fail on open if the backing gpiochip is gone */ - if (!gdev->chip) - return -ENODEV; + if (!gdev->chip) { + ret = -ENODEV; + goto out_unlock; + } cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) - return -ENOMEM; + goto out_unlock; cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL); if (!cdev->watched_lines) @@ -2534,6 +2684,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) if (ret) goto out_unregister_notifier; + up_read(&gdev->sem); + return ret; out_unregister_notifier: @@ -2543,6 +2695,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) bitmap_free(cdev->watched_lines); out_free_cdev: kfree(cdev); +out_unlock: + up_read(&gdev->sem); return ret; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a70522aef3557e3e1b63aa6f68bab187ea969b6f..5974cfc61b417679d6fd18f0dadf97c4c0a9112b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -735,6 +735,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, spin_unlock_irqrestore(&gpio_lock, flags); BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier); + init_rwsem(&gdev->sem); #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); @@ -875,6 +876,8 @@ void gpiochip_remove(struct gpio_chip *gc) unsigned long flags; unsigned int i; + down_write(&gdev->sem); + /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ gpiochip_sysfs_unregister(gdev); gpiochip_free_hogs(gc); @@ -909,6 +912,7 @@ void gpiochip_remove(struct gpio_chip *gc) * gone. 
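 * Teardown now runs with gdev->sem held for write. The character-device
 * file operations take the same rw-semaphore for read and bail out with
 * -ENODEV (EPOLLHUP | EPOLLERR for poll) once gdev->chip is NULL, so a
 * hot-unplug can no longer race an in-flight ioctl(), read() or poll().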
*/ gcdev_unregister(gdev); + up_write(&gdev->sem); put_device(&gdev->dev); } EXPORT_SYMBOL_GPL(gpiochip_remove); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index d900ecdbac46dfd4258b1e536c1f0d2fe57a874e..9ad68a0adf4a8a08205ec44ab477fcdc837f96ee 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -15,6 +15,7 @@ #include #include #include +#include #define GPIOCHIP_NAME "gpiochip" @@ -39,6 +40,9 @@ * @list: links gpio_device:s together for traversal * @notifier: used to notify subscribers about lines being requested, released * or reconfigured + * @sem: protects the structure from a NULL-pointer dereference of @chip by + * user-space operations when the device gets unregistered during + * a hot-unplug event * @pin_ranges: range of pins served by the GPIO driver * * This state container holds most of the runtime variable data @@ -60,6 +64,7 @@ struct gpio_device { void *data; struct list_head list; struct blocking_notifier_head notifier; + struct rw_semaphore sem; #ifdef CONFIG_PINCTRL /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2eca58220550eb9e10f2a42a64a0d5bfacbbbfcc..5f1d0990c6f34f374bb130366e5be942dd46e421 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -196,6 +196,7 @@ extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; +extern uint amdgpu_freesync_vid_mode; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dc_visual_confirm; extern uint amdgpu_dm_abm_level; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 647220a8762dc591cbf83fba338235ca9798ac9e..30f145dc8724edd650c01aaaab6f634e697dd24c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -265,8 +265,10 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_ (&((struct amdgpu_fpriv *) \ ((struct drm_file *)(drm_priv))->driver_priv)->vm) +int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev, + struct file *filp, u32 pasid); int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, - struct file *filp, u32 pasid, + struct file *filp, void **process_info, struct dma_fence **ef); void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1f76e27f1a35454ded8fdf0d231228290e137c45..29f045079a3e16ca0159a89431f59c61dc8d6b8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1473,10 +1473,9 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) amdgpu_bo_unreserve(bo); } -int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, - struct file *filp, u32 pasid, - void **process_info, - struct dma_fence **ef) +int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev, + struct file *filp, u32 pasid) + { struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; @@ -1487,10 +1486,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, return ret; avm = &drv_priv->vm; - /* Already a compute VM? */ - if (avm->process_info) - return -EINVAL; - /* Free the original amdgpu allocated pasid, * will be replaced with kfd allocated pasid. 
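 * The pasid is assigned here, in amdgpu_amdkfd_gpuvm_set_vm_pasid(), which
 * KFD now calls only after amdgpu_amdkfd_gpuvm_acquire_process_vm() has
 * turned the VM into a compute VM and initialized the KFD process info.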
*/ @@ -1499,14 +1494,36 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, amdgpu_vm_set_pasid(adev, avm, 0); } - /* Convert VM into a compute VM */ - ret = amdgpu_vm_make_compute(adev, avm); + ret = amdgpu_vm_set_pasid(adev, avm, pasid); if (ret) return ret; - ret = amdgpu_vm_set_pasid(adev, avm, pasid); + return 0; +} + +int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, + struct file *filp, + void **process_info, + struct dma_fence **ef) +{ + struct amdgpu_fpriv *drv_priv; + struct amdgpu_vm *avm; + int ret; + + ret = amdgpu_file_to_fpriv(filp, &drv_priv); + if (ret) + return ret; + avm = &drv_priv->vm; + + /* Already a compute VM? */ + if (avm->process_info) + return -EINVAL; + + /* Convert VM into a compute VM */ + ret = amdgpu_vm_make_compute(adev, avm); if (ret) return ret; + /* Initialize KFD part of the VM and process info */ ret = init_kfd_vm(avm, process_info, ef); if (ret) @@ -2256,7 +2273,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, ret = drm_vma_node_allow(&obj->vma_node, drm_priv); if (ret) { - kfree(mem); + kfree(*mem); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index e363f56c72af1ec6dd3822c7ec09796848429f3d..30c28a69e847d25cdf7461a184e896df497c6810 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -317,6 +317,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) if (!found) return false; + pci_dev_put(pdev); adev->bios = kmalloc(size, GFP_KERNEL); if (!adev->bios) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f1e9663b4051075a78c2d72f5c85ca40e1050bfa..0be85d19a6f3ed5e812972b1d820e2d27b754227 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2462,6 +2462,11 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) { struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); + if (WARN_ON(!hive)) { + r = -ENOENT; + goto init_failed; + } + if (!hive->reset_domain || !amdgpu_reset_get_reset_domain(hive->reset_domain)) { r = -ENOENT; @@ -3000,14 +3005,15 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) continue; } - /* skip suspend of gfx and psp for S0ix + /* skip suspend of gfx/mes and psp for S0ix * gfx is in gfxoff state, so on resume it will exit gfxoff just * like at runtime. PSP is also part of the always on hardware * so no need to suspend it. */ if (adev->in_s0ix && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) continue; /* XXX handle errors */ @@ -5027,6 +5033,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) pm_runtime_enable(&(p->dev)); pm_runtime_resume(&(p->dev)); } + + pci_dev_put(p); } static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) @@ -5065,6 +5073,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) if (expires < ktime_get_mono_fast_ns()) { dev_warn(adev->dev, "failed to suspend display audio\n"); + pci_dev_put(p); /* TODO: abort the succeeding gpu reset? 
*/ return -ETIMEDOUT; } @@ -5072,6 +5081,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) pm_runtime_disable(&(p->dev)); + pci_dev_put(p); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index bf2d50c8c92ad5e1f64ce125f6793a1b3131e88a..b59466972ed7ac411b5b4d21302af0a8032d9b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -180,6 +180,7 @@ int amdgpu_mes_kiq; int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; int amdgpu_tmz = -1; /* auto */ +uint amdgpu_freesync_vid_mode; int amdgpu_reset_method = -1; /* auto */ int amdgpu_num_kcq = -1; int amdgpu_smartshift_bias; @@ -877,6 +878,32 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444); MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)"); module_param_named(tmz, amdgpu_tmz, int, 0444); +/** + * DOC: freesync_video (uint) + * Enable the optimization to adjust front porch timing to achieve seamless + * mode change experience when setting a freesync supported mode for which full + * modeset is not needed. + * + * The Display Core will add a set of modes derived from the base FreeSync + * video mode into the corresponding connector's mode list based on commonly + * used refresh rates and VRR range of the connected display, when users enable + * this feature. From the userspace perspective, they can see a seamless mode + * change experience when the change between different refresh rates under the + * same resolution. Additionally, userspace applications such as Video playback + * can read this modeset list and change the refresh rate based on the video + * frame rate. Finally, the userspace can also derive an appropriate mode for a + * particular refresh rate based on the FreeSync Mode and add it to the + * connector's mode list. + * + * Note: This is an experimental feature. + * + * The default value: 0 (off). 
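+ *
+ * Example: boot with amdgpu.freesync_video=1 (or load the module with
+ * freesync_video=1) to have the derived FreeSync video modes added to the
+ * connector's mode list.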
+ */ +MODULE_PARM_DESC( + freesync_video, + "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)"); +module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); + /** * DOC: reset_method (int) * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco) @@ -2040,6 +2067,15 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, "See modparam exp_hw_support\n"); return -ENODEV; } + /* differentiate between P10 and P11 asics with the same DID */ + if (pdev->device == 0x67FF && + (pdev->revision == 0xE3 || + pdev->revision == 0xE7 || + pdev->revision == 0xF3 || + pdev->revision == 0xF7)) { + flags &= ~AMD_ASIC_MASK; + flags |= CHIP_POLARIS10; + } /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping, * however, SME requires an indirect IOMMU mapping because the encryption @@ -2109,12 +2145,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, ddev); - ret = amdgpu_driver_load_kms(adev, ent->driver_data); + ret = amdgpu_driver_load_kms(adev, flags); if (ret) goto err_pci; retry_init: - ret = drm_dev_register(ddev, ent->driver_data); + ret = drm_dev_register(ddev, flags); if (ret == -EAGAIN && ++retry <= 3) { DRM_INFO("retry init %d\n", retry); /* Don't request EX mode too frequently which is attacking */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 2e8f6cd7a72935fb817a80b76c15c7c44df6aeac..3be3cba3a16dbb17fdf837b786721540c610776a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -446,27 +446,24 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, /* * If GTT is part of requested domains the check must succeed to - * allow fall back to GTT + * allow fall back to GTT. 
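+ * The TTM manager for the domain may not be initialized yet, so 'man' is
+ * checked for NULL before its size limit is consulted.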
*/ if (domain & AMDGPU_GEM_DOMAIN_GTT) { man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - if (size < man->size) + if (man && size < man->size) return true; - else - goto fail; - } - - if (domain & AMDGPU_GEM_DOMAIN_VRAM) { + else if (!man) + WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized"); + goto fail; + } else if (domain & AMDGPU_GEM_DOMAIN_VRAM) { man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - if (size < man->size) + if (man && size < man->size) return true; - else - goto fail; + goto fail; } - /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */ return true; @@ -1509,7 +1506,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo) uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, uint32_t domain) { - if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { + if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) && + ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) { domain = AMDGPU_GEM_DOMAIN_VRAM; if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) domain = AMDGPU_GEM_DOMAIN_GTT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 49c4347d154ce49c74d4c8bda3c7b6a88b7d052e..2b9d806e23afb4b28ea5d7a8613257a876ddd477 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -75,6 +75,8 @@ struct amdgpu_vf_error_buffer { uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; }; +enum idh_request; + /** * struct amdgpu_virt_ops - amdgpu device virt operations */ @@ -84,7 +86,8 @@ struct amdgpu_virt_ops { int (*req_init_data)(struct amdgpu_device *adev); int (*reset_gpu)(struct amdgpu_device *adev); int (*wait_reset)(struct amdgpu_device *adev); - void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); + void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, + u32 data1, u32 data2, u32 data3); }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 47159e9a08848c72125bb9a6cc81b7c316d5e426..4b9e7b050ccd25fe5a33cba18c04e5829a46d8ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -386,7 +386,6 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) if (ret) { dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); kobject_put(&hive->kobj); - kfree(hive); hive = NULL; goto pro_end; } @@ -410,7 +409,6 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n"); ret = -ENOMEM; kobject_put(&hive->kobj); - kfree(hive); hive = NULL; goto pro_end; } diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index f141fadd2d86ffa66143c2909834f5dae3fec69a..725876b4f02edeb95bc18268c1ebf4c87e6b3888 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -1339,7 +1339,8 @@ static int mes_v11_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_in_reset(adev) && + /* it's only intended for use in mes_self_test case, not for s0ix and reset */ + if (!amdgpu_in_reset(adev) && !adev->in_s0ix && (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) amdgpu_mes_self_test(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 
998b5d17b271b62df41a571dbfc589745ac992ce..0e664d0cc8d51ff3195e0349bc0fae374660601a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -319,7 +319,7 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) tmp = mmMMVM_L2_CNTL5_DEFAULT; tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); - WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp); } static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 1b027d069ab4042c294890bbd7cdd4cb449bffc2..4638ea7c2eec5c7599a80ff707ce65ea9a8fbc57 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -243,7 +243,7 @@ static void mmhub_v2_3_init_cache_regs(struct amdgpu_device *adev) tmp = mmMMVM_L2_CNTL5_DEFAULT; tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); - WREG32_SOC15(GC, 0, mmMMVM_L2_CNTL5, tmp); + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL5, tmp); } static void mmhub_v2_3_enable_system_domain(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index a1d26c4d80b8c7802e70ffd881a6b252e9ded6fa..16cc82215e2e16c35278c6184b2bf857aef344b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -275,7 +275,7 @@ static void mmhub_v3_0_init_cache_regs(struct amdgpu_device *adev) tmp = regMMVM_L2_CNTL5_DEFAULT; tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); - WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp); } static void mmhub_v3_0_enable_system_domain(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index e8058edc1d1083969381374daf0ebe11751c15c4..6bdf2ef0298d6ea6cf857effddbe8411ad820a0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -269,7 +269,7 @@ static void mmhub_v3_0_1_init_cache_regs(struct amdgpu_device *adev) tmp = regMMVM_L2_CNTL5_DEFAULT; tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); - WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp); } static void mmhub_v3_0_1_enable_system_domain(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c index 770be0a8f7ce7a4d1d8d2a687aa96cf310cb822b..45465acaa943aff683d1229d0d69dbb9534e9638 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c @@ -268,7 +268,7 @@ static void mmhub_v3_0_2_init_cache_regs(struct amdgpu_device *adev) tmp = regMMVM_L2_CNTL5_DEFAULT; tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0); - WREG32_SOC15(GC, 0, regMMVM_L2_CNTL5, tmp); + WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp); } static void mmhub_v3_0_2_enable_system_domain(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index b3fba8dea63ca868fa20da2ca08e0b3a24508713..6853b93ac82e763ca5910e2415d094c08298ab90 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -82,10 +82,10 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = /* Navi1x */ static const struct amdgpu_video_codec_info 
nv_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, @@ -100,10 +100,10 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = /* Sienna Cichlid */ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, @@ -125,10 +125,10 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, @@ -149,7 +149,7 @@ static struct amdgpu_video_codecs sriov_sc_video_codecs_decode = /* Beige Goby*/ static const struct amdgpu_video_codec_info bg_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; @@ -166,7 +166,7 @@ static const struct amdgpu_video_codecs bg_video_codecs_encode = { /* Yellow Carp*/ static const struct 
amdgpu_video_codec_info yc_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index e3b2b6b4f1a662fa10a6c5b7a34374c6a1b084c3..7cd17dda32ceb3026425c8db3e293f19b7613ec5 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -103,10 +103,10 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode = /* Vega */ static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, }; @@ -120,10 +120,10 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode = /* Raven */ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)}, @@ -138,10 +138,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode = /* Renoir, Arcturus */ static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4906, 3)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4906, 5)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4906, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, 
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index e08044008186e13aff2db614dde992a2009e87be..8b297ade69a240fb478802a210053195c98700f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -61,7 +61,7 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode = static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] = { - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4906, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 951b636772484304a06b32fed2a5f48c852b09ed..dd351105c1bcf5f5d3cb5bef93ba3e38059e71a3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -689,13 +689,13 @@ void kfd_process_destroy_wq(void) } static void kfd_process_free_gpuvm(struct kgd_mem *mem, - struct kfd_process_device *pdd, void *kptr) + struct kfd_process_device *pdd, void **kptr) { struct kfd_dev *dev = pdd->dev; - if (kptr) { + if (kptr && *kptr) { amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem); - kptr = NULL; + *kptr = NULL; } amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv); @@ -795,7 +795,7 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd) if (!qpd->ib_kaddr || !qpd->ib_base) return; - kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr); + kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr); } struct kfd_process *kfd_create_process(struct file *filep) @@ -1277,7 +1277,7 @@ static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd) if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) return; - kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr); + kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr); } void kfd_process_set_trap_handler(struct qcm_process_device *qpd, @@ -1576,9 +1576,9 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, p = pdd->process; dev = pdd->dev; - ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( - dev->adev, drm_file, p->pasid, - &p->kgd_process_info, &p->ef); + ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file, + &p->kgd_process_info, + &p->ef); if (ret) { pr_err("Failed to create process VM object\n"); return ret; @@ -1593,13 +1593,19 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, if (ret) goto err_init_cwsr; + ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid); + if (ret) + goto err_set_pasid; + pdd->drm_file = drm_file; return 0; +err_set_pasid: + kfd_process_device_destroy_cwsr_dgpu(pdd); err_init_cwsr: + kfd_process_device_destroy_ib_mem(pdd); err_reserve_ib_mem: - kfd_process_device_free_bos(pdd); pdd->drm_priv = NULL; return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 3f0a4a415907d425b113f251684822f90ed4c495..35a9b702508af0bc0cbeead95a9f708c518bc5e7 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, p2plink->attr.name = "properties"; p2plink->attr.mode = KFD_SYSFS_FILE_MODE; - sysfs_attr_init(&iolink->attr); + sysfs_attr_init(&p2plink->attr); ret = sysfs_create_file(p2plink->kobj, &p2plink->attr); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 512c32327eb11c514e5e28335b02588a4a6aaf1d..dacad8b85963c8351848fcd2bf426c7f7af0c194 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1512,6 +1512,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) case IP_VERSION(3, 0, 1): case IP_VERSION(3, 1, 2): case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): case IP_VERSION(3, 1, 5): case IP_VERSION(3, 1, 6): init_data.flags.gpu_vm_support = true; @@ -4371,6 +4372,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_set_panel_orientation(&aconnector->base); } + /* If we didn't find a panel, notify the acpi video detection */ + if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0) + acpi_video_report_nolcd(); + /* Software is initialized. Now we can register interrupt handlers. */ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -5802,7 +5807,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - recalculate_timing = is_freesync_video_mode(&mode, aconnector); + recalculate_timing = amdgpu_freesync_vid_mode && + is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); drm_mode_copy(&saved_mode, &mode); @@ -6887,7 +6893,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!edid) + if (!(amdgpu_freesync_vid_mode && edid)) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -8748,7 +8754,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. 
*/ - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && + dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -8783,7 +8790,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto skip_modeset; - if (dm_new_crtc_state->stream && + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; @@ -8795,7 +8802,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (aconnector && + } else if (amdgpu_freesync_vid_mode && aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index f0b01c8dc4a6b819731f666da64acefc079a0935..f72c013d3a5b05b3a58e7ef8d8f3437a7a511e03 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -42,39 +42,6 @@ #include "dm_helpers.h" #include "ddc_service_types.h" -struct monitor_patch_info { - unsigned int manufacturer_id; - unsigned int product_id; - void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); - unsigned int patch_param; -}; -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); - -static const struct monitor_patch_info monitor_patch_table[] = { -{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, -{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, -}; - -static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) -{ - if (edid_caps) - edid_caps->panel_patch.max_dsc_target_bpp_limit = param; -} - -static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) -{ - int i, ret = 0; - - for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) - if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) - && (edid_caps->product_id == monitor_patch_table[i].product_id)) { - monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); - ret++; - } - - return ret; -} - /* dm_helpers_parse_edid_caps * * Parse edid caps @@ -149,8 +116,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps( kfree(sads); kfree(sadb); - amdgpu_dm_patch_edid_caps(edid_caps); - return result; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index e0c8d6f09bb4b922a93f6a7061c904620199ca88..074e70a5c458e3e304cc0f9ba35747d57b59ec2c 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -462,6 +462,7 @@ static enum bp_result get_gpio_i2c_info( uint32_t count = 0; unsigned int table_index = 0; bool find_valid = false; + struct atom_gpio_pin_assignment *pin; if (!info) return BP_RESULT_BADINPUT; @@ -489,20 +490,17 @@ static enum bp_result get_gpio_i2c_info( - sizeof(struct atom_common_table_header)) / sizeof(struct atom_gpio_pin_assignment); + pin = (struct atom_gpio_pin_assignment *) header->gpio_pin; + for (table_index = 0; table_index < count; table_index++) { - if (((record->i2c_id & I2C_HW_CAP) == ( - header->gpio_pin[table_index].gpio_id & - I2C_HW_CAP)) && - ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == - (header->gpio_pin[table_index].gpio_id & - 
I2C_HW_ENGINE_ID_MASK)) && - ((record->i2c_id & I2C_HW_LANE_MUX) == - (header->gpio_pin[table_index].gpio_id & - I2C_HW_LANE_MUX))) { + if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) && + ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) && + ((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) { /* still valid */ find_valid = true; break; } + pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment)); } /* If we don't find the entry that we are looking for then diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 6f77d8e538ab14a16e3bfd354c6bc8d4157c5127..9eb9fe5b8d2c5bc31b0f331e98208c3af4d383d1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -438,7 +438,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, } if (!new_clocks->dtbclk_en) { - new_clocks->ref_dtbclk_khz = 0; + new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000; } /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 997ab031f816da0186ce3cf32fb21f0c33ac2cc4..5260ad6de8038e591bf75e6f2ab1dfd2ad08fb64 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1070,6 +1070,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) int i, j; struct dc_state *dangling_context = dc_create_state(dc); struct dc_state *current_ctx; + struct pipe_ctx *pipe; if (dangling_context == NULL) return; @@ -1112,6 +1113,16 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) } if (should_disable && old_stream) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + /* When disabling plane for a phantom pipe, we must turn on the + * phantom OTG so the disable programming gets the double buffer + * update. Otherwise the pipe will be left in a partially disabled + * state that can result in underflow or hang when enabling it + * again for different use. 
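+ *
+ * (A phantom pipe carries the SubVP phantom stream, i.e. the
+ * mall_stream_config.type == SUBVP_PHANTOM case handled below; its OTG is
+ * normally kept off, hence the explicit enable before the disable.)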
+ */ + if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { + pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); + } dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); @@ -1760,6 +1771,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_count == 0) dc->hwss.prepare_bandwidth(dc, context); + /* When SubVP is active, all HW programming must be done while + * SubVP lock is acquired + */ + if (dc->hwss.subvp_pipe_control_lock) + dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); + if (dc->debug.enable_double_buffered_dsc_pg_support) dc->hwss.update_dsc_pg(dc, context, false); @@ -1787,9 +1804,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); } - if (dc->hwss.subvp_pipe_control_lock) - dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); - result = dc->hwss.apply_ctx_to_hw(dc, context); if (result != DC_OK) { @@ -3576,7 +3590,6 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); bool force_minimal_pipe_splitting = false; - uint32_t i; *is_plane_addition = false; @@ -3608,27 +3621,11 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, } } - /* For SubVP pipe split case when adding MPO video - * we need to add a minimal transition. In this case - * there will be 2 streams (1 main stream, 1 phantom - * stream). + /* For SubVP when adding MPO video we need to add a minimal transition. */ - if (cur_stream_status && - dc->current_state->stream_count == 2 && - stream->mall_stream_config.type == SUBVP_MAIN) { - bool is_pipe_split = false; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream && - (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe || - dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) { - is_pipe_split = true; - break; - } - } - + if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) { /* determine if minimal transition is required due to SubVP*/ - if (surface_count > 0 && is_pipe_split) { + if (surface_count > 0) { if (cur_stream_status->plane_count > surface_count) { force_minimal_pipe_splitting = true; } else if (cur_stream_status->plane_count < surface_count) { @@ -3650,10 +3647,32 @@ static bool commit_minimal_transition_state(struct dc *dc, bool temp_subvp_policy; enum dc_status ret = DC_ERROR_UNEXPECTED; unsigned int i, j; + unsigned int pipe_in_use = 0; if (!transition_context) return false; + /* check current pipes in use*/ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) + pipe_in_use++; + } + + /* When the OS add a new surface if we have been used all of pipes with odm combine + * and mpc split feature, it need use commit_minimal_transition_state to transition safely. + * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need + * call it again. Otherwise return true to skip. + * + * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially + * enter/exit MPO when DCN still have enough resources. 
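+ *
+ * In short: if any pipe in the pool is still unused, the new surface can
+ * be committed directly and the minimal transition below is skipped; it is
+ * only taken when every pipe already carries a plane.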
+ */ + if (pipe_in_use != dc->res_pool->pipe_count) { + dc_release_state(transition_context); + return true; + } + if (!dc->config.is_vmin_only_asic) { tmp_mpc_policy = dc->debug.pipe_split_policy; dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c index fc6aa098bda06fb36b76262a146e66bc19caaff6..8db9f75144662eb5598689fa9c759d0c3dba7021 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c @@ -1128,6 +1128,7 @@ struct resource_pool *dce60_create_resource_pool( if (dce60_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1325,6 +1326,7 @@ struct resource_pool *dce61_create_resource_pool( if (dce61_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1518,6 +1520,7 @@ struct resource_pool *dce64_create_resource_pool( if (dce64_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index b28025960050c2af4c42371050629608c2d1e6a5..5825e6f412bd7642f8264c4b5c3440be1b8c73e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -1137,6 +1137,7 @@ struct resource_pool *dce80_create_resource_pool( if (dce80_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } @@ -1336,6 +1337,7 @@ struct resource_pool *dce81_create_resource_pool( if (dce81_construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 11e4c4e46947348af7121425aa362153292aa75c..c06538c37a11f1024a6e67e85ed6caa178bfbd07 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -867,6 +867,32 @@ static void false_optc_underflow_wa( tg->funcs->clear_optc_underflow(tg); } +static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) +{ + struct pipe_ctx *other_pipe; + int vready_offset = pipe->pipe_dlg_param.vready_offset; + + /* Always use the largest vready_offset of all connected pipes */ + for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + + return vready_offset; +} + enum dc_status dcn10_enable_stream_timing( 
struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -910,7 +936,7 @@ enum dc_status dcn10_enable_stream_timing( pipe_ctx->stream_res.tg->funcs->program_timing( pipe_ctx->stream_res.tg, &stream->timing, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width, @@ -2900,7 +2926,7 @@ void dcn10_program_pipe( pipe_ctx->stream_res.tg->funcs->program_global_sync( pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a7e0001a8f46dca488244114b1436e1d6f4d81dc..f348bc15a92568c8bde75647c7d41390a38368b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1616,6 +1616,31 @@ static void dcn20_update_dchubp_dpp( hubp->funcs->phantom_hubp_post_enable(hubp); } +static int calculate_vready_offset_for_group(struct pipe_ctx *pipe) +{ + struct pipe_ctx *other_pipe; + int vready_offset = pipe->pipe_dlg_param.vready_offset; + + /* Always use the largest vready_offset of all connected pipes */ + for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) { + if (other_pipe->pipe_dlg_param.vready_offset > vready_offset) + vready_offset = other_pipe->pipe_dlg_param.vready_offset; + } + + return vready_offset; +} static void dcn20_program_pipe( struct dc *dc, @@ -1634,16 +1659,14 @@ static void dcn20_program_pipe( && !pipe_ctx->prev_odm_pipe) { pipe_ctx->stream_res.tg->funcs->program_global_sync( pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { - pipe_ctx->stream_res.tg->funcs->wait_for_state( - pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); - pipe_ctx->stream_res.tg->funcs->wait_for_state( - pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); } pipe_ctx->stream_res.tg->funcs->set_vtg_params( @@ -2037,7 +2060,7 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg->funcs->program_global_sync( pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, + calculate_vready_offset_for_group(pipe_ctx), pipe_ctx->pipe_dlg_param.vstartup_start, 
pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c index df4f251191424e730699249ef2d1657db5556d0c..e4472c6be6c3231a29aa161fcf948f627eb1201f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c @@ -225,11 +225,7 @@ static void dccg32_set_dtbclk_dto( } else { REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst], DTBCLK_DTO_ENABLE[params->otg_inst], 0, - PIPE_DTO_SRC_SEL[params->otg_inst], 1); - if (params->is_hdmi) - REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], - PIPE_DTO_SRC_SEL[params->otg_inst], 0); - + PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1); REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index d1598e3131f66d861099a6e29ad0e5318f1c731b..33ab6fdc3617560c635cfb3d02872f4b7dac8643 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1901,7 +1901,7 @@ int dcn32_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; if (context->stream_count == 1 && - context->stream_status[0].plane_count <= 1 && + context->stream_status[0].plane_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) && is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) && pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ && diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 2abe3967f7fbdeac93c94060c335846967685ee3..d1bf49d207de438a31269f07b04e6cd0cb9bfc08 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -531,9 +531,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc, unsigned int i, pipe_idx; struct pipe_ctx *pipe; uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines; + unsigned int num_dpp; unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel; unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel]; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; dc_assert_fp_enabled(); @@ -569,6 +571,11 @@ void dcn32_set_phantom_stream_timing(struct dc *dc, phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) + pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines; + // W/A for DCC corruption with certain high resolution timings. + // Determing if pipesplit is used. If so, add meta_row_height to the phantom vactive. + num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]]; + phantom_vactive += num_dpp > 1 ? 
vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0; + // For backporch of phantom pipe, use vstartup of the main pipe phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 9afd9ba23fb2af9d6cabc2454236e05957bec815..820042f6aaca579335e5b8f5ab4f90d80b730c44 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -670,6 +670,25 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->cursor_bw[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] / 8 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k]; } + v->NotEnoughDETSwathFillLatencyHiding = dml32_CalculateDETSwathFillLatencyHiding( + mode_lib->vba.NumberOfActiveSurfaces, + mode_lib->vba.ReturnBW, + v->UrgentLatency, + mode_lib->vba.SwathHeightY, + mode_lib->vba.SwathHeightC, + v->swath_width_luma_ub, + v->swath_width_chroma_ub, + v->BytePerPixelDETY, + v->BytePerPixelDETC, + mode_lib->vba.DETBufferSizeY, + mode_lib->vba.DETBufferSizeC, + mode_lib->vba.DPPPerPlane, + mode_lib->vba.HTotal, + mode_lib->vba.PixelClock, + mode_lib->vba.VRatio, + mode_lib->vba.VRatioChroma, + mode_lib->vba.UsesMALLForPStateChange); + for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { v->MaxVStartupLines[k] = ((mode_lib->vba.Interlace[k] && !mode_lib->vba.ProgressiveToInterlaceUnitInOPP) ? @@ -1664,6 +1683,7 @@ static void mode_support_configuration(struct vba_vars_st *v, && mode_lib->vba.PTEBufferSizeNotExceeded[i][j] == true && mode_lib->vba.DCCMetaBufferSizeNotExceeded[i][j] == true && mode_lib->vba.NonsupportedDSCInputBPC == false + && mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] == false && !mode_lib->vba.ExceededMALLSize && ((mode_lib->vba.HostVMEnable == false && !mode_lib->vba.ImmediateFlipRequiredFinal) @@ -3158,6 +3178,25 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.UrgentBurstFactorChroma, mode_lib->vba.UrgentBurstFactorCursor); + mode_lib->vba.NotEnoughDETSwathFillLatencyHidingPerState[i][j] = dml32_CalculateDETSwathFillLatencyHiding( + mode_lib->vba.NumberOfActiveSurfaces, + mode_lib->vba.ReturnBWPerState[i][j], + mode_lib->vba.UrgLatency[i], + mode_lib->vba.SwathHeightYThisState, + mode_lib->vba.SwathHeightCThisState, + mode_lib->vba.swath_width_luma_ub_this_state, + mode_lib->vba.swath_width_chroma_ub_this_state, + mode_lib->vba.BytePerPixelInDETY, + mode_lib->vba.BytePerPixelInDETC, + mode_lib->vba.DETBufferSizeYThisState, + mode_lib->vba.DETBufferSizeCThisState, + mode_lib->vba.NoOfDPPThisState, + mode_lib->vba.HTotal, + mode_lib->vba.PixelClock, + mode_lib->vba.VRatio, + mode_lib->vba.VRatioChroma, + mode_lib->vba.UsesMALLForPStateChange); + v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.VMDataOnlyReturnBWPerState = dml32_get_return_bw_mbps_vm_only(&mode_lib->vba.soc, i, mode_lib->vba.DCFCLKState[i][j], mode_lib->vba.FabricClockPerState[i], mode_lib->vba.DRAMSpeedPerState[i]); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index debe46b24a3e14addc4aa7c43158979e10c32c8f..b53feeaf5cf117031d531410830292f4dc9306f9 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -6228,3 +6228,72 @@ void dml32_CalculateImmediateFlipBandwithSupport(unsigned int NumberOfActiveSurf *ImmediateFlipBandwidthSupport = (*TotalBandwidth <= ReturnBW); *FractionOfUrgentBandwidth = *TotalBandwidth / ReturnBW; } + +bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces, + double ReturnBW, + double UrgentLatency, + unsigned int SwathHeightY[], + unsigned int SwathHeightC[], + unsigned int SwathWidthY[], + unsigned int SwathWidthC[], + double BytePerPixelInDETY[], + double BytePerPixelInDETC[], + unsigned int DETBufferSizeY[], + unsigned int DETBufferSizeC[], + unsigned int NumOfDPP[], + unsigned int HTotal[], + double PixelClock[], + double VRatioY[], + double VRatioC[], + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]) +{ + int k; + double SwathSizeAllSurfaces = 0; + double SwathSizeAllSurfacesInFetchTimeUs; + double DETSwathLatencyHidingUs; + double DETSwathLatencyHidingYUs; + double DETSwathLatencyHidingCUs; + double SwathSizePerSurfaceY[DC__NUM_DPP__MAX]; + double SwathSizePerSurfaceC[DC__NUM_DPP__MAX]; + bool NotEnoughDETSwathFillLatencyHiding = false; + + /* calculate sum of single swath size for all pipes in bytes */ + for (k = 0; k < NumberOfActiveSurfaces; k++) { + SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k]; + + if (SwathHeightC[k] != 0) + SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k]; + else + SwathSizePerSurfaceC[k] = 0; + + SwathSizeAllSurfaces += SwathSizePerSurfaceY[k] + SwathSizePerSurfaceC[k]; + } + + SwathSizeAllSurfacesInFetchTimeUs = SwathSizeAllSurfaces / ReturnBW + UrgentLatency; + + /* ensure all DET - 1 swath can hide a fetch for all surfaces */ + for (k = 0; k < NumberOfActiveSurfaces; k++) { + double LineTime = HTotal[k] / PixelClock[k]; + + /* only care if surface is not phantom */ + if (UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) { + DETSwathLatencyHidingYUs = (dml_floor(DETBufferSizeY[k] / BytePerPixelInDETY[k] / SwathWidthY[k], 1.0) - SwathHeightY[k]) / VRatioY[k] * LineTime; + + if (SwathHeightC[k] != 0) { + DETSwathLatencyHidingCUs = (dml_floor(DETBufferSizeC[k] / BytePerPixelInDETC[k] / SwathWidthC[k], 1.0) - SwathHeightC[k]) / VRatioC[k] * LineTime; + + DETSwathLatencyHidingUs = dml_min(DETSwathLatencyHidingYUs, DETSwathLatencyHidingCUs); + } else { + DETSwathLatencyHidingUs = DETSwathLatencyHidingYUs; + } + + /* DET must be able to hide time to fetch 1 swath for each surface */ + if (DETSwathLatencyHidingUs < SwathSizeAllSurfacesInFetchTimeUs) { + NotEnoughDETSwathFillLatencyHiding = true; + break; + } + } + } + + return NotEnoughDETSwathFillLatencyHiding; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 3989c2a28faec6680392c9c4a56575a00e0be10f..779c6805f59977ffd8533f4ef56d9efab3564f2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -1141,4 +1141,22 @@ void dml32_CalculateImmediateFlipBandwithSupport(unsigned int NumberOfActiveSurf double *FractionOfUrgentBandwidth, bool *ImmediateFlipBandwidthSupport); +bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurfaces, + double 
ReturnBW, + double UrgentLatency, + unsigned int SwathHeightY[], + unsigned int SwathHeightC[], + unsigned int SwathWidthY[], + unsigned int SwathWidthC[], + double BytePerPixelInDETY[], + double BytePerPixelInDETC[], + unsigned int DETBufferSizeY[], + unsigned int DETBufferSizeC[], + unsigned int NumOfDPP[], + unsigned int HTotal[], + double PixelClock[], + double VRatioY[], + double VRatioC[], + enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index a0207a8f875651d37aa9885757b61c4a8c0b534c..2b34b02dbd4599e2b569bc1998475a7148a75a21 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -1041,6 +1041,7 @@ struct vba_vars_st { double MinFullDETBufferingTime; double AverageReadBandwidthGBytePerSecond; bool FirstMainPlane; + bool NotEnoughDETSwathFillLatencyHiding; unsigned int ViewportWidthChroma[DC__NUM_DPP__MAX]; unsigned int ViewportHeightChroma[DC__NUM_DPP__MAX]; @@ -1224,6 +1225,7 @@ struct vba_vars_st { unsigned int BlockWidthC[DC__NUM_DPP__MAX]; unsigned int SubViewportLinesNeededInMALL[DC__NUM_DPP__MAX]; bool VActiveBandwithSupport[DC__VOLTAGE_STATES][2]; + bool NotEnoughDETSwathFillLatencyHidingPerState[DC__VOLTAGE_STATES][2]; struct dummy_vars dummy_vars; }; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index a40ead44778af4386b68aabadff85e7744453597..d18162e9ed1da177dc02c162c040ca31af8c2441 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -354,7 +354,8 @@ struct amd_pm_funcs { int (*get_power_profile_mode)(void *handle, char *buf); int (*set_power_profile_mode)(void *handle, long *input, uint32_t size); int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size); - int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size); + int (*odn_edit_dpm_table)(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state); int (*smu_i2c_bus_access)(void *handle, bool acquire); int (*gfx_state_change_set)(void *handle, uint32_t state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index ec055858eb95abc6b9c120b805ab2573f8847763..1159ae114dd02160645990dba6d008052d155e34 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -838,7 +838,8 @@ static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, u return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size); } -static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size) +static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c index 67d7da0b6fed5ff83efa645a7697b74e2a199c76..1d829402cd2e23461b10e59d8817bf9d47357cb9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c @@ -75,8 +75,10 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) for (i = 0; i < table_entries; i++) { result = 
hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); if (result) { + kfree(hwmgr->current_ps); kfree(hwmgr->request_ps); kfree(hwmgr->ps); + hwmgr->current_ps = NULL; hwmgr->request_ps = NULL; hwmgr->ps = NULL; return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 190af79f3236fdb097b6c9e521ad8aac76bbbbe4..dad3e3741a4e8a5b2852cc7ae700eb3a55274005 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -67,21 +67,22 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr, uint32_t *speed) { - struct amdgpu_device *adev = hwmgr->adev; - uint32_t duty100, duty; - uint64_t tmp64; + uint32_t current_rpm; + uint32_t percent = 0; - duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), - CG_FDO_CTRL1, FMAX_DUTY100); - duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), - CG_THERMAL_STATUS, FDO_PWM_DUTY); + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; - if (!duty100) - return -EINVAL; + if (vega10_get_current_rpm(hwmgr, ¤t_rpm)) + return -1; + + if (hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM != 0) + percent = current_rpm * 255 / + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM; - tmp64 = (uint64_t)duty * 255; - do_div(tmp64, duty100); - *speed = MIN((uint32_t)tmp64, 255); + *speed = MIN(percent, 255); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 97b3ad36904676cf6675fd0a7c631fc1527afc0f..b30684c84e20e497bb5784c35d933759486395ba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -2961,7 +2961,8 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, data->od8_settings.od8_settings_array; OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table); - int32_t input_index, input_clk, input_vol, i; + int32_t input_clk, input_vol, i; + uint32_t input_index; int od8_id; int ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index b76f0f7e429981edb5b7da856165fe35b91d621b..d6b964cf73bd18fe6c6a7519b2da57e679dc4454 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -522,9 +522,9 @@ typedef enum { TEMP_HOTSPOT_M, TEMP_MEM, TEMP_VR_GFX, + TEMP_VR_SOC, TEMP_VR_MEM0, TEMP_VR_MEM1, - TEMP_VR_SOC, TEMP_VR_U, TEMP_LIQUID0, TEMP_LIQUID1, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 865d6358918d27a0a8152836cf2892a6b4e8f4de..a9122b3b153224d6ac1e1035c3756a8d69d1b008 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -28,6 +28,7 @@ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08 +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 
70b560737687e95db48dac437eae24705c4504e3..ad5f6a15a1d7d51d52423f2556db83b130a5de17 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1588,6 +1588,10 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) return false; + /* return true if ASIC is in BACO state already */ + if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) + return true; + /* Arcturus does not support this bit mask */ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 89f0f6eb19f3d12d7077a436961a0d1841c208e7..8e4830a311bde422788695af79e36d499642a83d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -289,6 +289,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; break; case IP_VERSION(13, 0, 0): + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0; + break; case IP_VERSION(13, 0, 10): smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10; break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index f0121d171630198a7297d2d8e6cea9fc90836b8e..b8430601304f0f729de9410ea275cae5d2571e05 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -187,6 +187,8 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = FEA_MAP(MEM_TEMP_READ), FEA_MAP(ATHUB_MMHUB_PG), FEA_MAP(SOC_PCC), + [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, }; static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { @@ -517,6 +519,23 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) dpm_table); if (ret) return ret; + + /* + * Update the reported maximum shader clock to the value + * which can be guarded to be achieved on all cards. This + * is aligned with Window setting. And considering that value + * might be not the peak frequency the card can achieve, it + * is normal some real-time clock frequency can overtake this + * labelled maximum clock frequency(for example in pp_dpm_sclk + * sysfs output). 
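The comment added above explains why the advertised top gfxclk DPM level is clamped to the firmware-reported GameClockAc before it is exposed to userspace, and the code that follows applies exactly that clamp. As a standalone illustration of the clamping step only, here is a minimal C sketch; the struct layout and names below are simplified stand-ins, not the real SMU13 PPTable_t/dpm_table types.

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified stand-ins for the driver's DPM level table. */
  struct dpm_level {
          uint32_t value;         /* clock in MHz */
  };

  struct dpm_table {
          struct dpm_level levels[8];
          unsigned int count;
          uint32_t max;
  };

  /*
   * Clamp the advertised top level to the firmware-reported AC game
   * clock, if such a limit exists and is lower than the raw table max.
   */
  static void clamp_reported_max(struct dpm_table *t, uint32_t game_clock_ac)
  {
          if (!game_clock_ac || !t->count)
                  return;

          if (t->levels[t->count - 1].value > game_clock_ac) {
                  t->levels[t->count - 1].value = game_clock_ac;
                  t->max = game_clock_ac;
          }
  }

  int main(void)
  {
          struct dpm_table gfx = {
                  .levels = { { 500 }, { 1800 }, { 2600 } },
                  .count = 3,
                  .max = 2600,
          };

          clamp_reported_max(&gfx, 2500);         /* GameClockAc = 2500 MHz */
          printf("reported max sclk: %u MHz\n", (unsigned int)gfx.max);
          return 0;
  }

With these example numbers the advertised peak drops from 2600 MHz to 2500 MHz, while the hardware may still boost past that label at runtime, which is what the pp_dpm_sclk remark above refers to.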
+ */ + if (skutable->DriverReportedClocks.GameClockAc && + (dpm_table->dpm_levels[dpm_table->count - 1].value > + skutable->DriverReportedClocks.GameClockAc)) { + dpm_table->dpm_levels[dpm_table->count - 1].value = + skutable->DriverReportedClocks.GameClockAc; + dpm_table->max = skutable->DriverReportedClocks.GameClockAc; + } } else { dpm_table->count = 1; dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; @@ -779,6 +798,57 @@ static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu, return ret; } +static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + struct smu_13_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *dpm_table; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + /* uclk dpm table */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + /* gfxclk dpm table */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + /* socclk dpm table */ + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + /* fclk dpm table */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + case SMU_VCLK1: + /* vclk dpm table */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + case SMU_DCLK1: + /* dclk dpm table */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + dev_err(smu->adev->dev, "Unsupported clock type!\n"); + return -EINVAL; + } + + if (min) + *min = dpm_table->min; + if (max) + *max = dpm_table->max; + + return 0; +} + static int smu_v13_0_0_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, @@ -1281,9 +1351,17 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) &dpm_context->dpm_tables.fclk_table; struct smu_umd_pstate_table *pstate_table = &smu->pstate_table; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + DriverReportedClocks_t driver_clocks = + pptable->SkuTable.DriverReportedClocks; pstate_table->gfxclk_pstate.min = gfx_table->min; - pstate_table->gfxclk_pstate.peak = gfx_table->max; + if (driver_clocks.GameClockAc && + (driver_clocks.GameClockAc < gfx_table->max)) + pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; + else + pstate_table->gfxclk_pstate.peak = gfx_table->max; pstate_table->uclk_pstate.min = mem_table->min; pstate_table->uclk_pstate.peak = mem_table->max; @@ -1300,12 +1378,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu) pstate_table->fclk_pstate.min = fclk_table->min; pstate_table->fclk_pstate.peak = fclk_table->max; - /* - * For now, just use the mininum clock frequency. 
- * TODO: update them when the real pstate settings available - */ - pstate_table->gfxclk_pstate.standard = gfx_table->min; - pstate_table->uclk_pstate.standard = mem_table->min; + if (driver_clocks.BaseClockAc && + driver_clocks.BaseClockAc < gfx_table->max) + pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; + else + pstate_table->gfxclk_pstate.standard = gfx_table->max; + pstate_table->uclk_pstate.standard = mem_table->max; pstate_table->socclk_pstate.standard = soc_table->min; pstate_table->vclk_pstate.standard = vclk_table->min; pstate_table->dclk_pstate.standard = dclk_table->min; @@ -1339,12 +1417,23 @@ static void smu_v13_0_0_get_unique_id(struct smu_context *smu) static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu, uint32_t *speed) { + int ret; + if (!speed) return -EINVAL; - return smu_v13_0_0_get_smu_metrics_data(smu, - METRICS_CURR_FANPWM, - speed); + ret = smu_v13_0_0_get_smu_metrics_data(smu, + METRICS_CURR_FANPWM, + speed); + if (ret) { + dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); + return ret; + } + + /* Convert the PMFW output which is in percent to pwm(255) based */ + *speed = MIN(*speed * 255 / 100, 255); + + return 0; } static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu, @@ -1813,7 +1902,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_enabled_mask = smu_cmn_get_enabled_mask, .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable, .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable, - .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq, .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, .read_sensor = smu_v13_0_0_read_sensor, .feature_is_enabled = smu_cmn_feature_is_enabled, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index d74debc584f89abec9bba396e8988d7d8b0573c0..222924363a68172a95934300e4bea9cb551ae5fd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -189,6 +189,8 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = FEA_MAP(MEM_TEMP_READ), FEA_MAP(ATHUB_MMHUB_PG), FEA_MAP(SOC_PCC), + [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, }; static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { @@ -1359,12 +1361,23 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu, uint32_t *speed) { + int ret; + if (!speed) return -EINVAL; - return smu_v13_0_7_get_smu_metrics_data(smu, - METRICS_CURR_FANPWM, - speed); + ret = smu_v13_0_7_get_smu_metrics_data(smu, + METRICS_CURR_FANPWM, + speed); + if (ret) { + dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); + return ret; + } + + /* Convert the PMFW output which is in percent to pwm(255) based */ + *speed = MIN(*speed * 255 / 100, 255); + + return 0; } static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu, @@ -1436,7 +1449,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu, static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf) { - DpmActivityMonitorCoeffIntExternal_t activity_monitor_external[PP_SMC_POWER_PROFILE_COUNT]; + DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external; uint32_t i, j, size = 0; int16_t workload_type = 0; int result = 0; @@ -1444,6 +1457,12 @@ static 
int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf if (!buf) return -EINVAL; + activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT, + sizeof(*activity_monitor_external), + GFP_KERNEL); + if (!activity_monitor_external) + return -ENOMEM; + size += sysfs_emit_at(buf, size, " "); for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i], @@ -1456,15 +1475,17 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf workload_type = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_WORKLOAD, i); - if (workload_type < 0) - return -EINVAL; + if (workload_type < 0) { + result = -EINVAL; + goto out; + } result = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, (void *)(&activity_monitor_external[i]), false); if (result) { dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); - return result; + goto out; } } @@ -1492,7 +1513,10 @@ do { \ PRINT_DPM_MONITOR(Fclk_BoosterFreq); #undef PRINT_DPM_MONITOR - return size; + result = size; +out: + kfree(activity_monitor_external); + return result; } static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 94de73cbeb2ddd893652e947648360e838a61ebe..17445800248dde845b5cfb7ee849eb2d3afbb288 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -402,7 +402,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) void adv7533_dsi_power_on(struct adv7511 *adv); void adv7533_dsi_power_off(struct adv7511 *adv); -void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); +enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, + const struct drm_display_mode *mode); int adv7533_patch_registers(struct adv7511 *adv); int adv7533_patch_cec_registers(struct adv7511 *adv); int adv7533_attach_dsi(struct adv7511 *adv); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index f887200e8abc9e751bb43030095ebfa90f48336d..78b72739e5c3e8c4b2b89045d5dac826a5c34762 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) } static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { if (mode->clock > 165000) return MODE_CLOCK_HIGH; @@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511, regmap_update_bits(adv7511->regmap, 0x17, 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); - if (adv7511->type == ADV7533 || adv7511->type == ADV7535) - adv7533_mode_set(adv7511, adj_mode); - drm_mode_copy(&adv7511->curr_mode, adj_mode); /* @@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge, adv7511_mode_set(adv, mode, adj_mode); } +static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct adv7511 *adv = bridge_to_adv7511(bridge); + + if (adv->type == ADV7533 || adv->type == ADV7535) + return adv7533_mode_valid(adv, mode); + else + return adv7511_mode_valid(adv, mode); +} + static int adv7511_bridge_attach(struct drm_bridge *bridge, enum 
drm_bridge_attach_flags flags) { @@ -960,6 +969,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = { .enable = adv7511_bridge_enable, .disable = adv7511_bridge_disable, .mode_set = adv7511_bridge_mode_set, + .mode_valid = adv7511_bridge_mode_valid, .attach = adv7511_bridge_attach, .detect = adv7511_bridge_detect, .get_edid = adv7511_bridge_get_edid, diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index ef6270806d1d39115554a6c0a9b4d261589bfdb1..258c79d4dab0a5546e85d95c439be64e1b68efd2 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv) regmap_write(adv->regmap_cec, 0x27, 0x0b); } -void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode) +enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, + const struct drm_display_mode *mode) { + int lanes; struct mipi_dsi_device *dsi = adv->dsi; - int lanes, ret; - - if (adv->num_dsi_lanes != 4) - return; if (mode->clock > 80000) lanes = 4; else lanes = 3; - if (lanes != dsi->lanes) { - mipi_dsi_detach(dsi); - dsi->lanes = lanes; - ret = mipi_dsi_attach(dsi); - if (ret) - dev_err(&dsi->dev, "failed to change host lanes\n"); - } + /* + * TODO: add support for dynamic switching of lanes + * by using the bridge pre_enable() op . Till then filter + * out the modes which shall need different number of lanes + * than what was configured in the device tree. + */ + if (lanes != dsi->lanes) + return MODE_BAD; + + return MODE_OK; } int adv7533_patch_registers(struct adv7511 *adv) diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index dfe4351c9bdd36b1637dc355ac85ae1817406202..99123eec45511ec62256ebb863776758935ac1a7 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -2860,10 +2860,7 @@ static int it6505_bridge_attach(struct drm_bridge *bridge, } /* Register aux channel */ - it6505->aux.name = "DP-AUX"; - it6505->aux.dev = dev; it6505->aux.drm_dev = bridge->dev; - it6505->aux.transfer = it6505_aux_transfer; ret = drm_dp_aux_register(&it6505->aux); @@ -3316,6 +3313,11 @@ static int it6505_i2c_probe(struct i2c_client *client, DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev)); debugfs_init(it6505); + it6505->aux.name = "DP-AUX"; + it6505->aux.dev = dev; + it6505->aux.transfer = it6505_aux_transfer; + drm_dp_aux_init(&it6505->aux); + it6505->bridge.funcs = &it6505_bridge_funcs; it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 98cc3137c0625d53500cd57c819678b4ab094844..02b4a7dc92f5e4f3069935cffc996bf31558e57a 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -945,7 +945,6 @@ int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state, bool can_disable_primary_planes) { struct drm_device *dev = crtc_state->crtc->dev; - struct drm_atomic_state *state = crtc_state->state; if (!crtc_state->enable) return 0; @@ -956,14 +955,7 @@ int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state, struct drm_plane *plane; drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { - struct drm_plane_state *plane_state; - - if (plane->type != DRM_PLANE_TYPE_PRIMARY) - continue; - plane_state = drm_atomic_get_plane_state(state, plane); - if 
(IS_ERR(plane_state)) - return PTR_ERR(plane_state); - if (plane_state->fb && plane_state->crtc) { + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { has_primary_plane = true; break; } diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 61c29ce74b0357c3331286aaec7e2bb4728d1b8c..27de2a97f1d11afda9f86524e8fbb056e96f1886 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -582,6 +582,9 @@ void drm_connector_cleanup(struct drm_connector *connector) mutex_destroy(&connector->mutex); memset(connector, 0, sizeof(*connector)); + + if (dev->registered) + drm_sysfs_hotplug_event(dev); } EXPORT_SYMBOL(drm_connector_cleanup); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4005dab6147d9bb37d172926d5322e11b01263c7..b36abfa915813b01a9cbdb80e817ad0c80d489e7 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -87,6 +87,8 @@ static int oui(u8 first, u8 second, u8 third) #define EDID_QUIRK_FORCE_10BPC (1 << 11) /* Non desktop display (i.e. HMD) */ #define EDID_QUIRK_NON_DESKTOP (1 << 12) +/* Cap the DSC target bitrate to 15bpp */ +#define EDID_QUIRK_CAP_DSC_15BPP (1 << 13) #define MICROSOFT_IEEE_OUI 0xca125c @@ -147,6 +149,12 @@ static const struct edid_quirk { EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 | EDID_QUIRK_DETAILED_IN_CM), + /* LG 27GP950 */ + EDID_QUIRK('G', 'S', 'M', 0x5bbf, EDID_QUIRK_CAP_DSC_15BPP), + + /* LG 27GN950 */ + EDID_QUIRK('G', 'S', 'M', 0x5b9a, EDID_QUIRK_CAP_DSC_15BPP), + /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */ EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC), @@ -6166,6 +6174,7 @@ static void drm_reset_display_info(struct drm_connector *connector) info->mso_stream_count = 0; info->mso_pixel_overlap = 0; + info->max_dsc_bpp = 0; } static u32 update_display_info(struct drm_connector *connector, @@ -6252,6 +6261,9 @@ static u32 update_display_info(struct drm_connector *connector, info->non_desktop = true; } + if (quirks & EDID_QUIRK_CAP_DSC_15BPP) + info->max_dsc_bpp = 15; + return quirks; } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index e09331bb3bc73f21385b47a1c723f69e3abaad91..6242dfbe92402e7dca4c9ffdccde428196f4e0a6 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -297,12 +297,12 @@ const struct drm_format_info *__drm_format_info(u32 format) .vsub = 2, .is_yuv = true }, { .format = DRM_FORMAT_Q410, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Q401, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true}, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index cc386f8a7116e6d9bb5504872917281b65903954..5cf13e52f7c9416c525ba5d3ce88b233927253b9 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -258,7 +258,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( if (mapping->use == 0) { 
mutex_lock(&mmu_context->lock); if (mapping->context == mmu_context) - mapping->use += 1; + if (va && mapping->iova != va) { + etnaviv_iommu_reap_mapping(mapping); + mapping = NULL; + } else { + mapping->use += 1; + } else mapping = NULL; mutex_unlock(&mmu_context->lock); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 37018bc55810d1385c455ba0b274f79162b33faa..f667e7906d1f4e6b9f651d1d75e83eb1da5e8778 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -416,6 +416,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) if (gpu->identity.model == chipModel_GC700) gpu->identity.features &= ~chipFeatures_FAST_CLEAR; + /* These models/revisions don't have the 2D pipe bit */ + if ((gpu->identity.model == chipModel_GC500 && + gpu->identity.revision <= 2) || + gpu->identity.model == chipModel_GC300) + gpu->identity.features |= chipFeatures_PIPE_2D; + if ((gpu->identity.model == chipModel_GC500 && gpu->identity.revision < 2) || (gpu->identity.model == chipModel_GC300 && @@ -449,8 +455,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); } - /* GC600 idle register reports zero bits where modules aren't present */ - if (gpu->identity.model == chipModel_GC600) + /* GC600/300 idle register reports zero bits where modules aren't present */ + if (gpu->identity.model == chipModel_GC600 || + gpu->identity.model == chipModel_GC300) gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | VIVS_HI_IDLE_STATE_RA | VIVS_HI_IDLE_STATE_SE | diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index dc1aa738c4f18ad28cf7162215fd9976e65e2a02..55479cb8b1ac3cd55e66fedc7a01f4e95ad1817a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -135,6 +135,19 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, drm_mm_remove_node(&mapping->vram_node); } +void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping) +{ + struct etnaviv_iommu_context *context = mapping->context; + + lockdep_assert_held(&context->lock); + WARN_ON(mapping->use); + + etnaviv_iommu_remove_mapping(context, mapping); + etnaviv_iommu_context_put(mapping->context); + mapping->context = NULL; + list_del_init(&mapping->mmu_node); +} + static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, struct drm_mm_node *node, size_t size) { @@ -202,10 +215,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, * this mapping. 
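The etnaviv_gem_mapping_get() change above only reuses a cached, idle mapping when it already sits at the softpin address userspace asked for; on a mismatch the stale mapping is reaped via the new etnaviv_iommu_reap_mapping() helper so it can be rebuilt at the requested address. A minimal model of that reuse-or-reap decision is sketched below; the types and helpers are invented for illustration and are not the etnaviv API, and the refcount/locking handling of the real code is omitted.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Invented, simplified stand-in for a cached GPU VA mapping. */
  struct cached_mapping {
          uint64_t iova;          /* address the BO is currently mapped at */
          unsigned int use;       /* active users of this mapping */
          bool valid;
  };

  /* Tear the mapping down so it can be rebuilt at a new address. */
  static void reap_mapping(struct cached_mapping *m)
  {
          m->valid = false;
          m->iova = 0;
  }

  /*
   * An idle cached mapping may only be reused when the caller either has
   * no address preference (want_va == 0) or the mapping already lives at
   * the requested softpin address.
   */
  static bool can_reuse(const struct cached_mapping *m, uint64_t want_va)
  {
          return m->valid && (!want_va || m->iova == want_va);
  }

  int main(void)
  {
          struct cached_mapping a = { .iova = 0x100000, .valid = true };
          struct cached_mapping b = { .iova = 0x100000, .valid = true };

          if (can_reuse(&a, 0))                   /* no preference: reuse */
                  a.use++;

          if (!can_reuse(&b, 0x200000))           /* different VA: reap */
                  reap_mapping(&b);

          printf("a: use=%u  b: valid=%d\n", a.use, (int)b.valid);
          return 0;
  }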
*/ list_for_each_entry_safe(m, n, &list, scan_node) { - etnaviv_iommu_remove_mapping(context, m); - etnaviv_iommu_context_put(m->context); - m->context = NULL; - list_del_init(&m->mmu_node); + etnaviv_iommu_reap_mapping(m); list_del_init(&m->scan_node); } @@ -257,10 +267,7 @@ static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context, } list_for_each_entry_safe(m, n, &scan_list, scan_node) { - etnaviv_iommu_remove_mapping(context, m); - etnaviv_iommu_context_put(m->context); - m->context = NULL; - list_del_init(&m->mmu_node); + etnaviv_iommu_reap_mapping(m); list_del_init(&m->scan_node); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index e4a0b7d09c2eab6f39498d03123e575b60cd4198..c01a147f0dfddfbc2809b7d99ce85b23cd558983 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -91,6 +91,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping, u64 va); void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping); +void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping); int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx, struct etnaviv_vram_mapping *mapping, diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 4d4a715b429d1d4461b7a79debe829c940385655..2c2b92324a2e90fe13dcfa889986f00419c4be60 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -60,8 +60,9 @@ static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) return drm_panel_get_modes(fsl_connector->panel, connector); } -static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { if (mode->hdisplay & 0xf) return MODE_ERROR; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 28bdb936cd1fc12aaeec8d8723d8fc8a84dec1de..edbdb949b6ced70b98756c8add75f4e6070ddcb3 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -414,7 +414,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, ptrs->lvds_entries++; if (size != 0 || ptrs->lvds_entries != 3) { - kfree(ptrs); + kfree(ptrs_block); return NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 2b5bc95a8b0df467461b6c5ea05b2e0c610b620c..78b3427471bd7a95cd6c8980a458d9b06dcc080b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3675,61 +3675,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, } } -static void -intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); - enum pipe pipe = crtc->pipe; - u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; - - trans_ddi_func_ctl_value = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(pipe)); - trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); - dp_tp_ctl_value = 
intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); - - trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | - TGL_TRANS_DDI_PORT_MASK); - trans_conf_value &= ~PIPECONF_ENABLE; - dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; - - intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), - trans_ddi_func_ctl_value); - intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); -} - -static void -intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum port port = dig_port->base.port; - struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); - enum pipe pipe = crtc->pipe; - u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; - - trans_ddi_func_ctl_value = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(pipe)); - trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); - dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); - - trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | - TGL_TRANS_DDI_SELECT_PORT(port); - trans_conf_value |= PIPECONF_ENABLE; - dp_tp_ctl_value |= DP_TP_CTL_ENABLE; - - intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); - intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), - trans_ddi_func_ctl_value); -} - static void intel_dp_process_phy_request(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -3748,14 +3693,10 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status); - intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); - intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); intel_dp_phy_pattern_update(intel_dp, crtc_state); - intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); - drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, intel_dp->train_set, crtc_state->lane_count); diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 75e8cc4337c93b2e806bc0423bf6a3745ac4592c..2cbc1292ab3821f557e09ed349091c3685a87804 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -41,9 +41,11 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_dsi_vbt.h" +#include "intel_gmbus_regs.h" #include "vlv_dsi.h" #include "vlv_dsi_regs.h" #include "vlv_sideband.h" @@ -137,9 +139,9 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, return ffs(intel_dsi->ports) - 1; if (seq_port) { - if (intel_dsi->ports & PORT_B) + if (intel_dsi->ports & BIT(PORT_B)) return PORT_B; - else if (intel_dsi->ports & PORT_C) + else if (intel_dsi->ports & BIT(PORT_C)) return PORT_C; } @@ -377,6 +379,85 @@ static void icl_exec_gpio(struct intel_connector *connector, drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n"); } +enum { + MIPI_RESET_1 = 0, + MIPI_AVDD_EN_1, + MIPI_BKLT_EN_1, + MIPI_AVEE_EN_1, + MIPI_VIO_EN_1, + MIPI_RESET_2, + MIPI_AVDD_EN_2, + MIPI_BKLT_EN_2, + MIPI_AVEE_EN_2, + MIPI_VIO_EN_2, +}; + +static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, + int gpio, bool value) +{ + int index; + + if (drm_WARN_ON(&dev_priv->drm, 
DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2)) + return; + + switch (gpio) { + case MIPI_RESET_1: + case MIPI_RESET_2: + index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B; + + /* + * Disable HPD to set the pin to output, and set output + * value. The HPD pin should not be enabled for DSI anyway, + * assuming the board design and VBT are sane, and the pin isn't + * used by a non-DSI encoder. + * + * The locking protects against concurrent SHOTPLUG_CTL_DDI + * modifications in irq setup and handling. + */ + spin_lock_irq(&dev_priv->irq_lock); + intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI, + SHOTPLUG_CTL_DDI_HPD_ENABLE(index) | + SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index), + value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0); + spin_unlock_irq(&dev_priv->irq_lock); + break; + case MIPI_AVDD_EN_1: + case MIPI_AVDD_EN_2: + index = gpio == MIPI_AVDD_EN_1 ? 0 : 1; + + intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON, + value ? PANEL_POWER_ON : 0); + break; + case MIPI_BKLT_EN_1: + case MIPI_BKLT_EN_2: + index = gpio == MIPI_BKLT_EN_1 ? 0 : 1; + + intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE, + value ? EDP_BLC_ENABLE : 0); + break; + case MIPI_AVEE_EN_1: + case MIPI_AVEE_EN_2: + index = gpio == MIPI_AVEE_EN_1 ? 1 : 2; + + intel_de_rmw(dev_priv, GPIO(dev_priv, index), + GPIO_CLOCK_VAL_OUT, + GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT | + GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0)); + break; + case MIPI_VIO_EN_1: + case MIPI_VIO_EN_2: + index = gpio == MIPI_VIO_EN_1 ? 1 : 2; + + intel_de_rmw(dev_priv, GPIO(dev_priv, index), + GPIO_DATA_VAL_OUT, + GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT | + GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0)); + break; + default: + MISSING_CASE(gpio); + } +} + static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; @@ -384,8 +465,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) struct intel_connector *connector = intel_dsi->attached_connector; u8 gpio_source, gpio_index = 0, gpio_number; bool value; - - drm_dbg_kms(&dev_priv->drm, "\n"); + bool native = DISPLAY_VER(dev_priv) >= 11; if (connector->panel.vbt.dsi.seq_version >= 3) gpio_index = *data++; @@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) else gpio_source = 0; + if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1)) + native = false; + /* pull up/down */ value = *data++ & 1; - if (DISPLAY_VER(dev_priv) >= 11) + drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", + gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value)); + + if (native) + icl_native_gpio_set_value(dev_priv, gpio_number, value); + else if (DISPLAY_VER(dev_priv) >= 11) icl_exec_gpio(connector, gpio_source, gpio_index, value); else if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(connector, gpio_source, gpio_number, value); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 845023c14eb36f1b525d943fe632e58da95f82d8..f461e34cc5f0762fbb771f84f75d1cffeaa3cf6d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -729,32 +729,69 @@ static int eb_reserve(struct i915_execbuffer *eb) bool unpinned; /* - * Attempt to pin all of the buffers into the GTT. - * This is done in 2 phases: + * We have one more buffers that we couldn't bind, which could be due to + * various reasons. 
To resolve this we have 4 passes, with every next + * level turning the screws tighter: * - * 1. Unbind all objects that do not match the GTT constraints for - * the execbuffer (fenceable, mappable, alignment etc). - * 2. Bind new objects. + * 0. Unbind all objects that do not match the GTT constraints for the + * execbuffer (fenceable, mappable, alignment etc). Bind all new + * objects. This avoids unnecessary unbinding of later objects in order + * to make room for the earlier objects *unless* we need to defragment. * - * This avoid unnecessary unbinding of later objects in order to make - * room for the earlier objects *unless* we need to defragment. + * 1. Reorder the buffers, where objects with the most restrictive + * placement requirements go first (ignoring fixed location buffers for + * now). For example, objects needing the mappable aperture (the first + * 256M of GTT), should go first vs objects that can be placed just + * about anywhere. Repeat the previous pass. * - * Defragmenting is skipped if all objects are pinned at a fixed location. + * 2. Consider buffers that are pinned at a fixed location. Also try to + * evict the entire VM this time, leaving only objects that we were + * unable to lock. Try again to bind the buffers. (still using the new + * buffer order). + * + * 3. We likely have object lock contention for one or more stubborn + * objects in the VM, for which we need to evict to make forward + * progress (perhaps we are fighting the shrinker?). When evicting the + * VM this time around, anything that we can't lock we now track using + * the busy_bo, using the full lock (after dropping the vm->mutex to + * prevent deadlocks), instead of trylock. We then continue to evict the + * VM, this time with the stubborn object locked, which we can now + * hopefully unbind (if still bound in the VM). Repeat until the VM is + * evicted. Finally we should be able bind everything. 
*/ - for (pass = 0; pass <= 2; pass++) { + for (pass = 0; pass <= 3; pass++) { int pin_flags = PIN_USER | PIN_VALIDATE; if (pass == 0) pin_flags |= PIN_NONBLOCK; if (pass >= 1) - unpinned = eb_unbind(eb, pass == 2); + unpinned = eb_unbind(eb, pass >= 2); if (pass == 2) { err = mutex_lock_interruptible(&eb->context->vm->mutex); if (!err) { - err = i915_gem_evict_vm(eb->context->vm, &eb->ww); + err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL); + mutex_unlock(&eb->context->vm->mutex); + } + if (err) + return err; + } + + if (pass == 3) { +retry: + err = mutex_lock_interruptible(&eb->context->vm->mutex); + if (!err) { + struct drm_i915_gem_object *busy_bo = NULL; + + err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo); mutex_unlock(&eb->context->vm->mutex); + if (err && busy_bo) { + err = i915_gem_object_lock(busy_bo, &eb->ww); + i915_gem_object_put(busy_bo); + if (!err) + goto retry; + } } if (err) return err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 73d9eda1d6b7a6fdd237da6bff783e67495648cc..354c1d6dab8462d5bd93427f04cc11e59619d96a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -369,7 +369,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) if (vma == ERR_PTR(-ENOSPC)) { ret = mutex_lock_interruptible(&ggtt->vm.mutex); if (!ret) { - ret = i915_gem_evict_vm(&ggtt->vm, &ww); + ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); mutex_unlock(&ggtt->vm.mutex); } if (ret) @@ -413,7 +413,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf) vma->mmo = mmo; if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) - intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, + intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); if (write) { @@ -557,11 +557,13 @@ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object * drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); - if (obj->userfault_count) { - /* rpm wakeref provide exclusive access */ - list_del(&obj->userfault_link); - obj->userfault_count = 0; - } + /* + * We have exclusive access here via runtime suspend. All other callers + * must first grab the rpm wakeref. 
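The new pass 3 added to eb_reserve() above resolves lock contention by remembering the object it failed to trylock (busy_bo), dropping the VM mutex, taking the full lock on that one object, and then retrying the eviction. The same escalation pattern, stripped of the i915 specifics, is sketched below with pthreads; all names here are invented for illustration, and the real code additionally keeps the stubborn object locked across the retry via the ww context.

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct object {
          pthread_mutex_t lock;
          const char *name;
  };

  /*
   * Evict every object using trylock only. On contention, report the
   * busy object to the caller instead of blocking while the VM-wide
   * mutex is still held (which could deadlock).
   */
  static bool evict_all_trylock(struct object *objs, int n,
                                pthread_mutex_t *vm_lock,
                                struct object **busy_out)
  {
          pthread_mutex_lock(vm_lock);
          for (int i = 0; i < n; i++) {
                  if (pthread_mutex_trylock(&objs[i].lock)) {
                          *busy_out = &objs[i];   /* contended: escalate */
                          pthread_mutex_unlock(vm_lock);
                          return false;
                  }
                  /* ... unbind/evict objs[i] here ... */
                  pthread_mutex_unlock(&objs[i].lock);
          }
          pthread_mutex_unlock(vm_lock);
          return true;
  }

  static void evict_vm(struct object *objs, int n, pthread_mutex_t *vm_lock)
  {
          struct object *busy;

          while (!evict_all_trylock(objs, n, vm_lock, &busy)) {
                  /*
                   * Escalate: with the VM mutex dropped it is safe to
                   * block on the stubborn object, then retry the pass.
                   */
                  pthread_mutex_lock(&busy->lock);
                  printf("waited for %s\n", busy->name);
                  pthread_mutex_unlock(&busy->lock);
          }
  }

  int main(void)                                  /* build with -pthread */
  {
          pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
          struct object objs[2] = {
                  { PTHREAD_MUTEX_INITIALIZER, "bo0" },
                  { PTHREAD_MUTEX_INITIALIZER, "bo1" },
          };

          evict_vm(objs, 2, &vm_lock);
          printf("vm evicted\n");
          return 0;
  }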
+ */ + GEM_BUG_ON(!obj->userfault_count); + list_del(&obj->userfault_link); + obj->userfault_count = 0; } void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) @@ -587,13 +589,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) spin_lock(&obj->mmo.lock); } spin_unlock(&obj->mmo.lock); - - if (obj->userfault_count) { - mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); - list_del(&obj->userfault_link); - mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); - obj->userfault_count = 0; - } } static struct i915_mmap_offset * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 369006c5317f27e78816aeb3514d8cbc9fd151c1..a40bc17acead8cd5d5045c18253494fc538730f0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -761,6 +761,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj) if (!HAS_FLAT_CCS(to_i915(obj->base.dev))) return false; + if (obj->flags & I915_BO_ALLOC_CCS_AUX) + return true; + for (i = 0; i < obj->mm.n_placements; i++) { /* Compression is not allowed for the objects with smem placement */ if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index d0d6772e6f36a2e9002b035f91a772a4de773af9..ab4c2f90a56436c9dd8e8aa14a6b1c14867df0cf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -327,16 +327,18 @@ struct drm_i915_gem_object { * dealing with userspace objects the CPU fault handler is free to ignore this. */ #define I915_BO_ALLOC_GPU_ONLY BIT(6) +#define I915_BO_ALLOC_CCS_AUX BIT(7) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \ I915_BO_ALLOC_VOLATILE | \ I915_BO_ALLOC_CPU_CLEAR | \ I915_BO_ALLOC_USER | \ I915_BO_ALLOC_PM_VOLATILE | \ I915_BO_ALLOC_PM_EARLY | \ - I915_BO_ALLOC_GPU_ONLY) -#define I915_BO_READONLY BIT(7) -#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */ -#define I915_BO_PROTECTED BIT(9) + I915_BO_ALLOC_GPU_ONLY | \ + I915_BO_ALLOC_CCS_AUX) +#define I915_BO_READONLY BIT(8) +#define I915_TILING_QUIRK_BIT 9 /* unknown swizzling; do not release! 
*/ +#define I915_BO_PROTECTED BIT(10) /** * @mem_flags - Mutable placement-related flags * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 3428f735e786c01d9a0dfc214312678dd7fea229..8d30db5e678c444c0e6c979c26b075969041ea6d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) { GEM_TRACE("%s\n", dev_name(i915->drm.dev)); - intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0); + intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0); flush_workqueue(i915->wq); /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 0d6d640225fc8bfc44c274d92390a1ba8c6cd691..be4c081e7e13d716701ec09f4f34e1ab860a9077 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -279,7 +279,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, struct i915_ttm_tt *i915_tt; int ret; - if (!obj) + if (i915_ttm_is_ghost_object(bo)) return NULL; i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL); @@ -362,7 +362,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - if (!obj) + if (i915_ttm_is_ghost_object(bo)) return false; /* @@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags) static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - intel_wakeref_t wakeref = 0; - - if (bo->resource && likely(obj)) { - /* ttm_bo_release() already has dma_resv_lock */ - if (i915_ttm_cpu_maps_iomem(bo->resource)) - wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); + if (bo->resource && !i915_ttm_is_ghost_object(bo)) { __i915_gem_object_pages_fini(obj); - - if (wakeref) - intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); - i915_ttm_free_cached_io_rsgt(obj); } } @@ -628,7 +619,7 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); int ret; - if (!obj) + if (i915_ttm_is_ghost_object(bo)) return; ret = i915_ttm_move_notify(bo); @@ -661,7 +652,7 @@ static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo); bool unknown_state; - if (!obj) + if (i915_ttm_is_ghost_object(mem->bo)) return -EINVAL; if (!kref_get_unless_zero(&obj->base.refcount)) @@ -694,7 +685,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long base; unsigned int ofs; - GEM_BUG_ON(!obj); + GEM_BUG_ON(i915_ttm_is_ghost_object(bo)); GEM_WARN_ON(bo->ttm); base = obj->mm.region->iomap.base - obj->mm.region->region.start; @@ -994,13 +985,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) struct vm_area_struct *area = vmf->vma; struct ttm_buffer_object *bo = area->vm_private_data; struct drm_device *dev = bo->base.dev; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); intel_wakeref_t wakeref = 0; vm_fault_t ret; int idx; - obj = i915_ttm_to_gem(bo); - if (!obj) + if (i915_ttm_is_ghost_object(bo)) return VM_FAULT_SIGBUS; /* Sanity check that we allow writing into this object */ @@ -1057,16 +1047,19 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) goto out_rpm; - /* 
ttm_bo_vm_reserve() already has dma_resv_lock */ + /* + * ttm_bo_vm_reserve() already has dma_resv_lock. + * userfault_count is protected by dma_resv lock and rpm wakeref. + */ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) { obj->userfault_count = 1; - mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); - list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list); - mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); + list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list); + spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); } if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) - intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref, + intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); i915_ttm_adjust_lru(obj); @@ -1098,7 +1091,7 @@ static void ttm_vm_open(struct vm_area_struct *vma) struct drm_i915_gem_object *obj = i915_ttm_to_gem(vma->vm_private_data); - GEM_BUG_ON(!obj); + GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data)); i915_gem_object_get(obj); } @@ -1107,7 +1100,7 @@ static void ttm_vm_close(struct vm_area_struct *vma) struct drm_i915_gem_object *obj = i915_ttm_to_gem(vma->vm_private_data); - GEM_BUG_ON(!obj); + GEM_BUG_ON(i915_ttm_is_ghost_object(vma->vm_private_data)); i915_gem_object_put(obj); } @@ -1128,7 +1121,27 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj) { + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + intel_wakeref_t wakeref = 0; + + assert_object_held_shared(obj); + + if (i915_ttm_cpu_maps_iomem(bo->resource)) { + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); + + /* userfault_count is protected by obj lock and rpm wakeref. */ + if (obj->userfault_count) { + spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); + list_del(&obj->userfault_link); + spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock); + obj->userfault_count = 0; + } + } + ttm_bo_unmap_virtual(i915_gem_to_ttm(obj)); + + if (wakeref) + intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); } static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h index e4842b4296fc2123382ccdd63858b67d02901b00..2a94a99ef76b4f4556e5b8b2c79f435cbfc790da 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h @@ -27,19 +27,27 @@ i915_gem_to_ttm(struct drm_i915_gem_object *obj) */ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo); +/** + * i915_ttm_is_ghost_object - Check if the ttm bo is a ghost object. + * @bo: Pointer to the ttm buffer object + * + * Return: True if the ttm bo is not an i915 object but a ghost ttm object, + * False otherwise. + */ +static inline bool i915_ttm_is_ghost_object(struct ttm_buffer_object *bo) +{ + return bo->destroy != i915_ttm_bo_destroy; +} + /** * i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding * struct drm_i915_gem_object. * - * Return: Pointer to the embedding struct ttm_buffer_object, or NULL - * if the object was not an i915 ttm object. + * Return: Pointer to the embedding struct drm_i915_gem_object.
*/ static inline struct drm_i915_gem_object * i915_ttm_to_gem(struct ttm_buffer_object *bo) { - if (bo->destroy != i915_ttm_bo_destroy) - return NULL; - return container_of(bo, struct drm_i915_gem_object, __do_not_access); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c index 9a7e50534b84bb9023f1760ec8af90ff50250a34..f59f812dc6d29a6b8451a229b8fa348d0391f4f6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -560,7 +560,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, bool clear; int ret; - if (GEM_WARN_ON(!obj)) { + if (GEM_WARN_ON(i915_ttm_is_ghost_object(bo))) { ttm_bo_move_null(bo, dst_mem); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c index 07e49f22f2de392ceed08e2630972b5472f22924..7e67742bc65e09d29fa3e1c30abfb15f795f49d9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -50,6 +50,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, container_of(bo->bdev, typeof(*i915), bdev); struct drm_i915_gem_object *backup; struct ttm_operation_ctx ctx = {}; + unsigned int flags; int err = 0; if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup) @@ -65,7 +66,22 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, if (obj->flags & I915_BO_ALLOC_PM_VOLATILE) return 0; - backup = i915_gem_object_create_shmem(i915, obj->base.size); + /* + * It seems that we might have some framebuffers still pinned at this + * stage, but for such objects we might also need to deal with the CCS + * aux state. Make sure we force the save/restore of the CCS state, + * otherwise we might observe display corruption, when returning from + * suspend. 
+ */ + flags = 0; + if (i915_gem_object_needs_ccs_pages(obj)) { + WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj)); + WARN_ON_ONCE(!pm_apply->allow_gpu); + + flags = I915_BO_ALLOC_CCS_AUX; + } + backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM], + obj->base.size, 0, flags); if (IS_ERR(backup)) return PTR_ERR(backup); diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 04e435bce79bdfc731b0f4bf834e3bd06381e2da..cbc8b857d5f7a0948897b12e154564395a16f282 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -348,4 +348,10 @@ intel_engine_get_hung_context(struct intel_engine_cs *engine) return engine->hung_ce; } +u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value); +u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value); +u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value); +u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value); +u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value); + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 1f7188129cd1f6a1f15a91f15121d42a333f43ce..83bfeb872bdaa20e038a1bfce2383706ff5f1cd2 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -486,6 +486,17 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, engine->logical_mask = BIT(logical_instance); __sprint_engine_name(engine); + if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) && + __ffs(CCS_MASK(engine->gt)) == engine->instance) || + engine->class == RENDER_CLASS) + engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE; + + /* features common between engines sharing EUs */ + if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) { + engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE; + engine->flags |= I915_ENGINE_HAS_EU_PRIORITY; + } + engine->props.heartbeat_interval_ms = CONFIG_DRM_I915_HEARTBEAT_INTERVAL; engine->props.max_busywait_duration_ns = @@ -498,19 +509,28 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, CONFIG_DRM_I915_TIMESLICE_DURATION; /* Override to uninterruptible for OpenCL workloads. 
*/ - if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS) + if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)) engine->props.preempt_timeout_ms = 0; - if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) && - __ffs(CCS_MASK(engine->gt)) == engine->instance) || - engine->class == RENDER_CLASS) - engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE; - - /* features common between engines sharing EUs */ - if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) { - engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE; - engine->flags |= I915_ENGINE_HAS_EU_PRIORITY; - } + /* Cap properties according to any system limits */ +#define CLAMP_PROP(field) \ + do { \ + u64 clamp = intel_clamp_##field(engine, engine->props.field); \ + if (clamp != engine->props.field) { \ + drm_notice(&engine->i915->drm, \ + "Warning, clamping %s to %lld to prevent overflow\n", \ + #field, clamp); \ + engine->props.field = clamp; \ + } \ + } while (0) + + CLAMP_PROP(heartbeat_interval_ms); + CLAMP_PROP(max_busywait_duration_ns); + CLAMP_PROP(preempt_timeout_ms); + CLAMP_PROP(stop_timeout_ms); + CLAMP_PROP(timeslice_duration_ms); + +#undef CLAMP_PROP engine->defaults = engine->props; /* never to change again */ @@ -534,6 +554,55 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, return 0; } +u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value) +{ + value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); + + return value; +} + +u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value) +{ + value = min(value, jiffies_to_nsecs(2)); + + return value; +} + +u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value) +{ + /* + * NB: The GuC API only supports 32bit values. However, the limit is further + * reduced due to internal calculations which would otherwise overflow. + */ + if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) + value = min_t(u64, value, guc_policy_max_preempt_timeout_ms()); + + value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); + + return value; +} + +u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value) +{ + value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); + + return value; +} + +u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value) +{ + /* + * NB: The GuC API only supports 32bit values. However, the limit is further + * reduced due to internal calculations which would otherwise overflow. 
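As a rough illustration of the arithmetic behind these clamps (a standalone sketch, not driver code): the GuC policy fields are 32-bit microsecond values and the driver multiplies the millisecond properties by 1000 before programming them, so anything above UINT_MAX / 1000, roughly 4,294,967 ms, would already overflow the field, and the firmware's clock-tick conversion tightens that further to the 100 s ceiling returned by guc_policy_max_exec_quantum_ms() and guc_policy_max_preempt_timeout_ms() defined later in this series.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Standalone sketch: the two ceilings that bound a GuC timeslice or
	 * preempt timeout. GUC_POLICY_MAX_EXEC_QUANTUM_US mirrors the value
	 * added to intel_guc_fwif.h further down in this patch.
	 */
	#define GUC_POLICY_MAX_EXEC_QUANTUM_US (100 * 1000 * 1000UL)

	int main(void)
	{
		/* Largest millisecond value whose microsecond form still fits in u32. */
		uint64_t u32_limit_ms = UINT32_MAX / 1000ULL;
		/* Tighter 100 s limit imposed by the GuC clock-tick conversion. */
		uint64_t guc_limit_ms = GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000;

		printf("u32 microsecond field overflows above %llu ms\n",
		       (unsigned long long)u32_limit_ms);
		printf("GuC policy ceiling: %llu ms\n",
		       (unsigned long long)guc_limit_ms);
		return 0;
	}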
+ */ + if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) + value = min_t(u64, value, guc_policy_max_exec_quantum_ms()); + + value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); + + return value; +} + static void __setup_engine_capabilities(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 7caa3412a24468c42f2686e1d33908a50fac9dec..c7db49749a636f9fe66a3b3bc1a255efa7b57906 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt) { spin_lock_init(gt->irq_lock); - INIT_LIST_HEAD(&gt->lmem_userfault_list); - mutex_init(&gt->lmem_userfault_lock); INIT_LIST_HEAD(&gt->closed_vma); spin_lock_init(&gt->closed_lock); @@ -812,7 +810,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) } intel_uncore_init_early(gt->uncore, gt); - intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm); ret = intel_uncore_setup_mmio(gt->uncore, phys_addr); if (ret) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index f19c2de77ff667ecd99cd242ef78d3f18a97c8ba..184ee9b11a4dac78508c8a24585bcc3a02dfef24 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -141,20 +141,6 @@ struct intel_gt { struct intel_wakeref wakeref; atomic_t user_wakeref; - /** - * Protects access to lmem usefault list. - * It is required, if we are outside of the runtime suspend path, - * access to @lmem_userfault_list requires always first grabbing the - * runtime pm, to ensure we can't race against runtime suspend. - * Once we have that we also need to grab @lmem_userfault_lock, - * at which point we have exclusive access. - * The runtime suspend path is special since it doesn't really hold any locks, - * but instead has exclusive access by virtue of all other accesses requiring - * holding the runtime pm wakeref.
- */ - struct mutex lmem_userfault_lock; - struct list_head lmem_userfault_list; - struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ @@ -170,9 +156,6 @@ struct intel_gt { */ intel_wakeref_t awake; - /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */ - struct intel_wakeref_auto userfault_wakeref; - u32 clock_frequency; u32 clock_period_ns; diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index aaaf1906026c1351b15cb4f74608fee41478f9b9..ee072c7d62eb18dd53dc1137cf3512d4333d11f8 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -341,6 +341,16 @@ static int emit_no_arbitration(struct i915_request *rq) return 0; } +static int max_pte_pkt_size(struct i915_request *rq, int pkt) +{ + struct intel_ring *ring = rq->ring; + + pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5); + pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); + + return pkt; +} + static int emit_pte(struct i915_request *rq, struct sgt_dma *it, enum i915_cache_level cache_level, @@ -387,8 +397,7 @@ static int emit_pte(struct i915_request *rq, return PTR_ERR(cs); /* Pack as many PTE updates as possible into a single MI command */ - pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5); - pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); + pkt = max_pte_pkt_size(rq, dword_length); hdr = cs; *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */ @@ -421,8 +430,7 @@ static int emit_pte(struct i915_request *rq, } } - pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5); - pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5); + pkt = max_pte_pkt_size(rq, dword_rem); hdr = cs; *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c index 9670310562029c005480528a1b996e39413ef6f6..f2d9858d827c23bd3a5bb16a4feaf8d1c9c7b4b8 100644 --- a/drivers/gpu/drm/i915/gt/sysfs_engines.c +++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c @@ -144,7 +144,7 @@ max_spin_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct intel_engine_cs *engine = kobj_to_engine(kobj); - unsigned long long duration; + unsigned long long duration, clamped; int err; /* @@ -168,7 +168,8 @@ max_spin_store(struct kobject *kobj, struct kobj_attribute *attr, if (err) return err; - if (duration > jiffies_to_nsecs(2)) + clamped = intel_clamp_max_busywait_duration_ns(engine, duration); + if (duration != clamped) return -EINVAL; WRITE_ONCE(engine->props.max_busywait_duration_ns, duration); @@ -203,7 +204,7 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct intel_engine_cs *engine = kobj_to_engine(kobj); - unsigned long long duration; + unsigned long long duration, clamped; int err; /* @@ -218,7 +219,8 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr, if (err) return err; - if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)) + clamped = intel_clamp_timeslice_duration_ms(engine, duration); + if (duration != clamped) return -EINVAL; WRITE_ONCE(engine->props.timeslice_duration_ms, duration); @@ -256,7 +258,7 @@ stop_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct intel_engine_cs *engine = kobj_to_engine(kobj); - unsigned long long duration; + unsigned long long duration, clamped; int err; /* @@ -272,7 
+274,8 @@ stop_store(struct kobject *kobj, struct kobj_attribute *attr, if (err) return err; - if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)) + clamped = intel_clamp_stop_timeout_ms(engine, duration); + if (duration != clamped) return -EINVAL; WRITE_ONCE(engine->props.stop_timeout_ms, duration); @@ -306,7 +309,7 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct intel_engine_cs *engine = kobj_to_engine(kobj); - unsigned long long timeout; + unsigned long long timeout, clamped; int err; /* @@ -322,7 +325,8 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr, if (err) return err; - if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)) + clamped = intel_clamp_preempt_timeout_ms(engine, timeout); + if (timeout != clamped) return -EINVAL; WRITE_ONCE(engine->props.preempt_timeout_ms, timeout); @@ -362,7 +366,7 @@ heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct intel_engine_cs *engine = kobj_to_engine(kobj); - unsigned long long delay; + unsigned long long delay, clamped; int err; /* @@ -379,7 +383,8 @@ heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr, if (err) return err; - if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)) + clamped = intel_clamp_heartbeat_interval_ms(engine, delay); + if (delay != clamped) return -EINVAL; err = intel_engine_set_heartbeat(engine, delay); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c index 8f116514601318aa6a62f75c39f36509a7a75d54..685ddccc0f26ae2774f9672b33814eb052b6e12a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c @@ -165,7 +165,7 @@ static const struct __guc_mmio_reg_descr empty_regs_list[] = { } /* List of lists */ -static struct __guc_mmio_reg_descr_group default_lists[] = { +static const struct __guc_mmio_reg_descr_group default_lists[] = { MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0), MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS), MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS), @@ -419,6 +419,44 @@ guc_capture_get_device_reglist(struct intel_guc *guc) return default_lists; } +static const char * +__stringify_type(u32 type) +{ + switch (type) { + case GUC_CAPTURE_LIST_TYPE_GLOBAL: + return "Global"; + case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS: + return "Class"; + case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE: + return "Instance"; + default: + break; + } + + return "unknown"; +} + +static const char * +__stringify_engclass(u32 class) +{ + switch (class) { + case GUC_RENDER_CLASS: + return "Render"; + case GUC_VIDEO_CLASS: + return "Video"; + case GUC_VIDEOENHANCE_CLASS: + return "VideoEnhance"; + case GUC_BLITTER_CLASS: + return "Blitter"; + case GUC_COMPUTE_CLASS: + return "Compute"; + default: + break; + } + + return "unknown"; +} + static int guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid, struct guc_mmio_reg *ptr, u16 num_entries) @@ -482,32 +520,55 @@ guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u return num_regs; } -int -intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, - size_t *size) +static int +guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, + size_t *size, bool is_purpose_est) { struct intel_guc_state_capture *gc = guc->capture; + struct drm_i915_private *i915 = 
guc_to_gt(guc)->i915; struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid]; int num_regs; - if (!gc->reglists) + if (!gc->reglists) { + drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n"); return -ENODEV; + } if (cache->is_valid) { *size = cache->size; return cache->status; } + if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF && + !guc_capture_get_one_list(gc->reglists, owner, type, classid)) { + if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL) + drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n"); + else + drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n", + __stringify_type(type), type, + __stringify_engclass(classid), classid); + return -ENODATA; + } + num_regs = guc_cap_list_num_regs(gc, owner, type, classid); + /* intentional empty lists can exist depending on hw config */ if (!num_regs) return -ENODATA; - *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) + - (num_regs * sizeof(struct guc_mmio_reg))); + if (size) + *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) + + (num_regs * sizeof(struct guc_mmio_reg))); return 0; } +int +intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid, + size_t *size) +{ + return guc_capture_getlistsize(guc, owner, type, classid, size, false); +} + static void guc_capture_create_prealloc_nodes(struct intel_guc *guc); int @@ -606,7 +667,7 @@ guc_capture_output_min_size_est(struct intel_guc *guc) struct intel_gt *gt = guc_to_gt(guc); struct intel_engine_cs *engine; enum intel_engine_id id; - int worst_min_size = 0, num_regs = 0; + int worst_min_size = 0; size_t tmp = 0; if (!guc->capture) @@ -627,21 +688,19 @@ guc_capture_output_min_size_est(struct intel_guc *guc) worst_min_size += sizeof(struct guc_state_capture_group_header_t) + (3 * sizeof(struct guc_state_capture_header_t)); - if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp)) - num_regs += tmp; + if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true)) + worst_min_size += tmp; - if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, - engine->class, &tmp)) { - num_regs += tmp; + if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, + engine->class, &tmp, true)) { + worst_min_size += tmp; } - if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE, - engine->class, &tmp)) { - num_regs += tmp; + if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE, + engine->class, &tmp, true)) { + worst_min_size += tmp; } } - worst_min_size += (num_regs * sizeof(struct guc_mmio_reg)); - return worst_min_size; } @@ -658,15 +717,23 @@ static void check_guc_capture_size(struct intel_guc *guc) int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER; u32 buffer_size = intel_guc_log_section_size_capture(&guc->log); + /* + * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB) + * Additionally, it's based on space needed to fit all engines getting reset at once + * within the same G2H handler task slot. This is very unlikely. However, if GuC really + * does run out of space for whatever reason, we will see a separate warning message + * when processing the G2H event capture-notification, search for: + * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
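The severity ladder applied by check_guc_capture_size() below can be modelled in isolation as in the following sketch; it is illustrative only, classify_capture_buffer() is a made-up name, and the multiplier argument merely stands in for GUC_CAPTURE_OVERBUFFER_MULTIPLIER, whose value is not shown in this hunk.

	#include <stdio.h>

	/* Sketch of the post-patch severity ladder: failure to estimate and a
	 * buffer below the worst-case estimate stay warnings, while merely
	 * missing the extra head-room is demoted to a debug message. */
	static void classify_capture_buffer(int min_size, int buffer_size, int multiplier)
	{
		int spare_size = min_size * multiplier;

		if (min_size < 0)
			printf("warn: failed to estimate minimum size (%d)\n", min_size);
		else if (min_size > buffer_size)
			printf("warn: buffer %d may be too small (min %d)\n",
			       buffer_size, min_size);
		else if (spare_size > buffer_size)
			printf("dbg: buffer %d lacks spare head-room (%d, min %d)\n",
			       buffer_size, spare_size, min_size);
		else
			printf("ok: buffer %d is comfortably sized\n", buffer_size);
	}

	int main(void)
	{
		/* Roughly the DG2 numbers quoted in the comment above; the
		 * multiplier value here is arbitrary. */
		classify_capture_buffer(80 * 1024, 1024 * 1024, 3);
		return 0;
	}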
+ */ if (min_size < 0) drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n", min_size); else if (min_size > buffer_size) - drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n", + drm_warn(&i915->drm, "GuC error state capture buffer may be too small: %d < %d\n", buffer_size, min_size); else if (spare_size > buffer_size) - drm_notice(&i915->drm, "GuC error state capture buffer maybe too small: %d < %d (min = %d)\n", - buffer_size, spare_size, min_size); + drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n", + buffer_size, spare_size, min_size); } /* diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 323b055e5db97e9c8543a95c4347bc278b9b869e..502e7cb5a302552a65626c38be541a5ba8a3bc37 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -305,6 +305,27 @@ struct guc_update_context_policy { #define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000 +/* + * GuC converts the timeout to clock ticks internally. Different platforms have + * different GuC clocks. Thus, the maximum value before overflow is platform + * dependent. Current worst case scenario is about 110s. So, the spec says to + * limit to 100s to be safe. + */ +#define GUC_POLICY_MAX_EXEC_QUANTUM_US (100 * 1000 * 1000UL) +#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_US (100 * 1000 * 1000UL) + +static inline u32 guc_policy_max_exec_quantum_ms(void) +{ + BUILD_BUG_ON(GUC_POLICY_MAX_EXEC_QUANTUM_US >= UINT_MAX); + return GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000; +} + +static inline u32 guc_policy_max_preempt_timeout_ms(void) +{ + BUILD_BUG_ON(GUC_POLICY_MAX_PREEMPT_TIMEOUT_US >= UINT_MAX); + return GUC_POLICY_MAX_PREEMPT_TIMEOUT_US / 1000; +} + struct guc_policies { u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES]; /* In micro seconds. How much time to allow before DPC processing is diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 55d3ef93e86f856b4e04f10f249af80240bc71d7..68331c538b0a7959425fc914a0facf391d9713f6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -16,15 +16,15 @@ #if defined(CONFIG_DRM_I915_DEBUG_GUC) #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M -#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M +#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M #elif defined(CONFIG_DRM_I915_DEBUG_GEM) #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M -#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M +#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M #else #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K -#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_2M +#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M #endif static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 1db59eeb34db9e02782b50900d660aff97803ec9..1a23e901cc6632547d1254a4adef3f6a434e3129 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -2429,6 +2429,10 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop) int ret; /* NB: For both of these, zero means disabled.
*/ + GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000, + execution_quantum)); + GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000, + preemption_timeout)); execution_quantum = engine->props.timeslice_duration_ms * 1000; preemption_timeout = engine->props.preempt_timeout_ms * 1000; @@ -2462,6 +2466,10 @@ static void guc_context_policy_init_v69(struct intel_engine_cs *engine, desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69; /* NB: For both of these, zero means disabled. */ + GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000, + desc->execution_quantum)); + GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000, + desc->preemption_timeout)); desc->execution_quantum = engine->props.timeslice_duration_ms * 1000; desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000; } diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 9f1c209d9251107f95cad7ea4c28e62cf505439f..e08ed0e9f1653e9b88690ba6319262686a9a7419 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -175,8 +175,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) { - debugfs_remove_recursive(vgpu->debugfs); - vgpu->debugfs = NULL; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root && gvt->debugfs_root) { + debugfs_remove_recursive(vgpu->debugfs); + vgpu->debugfs = NULL; + } } /** @@ -199,6 +204,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt) */ void intel_gvt_debugfs_clean(struct intel_gvt *gvt) { - debugfs_remove_recursive(gvt->debugfs_root); - gvt->debugfs_root = NULL; + struct drm_minor *minor = gvt->gt->i915->drm.primary; + + if (minor->debugfs_root) { + debugfs_remove_recursive(gvt->debugfs_root); + gvt->debugfs_root = NULL; + } } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index ce0eb03709c3f6fa7ef30fa8a6f2049d0db41159..80c60754a5c1c25a30920ca5987d67542f7553f5 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1214,10 +1214,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, for_each_shadow_entry(sub_spt, &sub_se, sub_index) { ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index, PAGE_SIZE, &dma_addr); - if (ret) { - ppgtt_invalidate_spt(spt); - return ret; - } + if (ret) + goto err; sub_se.val64 = se->val64; /* Copy the PAT field from PDE. */ @@ -1236,6 +1234,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, ops->set_pfn(se, sub_spt->shadow_page.mfn); ppgtt_set_shadow_entry(spt, se, index); return 0; +err: + /* Cancel the existing address mappings of DMA addr. */ + for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) { + gvt_vdbg_mm("invalidate 4K entry\n"); + ppgtt_invalidate_pte(sub_spt, &sub_se); + } + /* Release the newly allocated spt.
*/ + trace_spt_change(sub_spt->vgpu->id, "release", sub_spt, + sub_spt->guest_page.gfn, sub_spt->shadow_page.type); + ppgtt_free_spt(sub_spt); + return ret; } static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d6fe94cd0fdb611b2d0bacc33787f31a18c65963..8342d95f56cbc3d49dc70644e039ae4f94ad1564 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -696,6 +696,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload) if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || !workload->shadow_mm->ppgtt_mm.shadowed) { + intel_vgpu_unpin_mm(workload->shadow_mm); gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index f2a15d8155f4a1be085d86b7eb86d4ddfc018121..2ce30cff461a032e797faf8bea52db790f5f8b4f 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -1662,7 +1662,8 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_gt_runtime_resume(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_runtime_resume(gt); enable_rpm_wakeref_asserts(rpm); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2bdddb61ebd7ae6215eaa26706a2df3258d42171..38c26668b96022fa5972e0b9ddef72f33da1ca96 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -843,7 +843,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) __i915_gem_object_release_mmap_gtt(obj); list_for_each_entry_safe(obj, on, - &to_gt(i915)->lmem_userfault_list, userfault_link) + &i915->runtime_pm.lmem_userfault_list, userfault_link) i915_gem_object_runtime_pm_release_mmap_offset(obj); /* @@ -1128,6 +1128,8 @@ void i915_gem_drain_workqueue(struct drm_i915_private *i915) int i915_gem_init(struct drm_i915_private *dev_priv) { + struct intel_gt *gt; + unsigned int i; int ret; /* We need to fallback to 4K pages if host doesn't support huge gtt. */ @@ -1158,9 +1160,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) */ intel_init_clock_gating(dev_priv); - ret = intel_gt_init(to_gt(dev_priv)); - if (ret) - goto err_unlock; + for_each_gt(gt, dev_priv, i) { + ret = intel_gt_init(gt); + if (ret) + goto err_unlock; + } return 0; @@ -1173,8 +1177,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) err_unlock: i915_gem_drain_workqueue(dev_priv); - if (ret != -EIO) - intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); + if (ret != -EIO) { + for_each_gt(gt, dev_priv, i) { + intel_gt_driver_remove(gt); + intel_gt_driver_release(gt); + intel_uc_cleanup_firmwares(&gt->uc); + } + } if (ret == -EIO) { /* @@ -1182,10 +1191,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * as wedged. But we only want to do this when the GPU is angry, * for all other failure, such as an allocation failure, bail.
*/ - if (!intel_gt_is_wedged(to_gt(dev_priv))) { - i915_probe_error(dev_priv, - "Failed to initialize GPU, declaring it wedged!\n"); - intel_gt_set_wedged(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) { + if (!intel_gt_is_wedged(gt)) { + i915_probe_error(dev_priv, + "Failed to initialize GPU, declaring it wedged!\n"); + intel_gt_set_wedged(gt); + } } /* Minimal basic recovery for KMS */ @@ -1213,10 +1224,12 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915) void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { - intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref); + struct intel_gt *gt; + unsigned int i; i915_gem_suspend_late(dev_priv); - intel_gt_driver_remove(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_driver_remove(gt); dev_priv->uabi_engines = RB_ROOT; /* Flush any outstanding unpin_work. */ @@ -1227,9 +1240,13 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv) { - intel_gt_driver_release(to_gt(dev_priv)); + struct intel_gt *gt; + unsigned int i; - intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc); + for_each_gt(gt, dev_priv, i) { + intel_gt_driver_release(gt); + intel_uc_cleanup_firmwares(&gt->uc); + } /* Flush any outstanding work, including i915_gem_context.release_work. */ i915_gem_drain_workqueue(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index f025ee4fa52618cb280864b322b84f5cd8ae2c1f..a4b4d9b7d26c7abc302e88e7b770a9f9fcb9b190 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * @vm: Address space to cleanse * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm * will be able to evict vma's locked by the ww as well. + * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL and + * i915_gem_evict_vm() is unable to trylock an object for eviction, + * @busy_bo will point to it and -EBUSY is returned. The caller must drop + * vm->mutex before trying again to acquire the contended lock. The caller + * also owns a reference to the object. * * This function evicts all vmas from a vm. * @@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * To clarify: This is for freeing up virtual address space, not for freeing * memory in e.g. the shrinker. */ -int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) +int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, + struct drm_i915_gem_object **busy_bo) { int ret = 0; @@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) * the resv is shared among multiple objects, we still * need the object ref.
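To make the @busy_bo contract documented above concrete, a hypothetical caller could follow the retry protocol sketched below. This is not taken from the patch: it assumes the usual i915_gem_object_lock_interruptible()/i915_gem_object_unlock()/i915_gem_object_put() helpers, passes a NULL ww context for simplicity, and is meant as an illustration rather than compilable standalone code.

	struct drm_i915_gem_object *busy_bo = NULL;
	int err;

	do {
		mutex_lock(&vm->mutex);
		err = i915_gem_evict_vm(vm, NULL, &busy_bo);
		mutex_unlock(&vm->mutex);
		if (err != -EBUSY || !busy_bo)
			break;

		/*
		 * Resolve the contention outside vm->mutex: wait for the
		 * object lock, then drop the reference we were handed.
		 */
		err = i915_gem_object_lock_interruptible(busy_bo, NULL);
		if (!err)
			i915_gem_object_unlock(busy_bo);
		i915_gem_object_put(busy_bo);
		busy_bo = NULL;
	} while (!err);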
*/ - if (dying_vma(vma) || + if (!i915_gem_object_get_rcu(vma->obj) || (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) { __i915_vma_pin(vma); list_add(&vma->evict_link, &locked_eviction_list); continue; } - if (!i915_gem_object_trylock(vma->obj, ww)) + if (!i915_gem_object_trylock(vma->obj, ww)) { + if (busy_bo) { + *busy_bo = vma->obj; /* holds ref */ + ret = -EBUSY; + break; + } + i915_gem_object_put(vma->obj); continue; + } __i915_vma_pin(vma); list_add(&vma->evict_link, &eviction_list); @@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) if (list_empty(&eviction_list) && list_empty(&locked_eviction_list)) break; - ret = 0; /* Unbind locked objects first, before unlocking the eviction_list */ list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) { __i915_vma_unpin(vma); - if (ret == 0) + if (ret == 0) { ret = __i915_vma_unbind(vma); - if (ret != -EINTR) /* "Get me out of here!" */ - ret = 0; + if (ret != -EINTR) /* "Get me out of here!" */ + ret = 0; + } + if (!dying_vma(vma)) + i915_gem_object_put(vma->obj); } list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) { __i915_vma_unpin(vma); - if (ret == 0) + if (ret == 0) { ret = __i915_vma_unbind(vma); - if (ret != -EINTR) /* "Get me out of here!" */ - ret = 0; + if (ret != -EINTR) /* "Get me out of here!" */ + ret = 0; + } i915_gem_object_unlock(vma->obj); + i915_gem_object_put(vma->obj); } } while (ret == 0); diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h index e593c530f9bd7ac3be83a89733ff9e24ad471a47..bf0ee0e4fe60886eceac0786bc902d6398dae773 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.h +++ b/drivers/gpu/drm/i915/i915_gem_evict.h @@ -11,6 +11,7 @@ struct drm_mm_node; struct i915_address_space; struct i915_gem_ww_ctx; +struct drm_i915_gem_object; int __must_check i915_gem_evict_something(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww, @@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags); int i915_gem_evict_vm(struct i915_address_space *vm, - struct i915_gem_ww_ctx *ww); + struct i915_gem_ww_ctx *ww, + struct drm_i915_gem_object **busy_bo); #endif /* __I915_GEM_EVICT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 86a42d9e8041258d14002441bf4f0a0172449244..f93ffa6626a57b632eb7ee6f585f22fd50d9a72f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1981,8 +1981,11 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (ddi_hotplug_trigger) { u32 dig_hotplug_reg; + /* Locking due to DSI native GPIO sequences */ + spin_lock(&dev_priv->irq_lock); dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg); + spin_unlock(&dev_priv->irq_lock); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index da35bb2db26b6eea152cef50f62bd58532eb850d..64eacd11b8bff74d3db516dc64ba4761ea3fa32b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6035,6 +6035,7 @@ #define SHOTPLUG_CTL_DDI _MMIO(0xc4030) #define SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin) (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4)) +#define SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin) (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4)) #define 
SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin) (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4)) #define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin) (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4)) #define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin) (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4)) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index f17c09ead7d778752c332c267c9da41b07d8dc22..4d06875de14a141dd9482f9a8888865d979f2df1 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1569,7 +1569,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, * locked objects when called from execbuf when pinning * is removed. This would probably regress badly. */ - i915_gem_evict_vm(vm, NULL); + i915_gem_evict_vm(vm, NULL, NULL); mutex_unlock(&vm->mutex); } } while (1); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 744cca507946be24b93a566adeaa16182a95c6da..129746713d072f06e7ad1a3e4ad3b8e8aaeb8898 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -633,6 +633,8 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm) runtime_pm); int count = atomic_read(&rpm->wakeref_count); + intel_wakeref_auto_fini(&rpm->userfault_wakeref); + drm_WARN(&i915->drm, count, "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n", intel_rpm_raw_wakeref_count(count), @@ -652,4 +654,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) rpm->available = HAS_RUNTIME_PM(i915); init_intel_runtime_pm_wakeref(rpm); + INIT_LIST_HEAD(&rpm->lmem_userfault_list); + spin_lock_init(&rpm->lmem_userfault_lock); + intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm); } diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index d9160e3ff4afcf99adb8da12c58f0b1779da252b..98b8b28baaa15ef21ee6a804b66cc801868a309a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -53,6 +53,28 @@ struct intel_runtime_pm { bool irqs_enabled; bool no_wakeref_tracking; + /* + * Protects access to the lmem userfault list. + * If we are outside of the runtime suspend path, access to + * @lmem_userfault_list always requires first grabbing the runtime pm + * wakeref, to ensure we can't race against runtime suspend. + * Once we have that we also need to grab @lmem_userfault_lock, + * at which point we have exclusive access. + * The runtime suspend path is special since it doesn't really hold any locks, + * but instead has exclusive access by virtue of all other accesses requiring + * holding the runtime pm wakeref. + */ + spinlock_t lmem_userfault_lock; + + /* + * List of userfaulted gem objects which need to have their mmap + * mappings released in the runtime suspend path.
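A minimal sketch of the ordering rule spelled out above, for code running outside the runtime-suspend path; it mirrors what i915_ttm_unmap_virtual() does earlier in this patch (the i915 and obj variables are assumed to be in scope) and is illustrative rather than an additional API.

	intel_wakeref_t wakeref;

	/* 1) Hold a runtime pm wakeref so runtime suspend cannot race us. */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* 2) Only then take the spinlock for exclusive access to the list. */
	spin_lock(&i915->runtime_pm.lmem_userfault_lock);
	list_del(&obj->userfault_link);
	spin_unlock(&i915->runtime_pm.lmem_userfault_lock);
	obj->userfault_count = 0;

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);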
+ */ + struct list_head lmem_userfault_list; + + /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */ + struct intel_wakeref_auto userfault_wakeref; + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) /* * To aide detection of wakeref leaks and general misuse, we diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 8c6517d29b8e0c409626b9d3694995cd191b151e..37068542aafe7f6399d1b1222c643e8f17e0c6e8 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg) /* Everything is pinned, nothing should happen */ mutex_lock(&ggtt->vm.mutex); - err = i915_gem_evict_vm(&ggtt->vm, NULL); + err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL); mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", @@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg) for_i915_gem_ww(&ww, err, false) { mutex_lock(&ggtt->vm.mutex); - err = i915_gem_evict_vm(&ggtt->vm, &ww); + err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL); mutex_unlock(&ggtt->vm.mutex); } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index dba4f7d81d69359e0553bdf41b9052cb35dd3ae9..80142d9a4a55218e1c43375c25d5543a07bbb708 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; } + if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG) + width = ipu_src_rect_width(new_state); + else + width = drm_rect_width(&new_state->src) >> 16; + eba = drm_plane_state_to_eba(new_state, 0); /* @@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, */ if (ipu_state->use_pre) { axi_id = ipu_chan_assign_axi_id(ipu_plane->dma); - ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, - ipu_src_rect_width(new_state), + ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width, drm_rect_height(&new_state->src) >> 16, fb->pitches[0], fb->format->format, fb->modifier, &eba); @@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, break; } - ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8)); + ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width); - width = ipu_src_rect_width(new_state); height = drm_rect_height(&new_state->src) >> 16; info = drm_format_info(fb->format->format); ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0], @@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16); ipu_cpmem_zero(ipu_plane->alpha_ch); - ipu_cpmem_set_resolution(ipu_plane->alpha_ch, - ipu_src_rect_width(new_state), + ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width, drm_rect_height(&new_state->src) >> 16); ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8); ipu_cpmem_set_high_priority(ipu_plane->alpha_ch); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index ab0515d2c420a20964d56547c56dbea2f179df18..4499a04f7c138e60c9481836b90a6da45e71e1ce 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -1629,7 +1629,11 @@ static int ingenic_drm_init(void) return err; } - return platform_driver_register(&ingenic_drm_driver); + err = platform_driver_register(&ingenic_drm_driver); + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err) + 
platform_driver_unregister(ingenic_ipu_driver_ptr); + + return err; } module_init(ingenic_drm_init); diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 508a6d994e831a750f0c8b5c3f37ef62bb8832da..1f5d39a4077cdbe5a56af38308397d12552450ca 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -461,9 +461,6 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) if (--dpi->refcount != 0) return; - if (dpi->pinctrl && dpi->pins_gpio) - pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); - mtk_dpi_disable(dpi); clk_disable_unprepare(dpi->pixel_clk); clk_disable_unprepare(dpi->engine_clk); @@ -488,9 +485,6 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) goto err_pixel; } - if (dpi->pinctrl && dpi->pins_dpi) - pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); - return 0; err_pixel: @@ -721,12 +715,18 @@ static void mtk_dpi_bridge_disable(struct drm_bridge *bridge) struct mtk_dpi *dpi = bridge_to_dpi(bridge); mtk_dpi_power_off(dpi); + + if (dpi->pinctrl && dpi->pins_gpio) + pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio); } static void mtk_dpi_bridge_enable(struct drm_bridge *bridge) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); + if (dpi->pinctrl && dpi->pins_dpi) + pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi); + mtk_dpi_power_on(dpi); mtk_dpi_set_display_mode(dpi, &dpi->mode); mtk_dpi_enable(dpi); diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 4c80b6896dc3d6cf4f59e0ae0f1a366db8f57bf3..6e8f99554f548761cc52daea93b094ace3d55589 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1202,9 +1202,10 @@ static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi) return mtk_hdmi_update_plugged_status(hdmi); } -static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, - const struct drm_display_info *info, - const struct drm_display_mode *mode) +static enum drm_mode_status +mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) { struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); struct drm_bridge *next_bridge; diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c index 5675bc2a92cf8c0537d4cee4167a94df73244764..3f73b211fa8e3e3bc4812180883c9685f8377f19 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -116,9 +116,10 @@ static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge, return i; } -static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, - const struct drm_display_info *display_info, - const struct drm_display_mode *mode) +static enum drm_mode_status +meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) { if (meson_cvbs_get_mode(mode)) return MODE_OK; diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index d4b907889a21d199a43f0af7a7a6bcf56ec201c8..cd399b0b7181499218a8f969c0d320be88fd93c4 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv) /* Initialize OSD1 fifo control register */ reg = VIU_OSD_DDR_PRIORITY_URGENT | - VIU_OSD_HOLD_FIFO_LINES(31) | VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */ VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */ VIU_OSD_FIFO_LIMITS(2); /* 
fifo_lim: 2*16=32 */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) - reg |= VIU_OSD_BURST_LENGTH_32; + reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31)); else - reg |= VIU_OSD_BURST_LENGTH_64; + reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index be389ed91cbd8ebbfca37185b092f165e34cfb31..bd6e573c9a1a31bd5599fd656fbeee31176537a4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -284,7 +284,8 @@ static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc, pixpllcp = pixpllc->p - 1; pixpllcs = pixpllc->s; - xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1); + // For G200SE A, BIT(7) should be set unconditionally. + xpixpllcm = BIT(7) | pixpllcm; xpixpllcn = pixpllcn; xpixpllcp = (pixpllcs << 3) | pixpllcp; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index fdc578016e0bf33dcf6a2b460bec5ec2f94aa9af..e846e629c00d8cd021cdd643b9fd2997d902c90a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1906,7 +1906,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) if (val == UINT_MAX) { DRM_DEV_ERROR(dev, - "missing support for speed-bin: %u. Some OPPs may not be supported by hardware", + "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n", fuse); return UINT_MAX; } @@ -1916,7 +1916,7 @@ static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) { - u32 supp_hw = UINT_MAX; + u32 supp_hw; u32 speedbin; int ret; @@ -1928,15 +1928,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) if (ret == -ENOENT) { return 0; } else if (ret) { - DRM_DEV_ERROR(dev, - "failed to read speed-bin (%d). Some OPPs may not be supported by hardware", - ret); - goto done; + dev_err_probe(dev, ret, + "failed to read speed-bin. 
Some OPPs may not be supported by hardware\n"); + return ret; } supp_hw = fuse_to_supp_hw(dev, rev, speedbin); -done: ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index f2ddcfb6f7ee6e10a80d78bca82c59acf2e6334f..3662df698dae5206dbbf7c312341c65c10be8212 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -42,7 +42,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, u32 initial_lines) { struct dpu_hw_blk_reg_map *c = &hw_dsc->hw; - u32 data, lsb, bpp; + u32 data; u32 slice_last_group_size; u32 det_thresh_flatness; bool is_cmd_mode = !(mode & DSC_MODE_VIDEO); @@ -56,14 +56,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, data = (initial_lines << 20); data |= ((slice_last_group_size - 1) << 18); /* bpp is 6.4 format, 4 LSBs bits are for fractional part */ - data |= dsc->bits_per_pixel << 12; - lsb = dsc->bits_per_pixel % 4; - bpp = dsc->bits_per_pixel / 4; - bpp *= 4; - bpp <<= 4; - bpp |= lsb; - - data |= bpp << 8; + data |= (dsc->bits_per_pixel << 8); data |= (dsc->block_pred_enable << 7); data |= (dsc->line_buf_depth << 3); data |= (dsc->simple_422 << 2); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index b0d21838a13439027f1df3c3fd23b21c857eb1e5..29ae5c9613f36602877bacc5edb12bbce9bb4f6b 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -203,7 +203,7 @@ static int mdp5_set_split_display(struct msm_kms *kms, slave_encoder); } -static void mdp5_destroy(struct platform_device *pdev); +static void mdp5_destroy(struct mdp5_kms *mdp5_kms); static void mdp5_kms_destroy(struct msm_kms *kms) { @@ -223,7 +223,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms) } mdp_kms_destroy(&mdp5_kms->base); - mdp5_destroy(mdp5_kms->pdev); + mdp5_destroy(mdp5_kms); } #ifdef CONFIG_DEBUG_FS @@ -559,6 +559,8 @@ static int mdp5_kms_init(struct drm_device *dev) int irq, i, ret; ret = mdp5_init(to_platform_device(dev->dev), dev); + if (ret) + return ret; /* priv->kms would have been populated by the MDP5 driver */ kms = priv->kms; @@ -632,9 +634,8 @@ static int mdp5_kms_init(struct drm_device *dev) return ret; } -static void mdp5_destroy(struct platform_device *pdev) +static void mdp5_destroy(struct mdp5_kms *mdp5_kms) { - struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); int i; if (mdp5_kms->ctlm) @@ -648,7 +649,7 @@ static void mdp5_destroy(struct platform_device *pdev) kfree(mdp5_kms->intfs[i]); if (mdp5_kms->rpm_enabled) - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(&mdp5_kms->pdev->dev); drm_atomic_private_obj_fini(&mdp5_kms->glob_state); drm_modeset_lock_fini(&mdp5_kms->glob_state_lock); @@ -797,8 +798,6 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) goto fail; } - platform_set_drvdata(pdev, mdp5_kms); - spin_lock_init(&mdp5_kms->resource_lock); mdp5_kms->dev = dev; @@ -839,6 +838,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) */ clk_set_rate(mdp5_kms->core_clk, 200000000); + /* set uninit-ed kms */ + priv->kms = &mdp5_kms->base.base; + pm_runtime_enable(&pdev->dev); mdp5_kms->rpm_enabled = true; @@ -890,13 +892,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) if (ret) goto fail; - /* set uninit-ed kms */ - priv->kms = &mdp5_kms->base.base; - return 0; fail: if (mdp5_kms) - 
mdp5_destroy(pdev); + mdp5_destroy(mdp5_kms); return ret; } @@ -953,7 +952,8 @@ static int mdp5_dev_remove(struct platform_device *pdev) static __maybe_unused int mdp5_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + struct msm_drm_private *priv = platform_get_drvdata(pdev); + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); DBG(""); @@ -963,7 +963,8 @@ static __maybe_unused int mdp5_runtime_suspend(struct device *dev) static __maybe_unused int mdp5_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + struct msm_drm_private *priv = platform_get_drvdata(pdev); + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); DBG(""); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index a49f6dbbe8883d8e84ba8b502983a6f8c3559cca..c9d9b384ddd03f123ac0613518ef7a84332c8853 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -857,7 +857,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display, dp = container_of(dp_display, struct dp_display_private, dp_display); - dp->panel->dp_mode.drm_mode = mode->drm_mode; + drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); dp->panel->dp_mode.bpp = mode->bpp; dp->panel->dp_mode.capabilities = mode->capabilities; dp_panel_init_panel_info(dp->panel); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 7fbf391c024f8e9d69bd0952deba7a584feb7c47..89aadd3b3202b3abdf7ba1fef43676367fcdf9b6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -21,6 +21,7 @@ #include