381 files changed, 8770 insertions, 3698 deletions
diff --git a/Documentation/ABI/testing/rtc-cdev b/Documentation/ABI/testing/rtc-cdev index 25910c3c3d7eb1..cec099a27c6d5c 100644 --- a/Documentation/ABI/testing/rtc-cdev +++ b/Documentation/ABI/testing/rtc-cdev @@ -14,7 +14,7 @@ Description: for RTCs that support alarms * RTC_ALM_READ, RTC_ALM_SET: Read or set the alarm time for - RTCs that support alarms. Can be set upto 24 hours in the + RTCs that support alarms. Can be set up to 24 hours in the future. Requires a separate RTC_AIE_ON call to enable the alarm interrupt. (Prefer to use RTC_WKALM_*) diff --git a/Documentation/admin-guide/device-mapper/dm-raid.rst b/Documentation/admin-guide/device-mapper/dm-raid.rst index bb17e26e3c1be0..e11f1076477095 100644 --- a/Documentation/admin-guide/device-mapper/dm-raid.rst +++ b/Documentation/admin-guide/device-mapper/dm-raid.rst @@ -20,10 +20,10 @@ The target is named "raid" and it accepts the following parameters:: raid0 RAID0 striping (no resilience) raid1 RAID1 mirroring raid4 RAID4 with dedicated last parity disk - raid5_n RAID5 with dedicated last parity disk supporting takeover + raid5_n RAID5 with dedicated last parity disk supporting takeover from/to raid1 Same as raid4 - - Transitory layout + - Transitory layout for takeover from/to raid1 raid5_la RAID5 left asymmetric - rotating parity 0 with data continuation @@ -48,8 +48,8 @@ The target is named "raid" and it accepts the following parameters:: raid6_n_6 RAID6 with dedicate parity disks - parity and Q-syndrome on the last 2 disks; - layout for takeover from/to raid4/raid5_n - raid6_la_6 Same as "raid_la" plus dedicated last Q-syndrome disk + layout for takeover from/to raid0/raid4/raid5_n + raid6_la_6 Same as "raid_la" plus dedicated last Q-syndrome disk supporting takeover from/to raid5 - layout for takeover from raid5_la from/to raid6 raid6_ra_6 Same as "raid5_ra" dedicated last Q-syndrome disk @@ -173,9 +173,9 @@ The target is named "raid" and it accepts the following parameters:: The delta_disks option value (-251 < N < +251) triggers device removal (negative value) or device addition (positive value) to any reshape supporting raid levels 4/5/6 and 10. - RAID levels 4/5/6 allow for addition of devices (metadata - and data device tuple), raid10_near and raid10_offset only - allow for device addition. raid10_far does not support any + RAID levels 4/5/6 allow for addition and removal of devices + (metadata and data device tuple), raid10_near and raid10_offset + only allow for device addition. raid10_far does not support any reshaping at all. A minimum of devices have to be kept to enforce resilience, which is 3 devices for raid4/5 and 4 devices for raid6. @@ -372,6 +372,72 @@ to safely enable discard support for RAID 4/5/6: 'devices_handle_discards_safely' +Takeover/Reshape Support +------------------------ +The target natively supports these two types of MDRAID conversions: + +o Takeover: Converts an array from one RAID level to another + +o Reshape: Changes the internal layout while maintaining the current RAID level + +Each operation is only valid under specific constraints imposed by the existing array's layout and configuration. 
+ + +Takeover: +linear -> raid1 with N >= 2 mirrors +raid0 -> raid4 (add dedicated parity device) +raid0 -> raid5 (add dedicated parity device) +raid0 -> raid10 with near layout and N >= 2 mirror groups (raid0 stripes have to become first member within mirror groups) +raid1 -> linear +raid1 -> raid5 with 2 mirrors +raid4 -> raid5 w/ rotating parity +raid5 with dedicated parity device -> raid4 +raid5 -> raid6 (with dedicated Q-syndrome) +raid6 (with dedicated Q-syndrome) -> raid5 +raid10 with near layout and even number of disks -> raid0 (select any in-sync device from each mirror group) + +Reshape: +linear: not possible +raid0: not possible +raid1: change number of mirrors +raid4: add and remove stripes (minimum 3), change stripesize +raid5: add and remove stripes (minimum 3, special case 2 for raid1 takeover), change rotating parity algorithms, change stripesize +raid6: add and remove stripes (minimum 4), change rotating syndrome algorithms, change stripesize +raid10 near: add stripes (minimum 4), change stripesize, no stripe removal possible, change to offset layout +raid10 offset: add stripes, change stripesize, no stripe removal possible, change to near layout +raid10 far: not possible + +Table line examples: + +### raid1 -> raid5 +# +# 2 devices limitation in raid1. +# raid5 personality is able to just map 2 like raid1. +# Reshape after takeover to change to full raid5 layout + + 0 1960886272 raid raid1 3 0 region_size 2048 2 /dev/dm-0 /dev/dm-1 /dev/dm-2 /dev/dm-3 + +# dm-0 and dm-2 are e.g. 4MiB large metadata devices, dm-1 and dm-3 have to be at least 1960886272 big. +# +# Table line to takeover to raid5 + + 0 1960886272 raid raid5 3 0 region_size 2048 2 /dev/dm-0 /dev/dm-1 /dev/dm-2 /dev/dm-3 + +# Add required out-of-place reshape space to the beginning of the given 2 data devices, +# allocate another metadata/data device tuple with the same sizes for the parity space +# and zero the first 4K of the metadata device. +# +# Example table of the out-of-place reshape space addition for one data device, e.g. dm-1 + + 0 8192 linear 8:0 0 1960903888 # <- must be free space segment + 8192 1960886272 linear 8:0 0 2048 # previous data segment + +# Mapping table for e.g. raid5_rs reshape causing the size of the raid device to double once the reshape finishes. +# Check the status output (e.g. "dmsetup status $RaidDev") for progress. + + 0 $((2 * 1960886272)) raid raid5 7 0 region_size 2048 data_offset 8192 delta_disks 1 2 /dev/dm-0 /dev/dm-1 /dev/dm-2 /dev/dm-3 + + Version History --------------- diff --git a/Documentation/admin-guide/device-mapper/verity.rst b/Documentation/admin-guide/device-mapper/verity.rst index 8c3f1f967a3cdf..3ecab1cff9c64c 100644 --- a/Documentation/admin-guide/device-mapper/verity.rst +++ b/Documentation/admin-guide/device-mapper/verity.rst @@ -236,8 +236,10 @@ is available at the cryptsetup project's wiki page Status ====== -V (for Valid) is returned if every check performed so far was valid. -If any check failed, C (for Corruption) is returned. +1. V (for Valid) is returned if every check performed so far was valid. + If any check failed, C (for Corruption) is returned. +2. Number of corrected blocks by Forward Error Correction. + '-' if Forward Error Correction is not enabled.
Example ======= diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst index fa2988dd465732..deb3f67a633cfc 100644 --- a/Documentation/dev-tools/checkpatch.rst +++ b/Documentation/dev-tools/checkpatch.rst @@ -1002,6 +1002,29 @@ Functions and Variables return bar; + **UNINITIALIZED_PTR_WITH_FREE** + Pointers with __free attribute should be declared at the place of use + and initialized (see include/linux/cleanup.h). In this case + declarations at the top of the function rule can be relaxed. Not doing + so may lead to undefined behavior as the memory assigned (garbage, + in case not initialized) to the pointer is freed automatically when + the pointer goes out of scope. + + Also see: https://lore.kernel.org/lkml/58fd478f408a34b578ee8d949c5c4b4da4d4f41d.camel@HansenPartnership.com/ + + Example:: + + type var __free(free_func); + ... // var not used, but, in future someone might add a return here + var = malloc(var_size); + ... + + should be initialized as:: + + ... + type var __free(free_func) = malloc(var_size); + ... + Permissions ----------- diff --git a/Documentation/devicetree/bindings/mfd/apple,smc.yaml b/Documentation/devicetree/bindings/mfd/apple,smc.yaml index 5429538f7e2e91..0410e712c900a7 100644 --- a/Documentation/devicetree/bindings/mfd/apple,smc.yaml +++ b/Documentation/devicetree/bindings/mfd/apple,smc.yaml @@ -46,6 +46,9 @@ properties: reboot: $ref: /schemas/power/reset/apple,smc-reboot.yaml + rtc: + $ref: /schemas/rtc/apple,smc-rtc.yaml + additionalProperties: false required: @@ -80,5 +83,11 @@ examples: nvmem-cell-names = "shutdown_flag", "boot_stage", "boot_error_count", "panic_count"; }; + + rtc { + compatible = "apple,smc-rtc"; + nvmem-cells = <&rtc_offset>; + nvmem-cell-names = "rtc_offset"; + }; }; }; diff --git a/Documentation/devicetree/bindings/rtc/andestech,atcrtc100.yaml b/Documentation/devicetree/bindings/rtc/andestech,atcrtc100.yaml new file mode 100644 index 00000000000000..ec0a736793c7ea --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/andestech,atcrtc100.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rtc/andestech,atcrtc100.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Andes ATCRTC100 Real-Time Clock + +maintainers: + - CL Wang <cl634@andestech.com> + +allOf: + - $ref: rtc.yaml# + +properties: + compatible: + enum: + - andestech,atcrtc100 + + reg: + maxItems: 1 + + interrupts: + items: + - description: Periodic timekeeping interrupt + - description: RTC alarm interrupt + +required: + - compatible + - reg + - interrupts + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + + rtc@f0300000 { + compatible = "andestech,atcrtc100"; + reg = <0xf0300000 0x100>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH>, <2 IRQ_TYPE_LEVEL_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/rtc/apple,smc-rtc.yaml b/Documentation/devicetree/bindings/rtc/apple,smc-rtc.yaml new file mode 100644 index 00000000000000..607b610665a28b --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/apple,smc-rtc.yaml @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rtc/apple,smc-rtc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Apple SMC RTC + +description: + Apple Silicon Macs (M1, etc.) 
have an RTC that is part of the PMU IC, + but most of the PMU functionality is abstracted out by the SMC. + An additional RTC offset stored inside NVMEM is required to compute + the current date/time. + +maintainers: + - Sven Peter <sven@kernel.org> + +properties: + compatible: + const: apple,smc-rtc + + nvmem-cells: + items: + - description: 48bit RTC offset, specified in 32768 (2^15) Hz clock ticks + + nvmem-cell-names: + items: + - const: rtc_offset + +required: + - compatible + - nvmem-cells + - nvmem-cell-names + +additionalProperties: false diff --git a/Documentation/devicetree/bindings/rtc/nvidia,vrs-10.yaml b/Documentation/devicetree/bindings/rtc/nvidia,vrs-10.yaml new file mode 100644 index 00000000000000..c7dbc8b83c0047 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/nvidia,vrs-10.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rtc/nvidia,vrs-10.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NVIDIA Voltage Regulator Specification Real Time Clock + +maintainers: + - Shubhi Garg <shgarg@nvidia.com> + +description: + NVIDIA VRS-10 (Voltage Regulator Specification) is a Power Management IC + (PMIC) that implements a power sequencing solution with I2C interface. + The device includes a real-time clock (RTC) with 32kHz clock output and + backup battery support, alarm functionality for system wake-up from + suspend and shutdown states, OTP memory for power sequencing configuration, + and an interrupt controller for managing VRS events. + +properties: + compatible: + const: nvidia,vrs-10 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + interrupt-controller: true + + '#interrupt-cells': + const: 2 + +required: + - compatible + - reg + - interrupts + - interrupt-controller + - '#interrupt-cells' + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + i2c { + #address-cells = <1>; + #size-cells = <0>; + + pmic@3c { + compatible = "nvidia,vrs-10"; + reg = <0x3c>; + interrupt-parent = <&pmc>; + interrupts = <24 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; diff --git a/Documentation/devicetree/bindings/rtc/renesas,rz-rtca3.yaml b/Documentation/devicetree/bindings/rtc/renesas,rz-rtca3.yaml index e70eeb66aa648b..ccb1638c35b9bf 100644 --- a/Documentation/devicetree/bindings/rtc/renesas,rz-rtca3.yaml +++ b/Documentation/devicetree/bindings/rtc/renesas,rz-rtca3.yaml @@ -9,14 +9,12 @@ title: Renesas RTCA-3 Real Time Clock maintainers: - Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com> -allOf: - - $ref: rtc.yaml# - properties: compatible: items: - enum: - renesas,r9a08g045-rtca3 # RZ/G3S + - renesas,r9a09g057-rtca3 # RZ/V2H - const: renesas,rz-rtca3 reg: @@ -48,8 +46,12 @@ properties: maxItems: 1 resets: - items: - - description: VBATTB module reset + minItems: 1 + maxItems: 2 + + reset-names: + minItems: 1 + maxItems: 2 required: - compatible @@ -61,6 +63,39 @@ required: - power-domains - resets +allOf: + - $ref: rtc.yaml# + + - if: + properties: + compatible: + contains: + const: renesas,r9a08g045-rtca3 + then: + properties: + resets: + items: + - description: VBATTB module reset + reset-names: + const: vbattb + - if: + properties: + compatible: + contains: + const: renesas,r9a09g057-rtca3 + then: + properties: + resets: + items: + - description: RTC reset + - description: Reset for the RTEST registers + reset-names: + items: + - const: rtc + - const: rtest + required: + - reset-names + 
additionalProperties: false examples: @@ -81,4 +116,5 @@ examples: clock-names = "bus", "counter"; power-domains = <&cpg>; resets = <&cpg R9A08G045_VBAT_BRESETN>; + reset-names = "vbattb"; }; diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42xx8.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42xx8.yaml index cd47905eb20a79..7ae72bd901f4da 100644 --- a/Documentation/devicetree/bindings/sound/cirrus,cs42xx8.yaml +++ b/Documentation/devicetree/bindings/sound/cirrus,cs42xx8.yaml @@ -9,6 +9,9 @@ title: Cirrus Logic CS42448/CS42888 audio CODEC maintainers: - patches@opensource.cirrus.com +allOf: + - $ref: dai-common.yaml# + properties: compatible: enum: @@ -63,7 +66,7 @@ then: - VLC-supply - VLS-supply -additionalProperties: false +unevaluatedProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/sound/cix,sky1-ipbloq-hda.yaml b/Documentation/devicetree/bindings/sound/cix,sky1-ipbloq-hda.yaml new file mode 100644 index 00000000000000..02ac5f1aa926c2 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/cix,sky1-ipbloq-hda.yaml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/sound/cix,sky1-ipbloq-hda.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: CIX IPBLOQ HDA controller + +description: + CIX IPBLOQ High Definition Audio (HDA) Controller + +maintainers: + - Joakim Zhang <joakim.zhang@cixtech.com> + +allOf: + - $ref: sound-card-common.yaml# + +properties: + compatible: + const: cix,sky1-ipbloq-hda + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 2 + + clock-names: + items: + - const: ipg + - const: per + + resets: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - resets + +unevaluatedProperties: false + +examples: + - | + #include<dt-bindings/interrupt-controller/arm-gic.h> + + hda@70c0000 { + compatible = "cix,sky1-ipbloq-hda"; + reg = <0x70c0000 0x10000>; + interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&audss_clk 7>, + <&audss_clk 8>; + clock-names = "ipg", "per"; + resets = <&audss_rst 14>; + model = "CIX SKY1 EVB HDA"; + }; diff --git a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml index 95d947fda6a705..003023729fb8ca 100644 --- a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml +++ b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml @@ -23,6 +23,7 @@ properties: - qcom,soundwire-v1.6.0 - qcom,soundwire-v1.7.0 - qcom,soundwire-v2.0.0 + - qcom,soundwire-v3.1.0 - items: - enum: - qcom,soundwire-v2.1.0 @@ -73,10 +74,12 @@ properties: qcom,din-ports: $ref: /schemas/types.yaml#/definitions/uint32 description: count of data in ports + deprecated: true qcom,dout-ports: $ref: /schemas/types.yaml#/definitions/uint32 description: count of data out ports + deprecated: true qcom,ports-word-length: $ref: /schemas/types.yaml#/definitions/uint8-array @@ -223,8 +226,6 @@ required: - '#sound-dai-cells' - '#address-cells' - '#size-cells' - - qcom,dout-ports - - qcom,din-ports - qcom,ports-offset1 - qcom,ports-offset2 @@ -257,9 +258,6 @@ examples: clocks = <&lpass_rx_macro>; clock-names = "iface"; - qcom,din-ports = <0>; - qcom,dout-ports = <5>; - resets = <&lpass_audiocc LPASS_AUDIO_SWR_RX_CGCR>; reset-names = "swr_audio_cgcr"; diff --git a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst 
b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst index 4a2d3b27aa4d21..aaefeda0e16444 100644 --- a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst +++ b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst @@ -109,10 +109,6 @@ irq_domain维护着从hwirq号到Linux IRQ的radix的树状映射。 当一个hw 如果hwirq号可以非常大,树状映射是一个很好的选择,因为它不需要分配一个和最大hwirq 号一样大的表。 缺点是,hwirq到IRQ号的查找取决于表中有多少条目。 -irq_domain_add_tree()和irq_domain_create_tree()在功能上是等价的,除了第一 -个参数不同——前者接受一个Open Firmware特定的 'struct device_node' ,而后者接受 -一个更通用的抽象 'struct fwnode_handle' 。 - 很少有驱动应该需要这个映射。 无映射 diff --git a/MAINTAINERS b/MAINTAINERS index aff3e162180d1c..5b11839cba9de1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2475,6 +2475,7 @@ F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml F: Documentation/devicetree/bindings/power/apple* F: Documentation/devicetree/bindings/power/reset/apple,smc-reboot.yaml F: Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml +F: Documentation/devicetree/bindings/rtc/apple,smc-rtc.yaml F: Documentation/devicetree/bindings/spi/apple,spi.yaml F: Documentation/devicetree/bindings/spmi/apple,spmi.yaml F: Documentation/devicetree/bindings/usb/apple,dwc3.yaml @@ -2501,6 +2502,7 @@ F: drivers/nvmem/apple-spmi-nvmem.c F: drivers/pinctrl/pinctrl-apple-gpio.c F: drivers/power/reset/macsmc-reboot.c F: drivers/pwm/pwm-apple.c +F: drivers/rtc/rtc-macsmc.c F: drivers/soc/apple/* F: drivers/spi/spi-apple.c F: drivers/spmi/spmi-apple-controller.c @@ -4015,6 +4017,12 @@ F: drivers/power/reset/atc260x-poweroff.c F: drivers/regulator/atc260x-regulator.c F: include/linux/mfd/atc260x/* +ATCRTC100 RTC DRIVER +M: CL Wang <cl634@andestech.com> +S: Supported +F: Documentation/devicetree/bindings/rtc/andestech,atcrtc100.yaml +F: drivers/rtc/rtc-atcrtc100.c + ATHEROS 71XX/9XXX GPIO DRIVER M: Alban Bedel <albeu@free.fr> S: Maintained @@ -7225,6 +7233,7 @@ DEVICE-MAPPER (LVM) M: Alasdair Kergon <agk@redhat.com> M: Mike Snitzer <snitzer@kernel.org> M: Mikulas Patocka <mpatocka@redhat.com> +M: Benjamin Marzinski <bmarzins@redhat.com> L: dm-devel@lists.linux.dev S: Maintained Q: http://patchwork.kernel.org/project/dm-devel/list/ @@ -18691,6 +18700,13 @@ S: Maintained F: drivers/video/fbdev/nvidia/ F: drivers/video/fbdev/riva/ +NVIDIA VRS RTC DRIVER +M: Shubhi Garg <shgarg@nvidia.com> +L: linux-tegra@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/rtc/nvidia,vrs-10.yaml +F: drivers/rtc/rtc-nvidia-vrs10.c + NVIDIA WMI EC BACKLIGHT DRIVER M: Daniel Dadap <ddadap@nvidia.com> L: platform-driver-x86@vger.kernel.org @@ -28305,6 +28321,7 @@ M: Matthew Wilcox <willy@infradead.org> L: linux-fsdevel@vger.kernel.org L: linux-mm@kvack.org S: Supported +F: Documentation/core-api/idr.rst F: Documentation/core-api/xarray.rst F: include/linux/idr.h F: include/linux/xarray.h diff --git a/Makefile b/Makefile index 2f545ec1690fb9..e404e4767944ed 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 -PATCHLEVEL = 18 +PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig index 134a559aba3dd5..2367b1685c1cf8 100644 --- a/arch/arm/configs/am200epdkit_defconfig +++ b/arch/arm/configs/am200epdkit_defconfig @@ -68,7 +68,6 @@ CONFIG_SOUND=m CONFIG_SND=m CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_PXA2XX_AC97=m CONFIG_USB_GADGET=y 
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig index 2bddb0924a8c02..b9e2e603cd95e4 100644 --- a/arch/arm/configs/lpc32xx_defconfig +++ b/arch/arm/configs/lpc32xx_defconfig @@ -113,7 +113,6 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_DEBUG=y CONFIG_SND_DEBUG_VERBOSE=y diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig index dee820474f444e..df88763fc7c3d4 100644 --- a/arch/arm/configs/omap1_defconfig +++ b/arch/arm/configs/omap1_defconfig @@ -148,7 +148,6 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_DUMMY=y CONFIG_SND_USB_AUDIO=y diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index ce70ff07c978a5..68aedaf92667a4 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -219,7 +219,6 @@ CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_DRIVERS is not set CONFIG_SND_HDA_TEGRA=y CONFIG_SND_HDA_INPUT_BEEP=y diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index b087b900d2790b..c51d4487e9e9b6 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -549,38 +549,37 @@ static int __maybe_unused xts_encrypt(struct skcipher_request *req) tail = 0; } - for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) { - int nbytes = walk.nbytes; + scoped_ksimd() { + for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) { + int nbytes = walk.nbytes; - if (walk.nbytes < walk.total) - nbytes &= ~(AES_BLOCK_SIZE - 1); + if (walk.nbytes < walk.total) + nbytes &= ~(AES_BLOCK_SIZE - 1); - scoped_ksimd() aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key1.key_enc, rounds, nbytes, ctx->key2.key_enc, walk.iv, first); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } - if (err || likely(!tail)) - return err; + if (err || likely(!tail)) + return err; - dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); - if (req->dst != req->src) - dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); + dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); - skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, - req->iv); + skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, + req->iv); - err = skcipher_walk_virt(&walk, &subreq, false); - if (err) - return err; + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; - scoped_ksimd() aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key1.key_enc, rounds, walk.nbytes, ctx->key2.key_enc, walk.iv, first); - + } return skcipher_walk_done(&walk, 0); } @@ -619,39 +618,37 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req) tail = 0; } - for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) { - int nbytes = walk.nbytes; + scoped_ksimd() { + for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) { + int nbytes = walk.nbytes; - if (walk.nbytes < walk.total) - nbytes &= ~(AES_BLOCK_SIZE - 1); + if (walk.nbytes < walk.total) + nbytes &= ~(AES_BLOCK_SIZE - 1); - scoped_ksimd() aes_xts_decrypt(walk.dst.virt.addr, 
walk.src.virt.addr, ctx->key1.key_dec, rounds, nbytes, ctx->key2.key_enc, walk.iv, first); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } - if (err || likely(!tail)) - return err; - - dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); - if (req->dst != req->src) - dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); + if (err || likely(!tail)) + return err; - skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, - req->iv); + dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); - err = skcipher_walk_virt(&walk, &subreq, false); - if (err) - return err; + skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, + req->iv); + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; - scoped_ksimd() aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, ctx->key1.key_dec, rounds, walk.nbytes, ctx->key2.key_enc, walk.iv, first); - + } return skcipher_walk_done(&walk, 0); } diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index d496effb0a5b77..cb87c8fc66b3b0 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c @@ -312,13 +312,13 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, if (err) return err; - while (walk.nbytes >= AES_BLOCK_SIZE) { - int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7; - out = walk.dst.virt.addr; - in = walk.src.virt.addr; - nbytes = walk.nbytes; + scoped_ksimd() { + while (walk.nbytes >= AES_BLOCK_SIZE) { + int blocks = (walk.nbytes / AES_BLOCK_SIZE) & ~7; + out = walk.dst.virt.addr; + in = walk.src.virt.addr; + nbytes = walk.nbytes; - scoped_ksimd() { if (blocks >= 8) { if (first == 1) neon_aes_ecb_encrypt(walk.iv, walk.iv, @@ -344,30 +344,28 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, ctx->twkey, walk.iv, first); nbytes = first = 0; } + err = skcipher_walk_done(&walk, nbytes); } - err = skcipher_walk_done(&walk, nbytes); - } - if (err || likely(!tail)) - return err; + if (err || likely(!tail)) + return err; - /* handle ciphertext stealing */ - dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); - if (req->dst != req->src) - dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); + /* handle ciphertext stealing */ + dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen); - skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, - req->iv); + skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail, + req->iv); - err = skcipher_walk_virt(&walk, req, false); - if (err) - return err; + err = skcipher_walk_virt(&walk, req, false); + if (err) + return err; - out = walk.dst.virt.addr; - in = walk.src.virt.addr; - nbytes = walk.nbytes; + out = walk.dst.virt.addr; + in = walk.src.virt.addr; + nbytes = walk.nbytes; - scoped_ksimd() { if (encrypt) neon_aes_xts_encrypt(out, in, ctx->cts.key_enc, ctx->key.rounds, nbytes, ctx->twkey, diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 7951557a285a9d..ef249d06c92cc4 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -133,7 +133,7 @@ static int ghash_finup(struct shash_desc *desc, const u8 *src, u8 buf[GHASH_BLOCK_SIZE] = {}; memcpy(buf, src, len); - ghash_do_simd_update(1, ctx->digest, src, key, 
NULL, + ghash_do_simd_update(1, ctx->digest, buf, key, NULL, pmull_ghash_update_p8); memzero_explicit(buf, sizeof(buf)); } diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index 5569cece5a0b85..0eeabfa9ef25e4 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -346,11 +346,11 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt) tail = 0; } - while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) { - if (nbytes < walk.total) - nbytes &= ~(SM4_BLOCK_SIZE - 1); + scoped_ksimd() { + while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) { + if (nbytes < walk.total) + nbytes &= ~(SM4_BLOCK_SIZE - 1); - scoped_ksimd() { if (encrypt) sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr, walk.src.virt.addr, walk.iv, nbytes, @@ -359,32 +359,30 @@ static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt) sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr, walk.src.virt.addr, walk.iv, nbytes, rkey2_enc); - } - rkey2_enc = NULL; + rkey2_enc = NULL; - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - if (err) - return err; - } + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + if (err) + return err; + } - if (likely(tail == 0)) - return 0; + if (likely(tail == 0)) + return 0; - /* handle ciphertext stealing */ + /* handle ciphertext stealing */ - dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen); - if (req->dst != req->src) - dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen); + dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen); - skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail, - req->iv); + skcipher_request_set_crypt(&subreq, src, dst, + SM4_BLOCK_SIZE + tail, req->iv); - err = skcipher_walk_virt(&walk, &subreq, false); - if (err) - return err; + err = skcipher_walk_virt(&walk, &subreq, false); + if (err) + return err; - scoped_ksimd() { if (encrypt) sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr, walk.src.virt.addr, walk.iv, walk.nbytes, diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 5b1116733d881b..730f342145197f 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -115,6 +115,7 @@ config LOONGARCH select GPIOLIB select HAS_IOPORT select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_BITREVERSE select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN @@ -567,6 +568,10 @@ config ARCH_STRICT_ALIGN to run kernel only on systems with h/w unaligned access support in order to optimise for performance. 
+config CPU_HAS_AMO + bool + default 64BIT + config CPU_HAS_FPU bool default y diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 96ca1a688984e9..8d45b860fe562b 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -5,7 +5,12 @@ boot := arch/loongarch/boot -KBUILD_DEFCONFIG := loongson3_defconfig +ifeq ($(shell uname -m),loongarch32) +KBUILD_DEFCONFIG := loongson32_defconfig +else +KBUILD_DEFCONFIG := loongson64_defconfig +endif + KBUILD_DTBS := dtbs image-name-y := vmlinux diff --git a/arch/loongarch/configs/loongson32_defconfig b/arch/loongarch/configs/loongson32_defconfig new file mode 100644 index 00000000000000..276b1577e0be95 --- /dev/null +++ b/arch/loongarch/configs/loongson32_defconfig @@ -0,0 +1,1105 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_ZSTD=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_PREEMPT=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_DMEM=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_MISC=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_KEXEC=y +CONFIG_LOONGARCH=y +CONFIG_32BIT=y +CONFIG_32BIT_STANDARD=y +CONFIG_MACH_LOONGSON32=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_250=y +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_SUSPEND=y +CONFIG_HIBERNATION=y +CONFIG_ACPI=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_BGRT=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_VIRTUALIZATION=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_COMPRESS=y +CONFIG_MODULE_COMPRESS_ZSTD=y +CONFIG_MODULE_DECOMPRESS=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_BLK_CGROUP_IOLATENCY=y +CONFIG_BLK_CGROUP_FC_APPID=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_BLK_CGROUP_IOPRIO=y +CONFIG_BLK_INLINE_ENCRYPTION=y +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_CMDLINE_PARTITION=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BINFMT_MISC=m +CONFIG_ZSWAP=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_ZSMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set +CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=y +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_CMA=y +CONFIG_CMA_SYSFS=y +CONFIG_USERFAULTFD=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y 
+CONFIG_NET_KEY=y +CONFIG_XDP_SOCKETS=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_MROUTE=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m 
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_NFCT=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_IP_SCTP=m +CONFIG_RDS=y +CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_BPF=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_MTK=y +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y 
+CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_AG6XX=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_ATH3K=m +CONFIG_BT_VIRTIO=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_CEPH_LIB=m +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_IOV=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCCARD=m +CONFIG_YENTA=m +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_ZSTD=y +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_BOOTLOADER_CONTROL=m +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_MTD_RAW_NAND=m +CONFIG_MTD_NAND_PLATFORM=m +CONFIG_MTD_NAND_LOONGSON=m +CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y +CONFIG_MTD_NAND_ECC_SW_BCH=y +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_BLOCK=y +CONFIG_PARPORT=y +CONFIG_PARPORT_PC=y +CONFIG_PARPORT_SERIAL=y +CONFIG_PARPORT_PC_FIFO=y +CONFIG_ZRAM=m +CONFIG_ZRAM_BACKEND_LZ4=y +CONFIG_ZRAM_BACKEND_LZ4HC=y +CONFIG_ZRAM_BACKEND_ZSTD=y +CONFIG_ZRAM_BACKEND_DEFLATE=y +CONFIG_ZRAM_BACKEND_842=y +CONFIG_ZRAM_BACKEND_LZO=y +CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_ZRAM_MEMORY_TRACKING=y +CONFIG_ZRAM_MULTI_COMP=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_EEPROM_AT24=m +CONFIG_PVPANIC=y +CONFIG_PVPANIC_MMIO=m +CONFIG_PVPANIC_PCI=m +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=y +CONFIG_SCSI_MPT2SAS=y +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_AHCI_DWC=y +CONFIG_PATA_ATIIXP=y +CONFIG_PATA_PCMCIA=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LLBITMAP=y +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m 
+CONFIG_DM_MULTIPATH_HST=m +CONFIG_DM_MULTIPATH_IOA=m +CONFIG_DM_INIT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_INTEGRITY=m +CONFIG_DM_ZONED=m +CONFIG_DM_VDO=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=y +CONFIG_WIREGUARD=m +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_VXLAN=y +CONFIG_RIONET=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_BNX2=y +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IXGBE=y +CONFIG_I40E=y +CONFIG_ICE=y +CONFIG_FM10K=y +CONFIG_IGC=y +CONFIG_IDPF=y +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +CONFIG_R8169=y +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_STMMAC_ETH=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=y +CONFIG_TXGBE=y +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_MOTORCOMM_PHY=y +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_USBNET=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_RNDIS_HOST=m +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_ATH9K=m +CONFIG_ATH9K_HTC=m +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_MT7601U=m 
+CONFIG_RT2X00=m +CONFIG_RT2800USB=m +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTL8192DU=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=m +CONFIG_RTW88=m +CONFIG_RTW88_8822BE=m +CONFIG_RTW88_8822BU=m +CONFIG_RTW88_8822CE=m +CONFIG_RTW88_8822CU=m +CONFIG_RTW88_8723DE=m +CONFIG_RTW88_8723DU=m +CONFIG_RTW88_8821CE=m +CONFIG_RTW88_8821CU=m +CONFIG_RTW88_8821AU=m +CONFIG_RTW88_8812AU=m +CONFIG_RTW88_8814AE=m +CONFIG_RTW88_8814AU=m +CONFIG_RTW89=m +CONFIG_RTW89_8851BE=m +CONFIG_RTW89_8852AE=m +CONFIG_RTW89_8852BE=m +CONFIG_RTW89_8852BTE=m +CONFIG_RTW89_8852CE=m +CONFIG_RTW89_8922AE=m +CONFIG_ZD1211RW=m +CONFIG_USB4_NET=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +CONFIG_KEYBOARD_MATRIX=m +CONFIG_KEYBOARD_XTKBD=m +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=m +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_RAW=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_LOONGSON=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_PRINTER=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_PIIX4=y +CONFIG_I2C_DESIGNWARE_CORE=y +CONFIG_I2C_DESIGNWARE_SLAVE=y +CONFIG_I2C_DESIGNWARE_PCI=y +CONFIG_I2C_GPIO=y +CONFIG_SPI=y +CONFIG_SPI_LOONGSON_PCI=m +CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PINCTRL=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_LOONGSON1=y +CONFIG_GPIO_PCA953X=m +CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCA9570=m +CONFIG_GPIO_PCF857X=m +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y +CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83627HF=m +CONFIG_WATCHDOG=y +CONFIG_LOONGSON1_WDT=m +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_EFIDRM=y +CONFIG_DRM_SIMPLEDRM=y +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_LOONGSON=y +CONFIG_FB=y +CONFIG_FB_RADEON=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_LCD_PLATFORM=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_HDA_INTEL=y +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_REALTEK_LIB=y +CONFIG_SND_HDA_CODEC_ALC260=y +CONFIG_SND_HDA_CODEC_ALC262=y 
+CONFIG_SND_HDA_CODEC_ALC268=y +CONFIG_SND_HDA_CODEC_ALC269=y +CONFIG_SND_HDA_CODEC_ALC662=y +CONFIG_SND_HDA_CODEC_ALC680=y +CONFIG_SND_HDA_CODEC_ALC861=y +CONFIG_SND_HDA_CODEC_ALC861VD=y +CONFIG_SND_HDA_CODEC_ALC880=y +CONFIG_SND_HDA_CODEC_ALC882=y +CONFIG_SND_HDA_CODEC_SIGMATEL=y +CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_HDA_CODEC_HDMI_GENERIC=y +CONFIG_SND_HDA_CODEC_HDMI_INTEL=y +CONFIG_SND_HDA_CODEC_HDMI_ATI=y +CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y +CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_AUDIO_MIDI_V2=y +CONFIG_SND_SOC=m +CONFIG_SND_SOC_LOONGSON_CARD=m +CONFIG_SND_LOONGSON1_AC97=m +CONFIG_SND_SOC_ES7134=m +CONFIG_SND_SOC_ES7241=m +CONFIG_SND_SOC_ES8311=m +CONFIG_SND_SOC_ES8316=m +CONFIG_SND_SOC_ES8323=m +CONFIG_SND_SOC_ES8326=m +CONFIG_SND_SOC_ES8328_I2C=m +CONFIG_SND_SOC_ES8328_SPI=m +CONFIG_SND_SOC_UDA1334=m +CONFIG_SND_SOC_UDA1342=m +CONFIG_SND_VIRTIO=m +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_A4TECH=m +CONFIG_HID_CHERRY=m +CONFIG_HID_ELAN=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_WACOM=m +CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID_ACPI=m +CONFIG_I2C_HID_OF=m +CONFIG_I2C_HID_OF_ELAN=m +CONFIG_USB=y +CONFIG_USB_OTG=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_UHCI_HCD=m +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_UAS=m +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_GADGET=y +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_MMC=y +CONFIG_INFINIBAND=m +CONFIG_EDAC=y +# CONFIG_EDAC_LEGACY_SYSFS is not set +CONFIG_EDAC_LOONGSON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_LOONGSON=y +CONFIG_DMADEVICES=y +CONFIG_LOONGSON1_APB_DMA=y +CONFIG_UDMABUF=y +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_HEAPS_SYSTEM=y +CONFIG_DMABUF_HEAPS_CMA=y +CONFIG_UIO=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_COMEDI=m +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m +CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m +CONFIG_COMEDI_NI_PCIMIO=m +CONFIG_STAGING=y +CONFIG_CLKSRC_LOONGSON1_PWM=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_NTB=m +CONFIG_NTB_MSI=y +CONFIG_NTB_IDT=m +CONFIG_NTB_EPF=m +CONFIG_NTB_SWITCHTEC=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_GENERIC_PHY=y +CONFIG_USB4=y +CONFIG_EXT2_FS=y 
+CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_F2FS_FS=m +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_CHECK_FS=y +CONFIG_F2FS_FS_COMPRESSION=y +CONFIG_FS_ENCRYPTION=y +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y +CONFIG_FS_VERITY=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_EXFAT_FS=m +CONFIG_NTFS3_FS=m +CONFIG_NTFS3_LZX_XPRESS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=y +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZSTD=y +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_PSTORE=m +CONFIG_PSTORE_COMPRESS=y +CONFIG_UFS_FS=m +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ZIP_ZSTD=y +CONFIG_EROFS_FS_ONDEMAND=y +CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_DEBUG is not set +CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_DLM=m +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_SELFTESTS=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_SM3_GENERIC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_USER_API_RNG=m +CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_CRYPTO_DEV_LOONGSON_RNG=m +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_PRINTK_TIME=y +CONFIG_STRIP_ASM_SYMS=y 
+CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_FS=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_FTRACE is not set diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson64_defconfig index 50e1304e7a6f4a..a14db1a95e7e4a 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson64_defconfig @@ -435,7 +435,6 @@ CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_FW_LOADER_COMPRESS=y CONFIG_FW_LOADER_COMPRESS_ZSTD=y -CONFIG_SYSFB_SIMPLEFB=y CONFIG_EFI_ZBOOT=y CONFIG_EFI_BOOTLOADER_CONTROL=m CONFIG_EFI_CAPSULE_LOADER=m @@ -530,6 +529,7 @@ CONFIG_PATA_ATIIXP=y CONFIG_PATA_PCMCIA=m CONFIG_MD=y CONFIG_BLK_DEV_MD=m +CONFIG_MD_LLBITMAP=y CONFIG_MD_RAID0=m CONFIG_MD_RAID1=m CONFIG_MD_RAID10=m @@ -738,6 +738,7 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_LOONGSON=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_NONSTANDARD=y CONFIG_PRINTER=m @@ -801,6 +802,8 @@ CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m CONFIG_DRM=y CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_EFIDRM=y +CONFIG_DRM_SIMPLEDRM=y CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m @@ -811,9 +814,7 @@ CONFIG_DRM_AST=y CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_LOONGSON=y -CONFIG_DRM_SIMPLEDRM=y CONFIG_FB=y -CONFIG_FB_EFI=y CONFIG_FB_RADEON=y CONFIG_FIRMWARE_EDID=y CONFIG_LCD_CLASS_DEVICE=y diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index b04d2cef935f6d..9034b583a88a69 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += syscall_table_32.h syscall-y += syscall_table_64.h generated-y += orc_hash.h diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index e739dbc6329dce..d6472cafb32cd7 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -38,11 +38,20 @@ extern unsigned long vm_map_base; #endif #ifndef WRITECOMBINE_BASE +#ifdef CONFIG_32BIT +#define WRITECOMBINE_BASE CSR_DMW0_BASE +#else #define WRITECOMBINE_BASE CSR_DMW2_BASE #endif +#endif +#ifdef CONFIG_32BIT +#define DMW_PABITS 29 +#define TO_PHYS_MASK ((_UL(1) << _UL(DMW_PABITS)) - 1) +#else #define DMW_PABITS 48 -#define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1) +#define TO_PHYS_MASK ((_ULL(1) << _ULL(DMW_PABITS)) - 1) +#endif /* * Memory above this physical address will be considered highmem. 
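(As an aside to the addrspace.h change above: the TO_PHYS_MASK arithmetic, together with the PHYSADDR() macro in the next hunk, can be checked with ordinary host-side arithmetic. The sketch below is not part of the patch; the LA32_/LA64_ macro prefixes and the sample virtual addresses are assumptions made for illustration only, using the cached DMW1 window bases (0xa0000000 on LA32, 0x9000000000000000 on LA64) that appear in the loongarch.h hunk later in this series.)

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Constants mirrored from the asm/addrspace.h hunk above: the new
     * 32-bit configuration uses 29 physical-address bits, the existing
     * 64-bit configuration uses 48.  Names and sample addresses are
     * local to this sketch, not kernel identifiers.
     */
    #define LA32_DMW_PABITS   29
    #define LA64_DMW_PABITS   48
    #define LA32_TO_PHYS_MASK ((UINT32_C(1) << LA32_DMW_PABITS) - 1)
    #define LA64_TO_PHYS_MASK ((UINT64_C(1) << LA64_DMW_PABITS) - 1)

    int main(void)
    {
        uint32_t va32 = 0xa0001000u;           /* LA32 DMW1 (cached), VSEG 0x5 */
        uint64_t va64 = 0x9000000012345678ull; /* LA64 DMW1 (cached), VSEG 0x9000 */

        /* Masking off the window bits recovers the physical address. */
        printf("LA32 PHYSADDR(0x%08x) = 0x%08x\n",
               va32, va32 & LA32_TO_PHYS_MASK);
        printf("LA64 PHYSADDR(0x%016llx) = 0x%016llx\n",
               (unsigned long long)va64,
               (unsigned long long)(va64 & LA64_TO_PHYS_MASK));
        return 0;
    }

(Either way, PHYSADDR() simply strips the direct-map window bits above DMW_PABITS, so every window of a given width maps back onto the same physical range.)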
@@ -112,7 +121,11 @@ extern unsigned long vm_map_base; /* * Returns the physical address of a KPRANGEx / XKPRANGE address */ +#ifdef CONFIG_32BIT +#define PHYSADDR(a) ((_ACAST32_(a)) & TO_PHYS_MASK) +#else #define PHYSADDR(a) ((_ACAST64_(a)) & TO_PHYS_MASK) +#endif /* * On LoongArch, I/O ports mappring is following: diff --git a/arch/loongarch/include/asm/asm.h b/arch/loongarch/include/asm/asm.h index f018d26fc995a6..719cab1a0ad8c0 100644 --- a/arch/loongarch/include/asm/asm.h +++ b/arch/loongarch/include/asm/asm.h @@ -72,11 +72,11 @@ #define INT_SUB sub.w #define INT_L ld.w #define INT_S st.w -#define INT_SLL slli.w +#define INT_SLLI slli.w #define INT_SLLV sll.w -#define INT_SRL srli.w +#define INT_SRLI srli.w #define INT_SRLV srl.w -#define INT_SRA srai.w +#define INT_SRAI srai.w #define INT_SRAV sra.w #endif @@ -86,11 +86,11 @@ #define INT_SUB sub.d #define INT_L ld.d #define INT_S st.d -#define INT_SLL slli.d +#define INT_SLLI slli.d #define INT_SLLV sll.d -#define INT_SRL srli.d +#define INT_SRLI srli.d #define INT_SRLV srl.d -#define INT_SRA srai.d +#define INT_SRAI srai.d #define INT_SRAV sra.d #endif @@ -100,15 +100,23 @@ #if (__SIZEOF_LONG__ == 4) #define LONG_ADD add.w #define LONG_ADDI addi.w +#define LONG_ALSL alsl.w +#define LONG_BSTRINS bstrins.w +#define LONG_BSTRPICK bstrpick.w #define LONG_SUB sub.w #define LONG_L ld.w +#define LONG_LI li.w +#define LONG_LPTR ld.w #define LONG_S st.w -#define LONG_SLL slli.w +#define LONG_SPTR st.w +#define LONG_SLLI slli.w #define LONG_SLLV sll.w -#define LONG_SRL srli.w +#define LONG_SRLI srli.w #define LONG_SRLV srl.w -#define LONG_SRA srai.w +#define LONG_SRAI srai.w #define LONG_SRAV sra.w +#define LONG_ROTR rotr.w +#define LONG_ROTRI rotri.w #ifdef __ASSEMBLER__ #define LONG .word @@ -121,15 +129,23 @@ #if (__SIZEOF_LONG__ == 8) #define LONG_ADD add.d #define LONG_ADDI addi.d +#define LONG_ALSL alsl.d +#define LONG_BSTRINS bstrins.d +#define LONG_BSTRPICK bstrpick.d #define LONG_SUB sub.d #define LONG_L ld.d +#define LONG_LI li.d +#define LONG_LPTR ldptr.d #define LONG_S st.d -#define LONG_SLL slli.d +#define LONG_SPTR stptr.d +#define LONG_SLLI slli.d #define LONG_SLLV sll.d -#define LONG_SRL srli.d +#define LONG_SRLI srli.d #define LONG_SRLV srl.d -#define LONG_SRA srai.d +#define LONG_SRAI srai.d #define LONG_SRAV sra.d +#define LONG_ROTR rotr.d +#define LONG_ROTRI rotri.d #ifdef __ASSEMBLER__ #define LONG .dword @@ -145,16 +161,23 @@ #if (__SIZEOF_POINTER__ == 4) #define PTR_ADD add.w #define PTR_ADDI addi.w +#define PTR_ALSL alsl.w +#define PTR_BSTRINS bstrins.w +#define PTR_BSTRPICK bstrpick.w #define PTR_SUB sub.w #define PTR_L ld.w -#define PTR_S st.w #define PTR_LI li.w -#define PTR_SLL slli.w +#define PTR_LPTR ld.w +#define PTR_S st.w +#define PTR_SPTR st.w +#define PTR_SLLI slli.w #define PTR_SLLV sll.w -#define PTR_SRL srli.w +#define PTR_SRLI srli.w #define PTR_SRLV srl.w -#define PTR_SRA srai.w +#define PTR_SRAI srai.w #define PTR_SRAV sra.w +#define PTR_ROTR rotr.w +#define PTR_ROTRI rotri.w #define PTR_SCALESHIFT 2 @@ -168,16 +191,23 @@ #if (__SIZEOF_POINTER__ == 8) #define PTR_ADD add.d #define PTR_ADDI addi.d +#define PTR_ALSL alsl.d +#define PTR_BSTRINS bstrins.d +#define PTR_BSTRPICK bstrpick.d #define PTR_SUB sub.d #define PTR_L ld.d -#define PTR_S st.d #define PTR_LI li.d -#define PTR_SLL slli.d +#define PTR_LPTR ldptr.d +#define PTR_S st.d +#define PTR_SPTR stptr.d +#define PTR_SLLI slli.d #define PTR_SLLV sll.d -#define PTR_SRL srli.d +#define PTR_SRLI srli.d #define PTR_SRLV srl.d -#define PTR_SRA srai.d 
+#define PTR_SRAI srai.d #define PTR_SRAV sra.d +#define PTR_ROTR rotr.d +#define PTR_ROTRI rotri.d #define PTR_SCALESHIFT 3 @@ -190,10 +220,17 @@ /* Annotate a function as being unsuitable for kprobes. */ #ifdef CONFIG_KPROBES +#ifdef CONFIG_32BIT +#define _ASM_NOKPROBE(name) \ + .pushsection "_kprobe_blacklist", "aw"; \ + .long name; \ + .popsection +#else #define _ASM_NOKPROBE(name) \ .pushsection "_kprobe_blacklist", "aw"; \ .quad name; \ .popsection +#endif #else #define _ASM_NOKPROBE(name) #endif diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h index 8d7f501b0a124e..a648be5f723fcb 100644 --- a/arch/loongarch/include/asm/asmmacro.h +++ b/arch/loongarch/include/asm/asmmacro.h @@ -5,43 +5,55 @@ #ifndef _ASM_ASMMACRO_H #define _ASM_ASMMACRO_H +#include <linux/sizes.h> #include <asm/asm-offsets.h> #include <asm/regdef.h> #include <asm/fpregdef.h> #include <asm/loongarch.h> +#ifdef CONFIG_64BIT +#define TASK_STRUCT_OFFSET 0 +#else +#define TASK_STRUCT_OFFSET 2000 +#endif + .macro cpu_save_nonscratch thread - stptr.d s0, \thread, THREAD_REG23 - stptr.d s1, \thread, THREAD_REG24 - stptr.d s2, \thread, THREAD_REG25 - stptr.d s3, \thread, THREAD_REG26 - stptr.d s4, \thread, THREAD_REG27 - stptr.d s5, \thread, THREAD_REG28 - stptr.d s6, \thread, THREAD_REG29 - stptr.d s7, \thread, THREAD_REG30 - stptr.d s8, \thread, THREAD_REG31 - stptr.d sp, \thread, THREAD_REG03 - stptr.d fp, \thread, THREAD_REG22 + LONG_SPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET) + LONG_SPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET) + LONG_SPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET) + LONG_SPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET) + LONG_SPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET) + LONG_SPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET) + LONG_SPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET) + LONG_SPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET) + LONG_SPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET) + LONG_SPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET) + LONG_SPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET) + LONG_SPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET) .endm .macro cpu_restore_nonscratch thread - ldptr.d s0, \thread, THREAD_REG23 - ldptr.d s1, \thread, THREAD_REG24 - ldptr.d s2, \thread, THREAD_REG25 - ldptr.d s3, \thread, THREAD_REG26 - ldptr.d s4, \thread, THREAD_REG27 - ldptr.d s5, \thread, THREAD_REG28 - ldptr.d s6, \thread, THREAD_REG29 - ldptr.d s7, \thread, THREAD_REG30 - ldptr.d s8, \thread, THREAD_REG31 - ldptr.d ra, \thread, THREAD_REG01 - ldptr.d sp, \thread, THREAD_REG03 - ldptr.d fp, \thread, THREAD_REG22 + LONG_LPTR s0, \thread, (THREAD_REG23 - TASK_STRUCT_OFFSET) + LONG_LPTR s1, \thread, (THREAD_REG24 - TASK_STRUCT_OFFSET) + LONG_LPTR s2, \thread, (THREAD_REG25 - TASK_STRUCT_OFFSET) + LONG_LPTR s3, \thread, (THREAD_REG26 - TASK_STRUCT_OFFSET) + LONG_LPTR s4, \thread, (THREAD_REG27 - TASK_STRUCT_OFFSET) + LONG_LPTR s5, \thread, (THREAD_REG28 - TASK_STRUCT_OFFSET) + LONG_LPTR s6, \thread, (THREAD_REG29 - TASK_STRUCT_OFFSET) + LONG_LPTR s7, \thread, (THREAD_REG30 - TASK_STRUCT_OFFSET) + LONG_LPTR s8, \thread, (THREAD_REG31 - TASK_STRUCT_OFFSET) + LONG_LPTR ra, \thread, (THREAD_REG01 - TASK_STRUCT_OFFSET) + LONG_LPTR sp, \thread, (THREAD_REG03 - TASK_STRUCT_OFFSET) + LONG_LPTR fp, \thread, (THREAD_REG22 - TASK_STRUCT_OFFSET) .endm .macro fpu_save_csr thread tmp movfcsr2gr \tmp, fcsr0 +#ifdef CONFIG_32BIT + st.w \tmp, \thread, THREAD_FCSR +#else stptr.w \tmp, \thread, 
THREAD_FCSR +#endif #ifdef CONFIG_CPU_HAS_LBT /* TM bit is always 0 if LBT not supported */ andi \tmp, \tmp, FPU_CSR_TM @@ -56,7 +68,11 @@ .endm .macro fpu_restore_csr thread tmp0 tmp1 +#ifdef CONFIG_32BIT + ld.w \tmp0, \thread, THREAD_FCSR +#else ldptr.w \tmp0, \thread, THREAD_FCSR +#endif movgr2fcsr fcsr0, \tmp0 #ifdef CONFIG_CPU_HAS_LBT /* TM bit is always 0 if LBT not supported */ @@ -88,9 +104,52 @@ #endif .endm +#ifdef CONFIG_32BIT .macro fpu_save_cc thread tmp0 tmp1 movcf2gr \tmp0, $fcc0 - move \tmp1, \tmp0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc3 + bstrins.w \tmp1, \tmp0, 31, 24 + st.w \tmp1, \thread, THREAD_FCC + movcf2gr \tmp0, $fcc4 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc5 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc6 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc7 + bstrins.w \tmp1, \tmp0, 31, 24 + st.w \tmp1, \thread, (THREAD_FCC + 4) + .endm + + .macro fpu_restore_cc thread tmp0 tmp1 + ld.w \tmp0, \thread, THREAD_FCC + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + ld.w \tmp0, \thread, (THREAD_FCC + 4) + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc4, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc5, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc6, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc7, \tmp1 + .endm +#else + .macro fpu_save_cc thread tmp0 tmp1 + movcf2gr \tmp0, $fcc0 + move \tmp1, \tmp0 movcf2gr \tmp0, $fcc1 bstrins.d \tmp1, \tmp0, 15, 8 movcf2gr \tmp0, $fcc2 @@ -109,7 +168,7 @@ .endm .macro fpu_restore_cc thread tmp0 tmp1 - ldptr.d \tmp0, \thread, THREAD_FCC + ldptr.d \tmp0, \thread, THREAD_FCC bstrpick.d \tmp1, \tmp0, 7, 0 movgr2cf $fcc0, \tmp1 bstrpick.d \tmp1, \tmp0, 15, 8 @@ -127,6 +186,7 @@ bstrpick.d \tmp1, \tmp0, 63, 56 movgr2cf $fcc7, \tmp1 .endm +#endif .macro fpu_save_double thread tmp li.w \tmp, THREAD_FPR0 @@ -606,12 +666,14 @@ 766: lu12i.w \reg, 0 ori \reg, \reg, 0 +#ifdef CONFIG_64BIT lu32i.d \reg, 0 lu52i.d \reg, \reg, 0 +#endif .pushsection ".la_abs", "aw", %progbits - .p2align 3 - .dword 766b - .dword \sym + .p2align PTRLOG + PTR 766b + PTR \sym .popsection #endif .endm diff --git a/arch/loongarch/include/asm/atomic-amo.h b/arch/loongarch/include/asm/atomic-amo.h new file mode 100644 index 00000000000000..d5efa5252d567d --- /dev/null +++ b/arch/loongarch/include/asm/atomic-amo.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Atomic operations (AMO). 
+ * + * Copyright (C) 2020-2025 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_ATOMIC_AMO_H +#define _ASM_ATOMIC_AMO_H + +#include <linux/types.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h> + +#define ATOMIC_OP(op, I, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op".w" " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ +static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op#mb".w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result c_op I; \ +} + +#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \ +static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \ +{ \ + int result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op#mb".w" " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC_OPS(op, I, asm_op, c_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \ + ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ + ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC_OPS(add, i, add, +) +ATOMIC_OPS(sub, -i, add, +) + +#define arch_atomic_add_return arch_atomic_add_return +#define arch_atomic_add_return_acquire arch_atomic_add_return +#define arch_atomic_add_return_release arch_atomic_add_return +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return arch_atomic_sub_return +#define arch_atomic_sub_return_acquire arch_atomic_sub_return +#define arch_atomic_sub_return_release arch_atomic_sub_return +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add arch_atomic_fetch_add +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add +#define arch_atomic_fetch_add_release arch_atomic_fetch_add +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, I, asm_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC_OPS(and, i, and) +ATOMIC_OPS(or, i, or) +ATOMIC_OPS(xor, i, xor) + +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and +#define arch_atomic_fetch_and_release arch_atomic_fetch_and +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or +#define arch_atomic_fetch_or_release arch_atomic_fetch_or +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#ifdef CONFIG_64BIT + +#define ATOMIC64_OP(op, I, asm_op) \ +static inline 
void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + __asm__ __volatile__( \ + "am"#asm_op".d " " $zero, %1, %0 \n" \ + : "+ZB" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} + +#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ +static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \ +{ \ + long result; \ + __asm__ __volatile__( \ + "am"#asm_op#mb".d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result c_op I; \ +} + +#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \ +static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \ +{ \ + long result; \ + \ + __asm__ __volatile__( \ + "am"#asm_op#mb".d " " %1, %2, %0 \n" \ + : "+ZB" (v->counter), "=&r" (result) \ + : "r" (I) \ + : "memory"); \ + \ + return result; \ +} + +#define ATOMIC64_OPS(op, I, asm_op, c_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \ + ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ + ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC64_OPS(add, i, add, +) +ATOMIC64_OPS(sub, -i, add, +) + +#define arch_atomic64_add_return arch_atomic64_add_return +#define arch_atomic64_add_return_acquire arch_atomic64_add_return +#define arch_atomic64_add_return_release arch_atomic64_add_return +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return arch_atomic64_sub_return +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return +#define arch_atomic64_sub_return_release arch_atomic64_sub_return +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + +#undef ATOMIC64_OPS + +#define ATOMIC64_OPS(op, I, asm_op) \ + ATOMIC64_OP(op, I, asm_op) \ + ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) + +ATOMIC64_OPS(and, i, and) +ATOMIC64_OPS(or, i, or) +ATOMIC64_OPS(xor, i, xor) + +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + +#undef ATOMIC64_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +#endif + +#endif /* _ASM_ATOMIC_AMO_H */ diff --git a/arch/loongarch/include/asm/atomic-llsc.h b/arch/loongarch/include/asm/atomic-llsc.h new file mode 100644 index 
00000000000000..3ce500c8827293 --- /dev/null +++ b/arch/loongarch/include/asm/atomic-llsc.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Atomic operations (LLSC). + * + * Copyright (C) 2024-2025 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_ATOMIC_LLSC_H +#define _ASM_ATOMIC_LLSC_H + +#include <linux/types.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h> + +#define ATOMIC_OP(op, I, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + int temp; \ + \ + __asm__ __volatile__( \ + "1: ll.w %0, %1 #atomic_" #op " \n" \ + " " #asm_op " %0, %0, %2 \n" \ + " sc.w %0, %1 \n" \ + " beq %0, $r0, 1b \n" \ + :"=&r" (temp) , "+ZC"(v->counter) \ + :"r" (I) \ + ); \ +} + +#define ATOMIC_OP_RETURN(op, I, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int result, temp; \ + \ + __asm__ __volatile__( \ + "1: ll.w %1, %2 # atomic_" #op "_return \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc.w %0, %2 \n" \ + " beq %0, $r0 ,1b \n" \ + " " #asm_op " %0, %1, %3 \n" \ + : "=&r" (result), "=&r" (temp), "+ZC"(v->counter) \ + : "r" (I)); \ + \ + return result; \ +} + +#define ATOMIC_FETCH_OP(op, I, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int result, temp; \ + \ + __asm__ __volatile__( \ + "1: ll.w %1, %2 # atomic_fetch_" #op " \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc.w %0, %2 \n" \ + " beq %0, $r0 ,1b \n" \ + " add.w %0, %1 ,$r0 \n" \ + : "=&r" (result), "=&r" (temp), "+ZC" (v->counter) \ + : "r" (I)); \ + \ + return result; \ +} + +#define ATOMIC_OPS(op,I ,asm_op, c_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_OP_RETURN(op, I , asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + +ATOMIC_OPS(add, i , add.w ,+=) +ATOMIC_OPS(sub, -i , add.w ,+=) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, I, asm_op) \ + ATOMIC_OP(op, I, asm_op) \ + ATOMIC_FETCH_OP(op, I, asm_op) + +ATOMIC_OPS(and, i, and) +ATOMIC_OPS(or, i, or) +ATOMIC_OPS(xor, i, xor) + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#ifdef CONFIG_64BIT +#error "64-bit LLSC atomic operations are not supported" +#endif + +#endif /* _ASM_ATOMIC_LLSC_H */ diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h index c86f0ab922ec7d..444b9ddcd0049d 100644 --- a/arch/loongarch/include/asm/atomic.h +++ b/arch/loongarch/include/asm/atomic.h @@ -11,6 +11,16 @@ #include <asm/barrier.h> #include <asm/cmpxchg.h> +#ifdef CONFIG_CPU_HAS_AMO +#include <asm/atomic-amo.h> +#else +#include <asm/atomic-llsc.h> +#endif + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif + #if __SIZEOF_LONG__ == 4 #define __LL "ll.w " #define __SC "sc.w " @@ -34,100 +44,6 @@ #define arch_atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) -#define ATOMIC_OP(op, I, asm_op) \ -static inline void arch_atomic_##op(int i, atomic_t *v) \ -{ \ - __asm__ __volatile__( \ - "am"#asm_op".w" " $zero, %1, %0 \n" \ - : "+ZB" (v->counter) \ - 
: "r" (I) \ - : "memory"); \ -} - -#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ -static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \ -{ \ - int result; \ - \ - __asm__ __volatile__( \ - "am"#asm_op#mb".w" " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result c_op I; \ -} - -#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \ -static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \ -{ \ - int result; \ - \ - __asm__ __volatile__( \ - "am"#asm_op#mb".w" " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result; \ -} - -#define ATOMIC_OPS(op, I, asm_op, c_op) \ - ATOMIC_OP(op, I, asm_op) \ - ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \ - ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ - ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC_OPS(add, i, add, +) -ATOMIC_OPS(sub, -i, add, +) - -#define arch_atomic_add_return arch_atomic_add_return -#define arch_atomic_add_return_acquire arch_atomic_add_return -#define arch_atomic_add_return_release arch_atomic_add_return -#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed -#define arch_atomic_sub_return arch_atomic_sub_return -#define arch_atomic_sub_return_acquire arch_atomic_sub_return -#define arch_atomic_sub_return_release arch_atomic_sub_return -#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed -#define arch_atomic_fetch_add arch_atomic_fetch_add -#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add -#define arch_atomic_fetch_add_release arch_atomic_fetch_add -#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed -#define arch_atomic_fetch_sub arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub -#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed - -#undef ATOMIC_OPS - -#define ATOMIC_OPS(op, I, asm_op) \ - ATOMIC_OP(op, I, asm_op) \ - ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC_OPS(and, i, and) -ATOMIC_OPS(or, i, or) -ATOMIC_OPS(xor, i, xor) - -#define arch_atomic_fetch_and arch_atomic_fetch_and -#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and -#define arch_atomic_fetch_and_release arch_atomic_fetch_and -#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed -#define arch_atomic_fetch_or arch_atomic_fetch_or -#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or -#define arch_atomic_fetch_or_release arch_atomic_fetch_or -#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed -#define arch_atomic_fetch_xor arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor -#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed - -#undef ATOMIC_OPS -#undef ATOMIC_FETCH_OP -#undef ATOMIC_OP_RETURN -#undef ATOMIC_OP - static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) { int prev, rc; @@ -194,99 +110,6 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v) #define arch_atomic64_read(v) READ_ONCE((v)->counter) #define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) -#define ATOMIC64_OP(op, I, asm_op) \ -static inline void arch_atomic64_##op(long i, atomic64_t *v) \ -{ \ - __asm__ __volatile__( \ - "am"#asm_op".d " " $zero, %1, %0 \n" \ - : "+ZB" 
(v->counter) \ - : "r" (I) \ - : "memory"); \ -} - -#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ -static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \ -{ \ - long result; \ - __asm__ __volatile__( \ - "am"#asm_op#mb".d " " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result c_op I; \ -} - -#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \ -static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \ -{ \ - long result; \ - \ - __asm__ __volatile__( \ - "am"#asm_op#mb".d " " %1, %2, %0 \n" \ - : "+ZB" (v->counter), "=&r" (result) \ - : "r" (I) \ - : "memory"); \ - \ - return result; \ -} - -#define ATOMIC64_OPS(op, I, asm_op, c_op) \ - ATOMIC64_OP(op, I, asm_op) \ - ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \ - ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ - ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC64_OPS(add, i, add, +) -ATOMIC64_OPS(sub, -i, add, +) - -#define arch_atomic64_add_return arch_atomic64_add_return -#define arch_atomic64_add_return_acquire arch_atomic64_add_return -#define arch_atomic64_add_return_release arch_atomic64_add_return -#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed -#define arch_atomic64_sub_return arch_atomic64_sub_return -#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return -#define arch_atomic64_sub_return_release arch_atomic64_sub_return -#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed -#define arch_atomic64_fetch_add arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add -#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed -#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub -#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed - -#undef ATOMIC64_OPS - -#define ATOMIC64_OPS(op, I, asm_op) \ - ATOMIC64_OP(op, I, asm_op) \ - ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ - ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) - -ATOMIC64_OPS(and, i, and) -ATOMIC64_OPS(or, i, or) -ATOMIC64_OPS(xor, i, xor) - -#define arch_atomic64_fetch_and arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and -#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed -#define arch_atomic64_fetch_or arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or -#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed -#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor -#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed - -#undef ATOMIC64_OPS -#undef ATOMIC64_FETCH_OP -#undef ATOMIC64_OP_RETURN -#undef ATOMIC64_OP - static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) { long prev, rc; diff --git a/arch/loongarch/include/asm/bitops.h b/arch/loongarch/include/asm/bitops.h index 69e00f8d803416..411106bf99026e 100644 --- a/arch/loongarch/include/asm/bitops.h +++ 
b/arch/loongarch/include/asm/bitops.h @@ -13,11 +13,22 @@ #include <asm/barrier.h> +#ifdef CONFIG_32BIT_REDUCED + +#include <asm-generic/bitops/ffs.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/__fls.h> + +#else /* CONFIG_32BIT_STANDARD || CONFIG_64BIT */ + #include <asm-generic/bitops/builtin-ffs.h> #include <asm-generic/bitops/builtin-fls.h> #include <asm-generic/bitops/builtin-__ffs.h> #include <asm-generic/bitops/builtin-__fls.h> +#endif + #include <asm-generic/bitops/ffz.h> #include <asm-generic/bitops/fls64.h> diff --git a/arch/loongarch/include/asm/bitrev.h b/arch/loongarch/include/asm/bitrev.h index 46f275b9cdf756..757738ea38d764 100644 --- a/arch/loongarch/include/asm/bitrev.h +++ b/arch/loongarch/include/asm/bitrev.h @@ -11,7 +11,7 @@ static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x) { u32 ret; - asm("bitrev.4b %0, %1" : "=r"(ret) : "r"(__swab32(x))); + asm("bitrev.w %0, %1" : "=r"(ret) : "r"(x)); return ret; } diff --git a/arch/loongarch/include/asm/checksum.h b/arch/loongarch/include/asm/checksum.h index cabbf6af44c43a..cc2754e0aa2512 100644 --- a/arch/loongarch/include/asm/checksum.h +++ b/arch/loongarch/include/asm/checksum.h @@ -9,6 +9,8 @@ #include <linux/bitops.h> #include <linux/in6.h> +#ifdef CONFIG_64BIT + #define _HAVE_ARCH_IPV6_CSUM __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, @@ -61,6 +63,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) extern unsigned int do_csum(const unsigned char *buff, int len); #define do_csum do_csum +#endif + #include <asm-generic/checksum.h> #endif /* __ASM_CHECKSUM_H */ diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h index 979fde61bba8a4..0494c2ab553e95 100644 --- a/arch/loongarch/include/asm/cmpxchg.h +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -9,17 +9,33 @@ #include <linux/build_bug.h> #include <asm/barrier.h> -#define __xchg_asm(amswap_db, m, val) \ +#define __xchg_amo_asm(amswap_db, m, val) \ ({ \ - __typeof(val) __ret; \ + __typeof(val) __ret; \ \ - __asm__ __volatile__ ( \ - " "amswap_db" %1, %z2, %0 \n" \ - : "+ZB" (*m), "=&r" (__ret) \ - : "Jr" (val) \ - : "memory"); \ + __asm__ __volatile__ ( \ + " "amswap_db" %1, %z2, %0 \n" \ + : "+ZB" (*m), "=&r" (__ret) \ + : "Jr" (val) \ + : "memory"); \ \ - __ret; \ + __ret; \ +}) + +#define __xchg_llsc_asm(ld, st, m, val) \ +({ \ + __typeof(val) __ret, __tmp; \ + \ + asm volatile ( \ + "1: ll.w %0, %3 \n" \ + " move %1, %z4 \n" \ + " sc.w %1, %2 \n" \ + " beqz %1, 1b \n" \ + : "=&r" (__ret), "=&r" (__tmp), "=ZC" (*m) \ + : "ZC" (*m), "Jr" (val) \ + : "memory"); \ + \ + __ret; \ }) static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val, @@ -67,13 +83,23 @@ __arch_xchg(volatile void *ptr, unsigned long x, int size) switch (size) { case 1: case 2: - return __xchg_small(ptr, x, size); + return __xchg_small((volatile void *)ptr, x, size); case 4: - return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x); +#ifdef CONFIG_CPU_HAS_AMO + return __xchg_amo_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x); +#else + return __xchg_llsc_asm("ll.w", "sc.w", (volatile u32 *)ptr, (u32)x); +#endif /* CONFIG_CPU_HAS_AMO */ +#ifdef CONFIG_64BIT case 8: - return __xchg_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x); +#ifdef CONFIG_CPU_HAS_AMO + return __xchg_amo_asm("amswap_db.d", (volatile u64 *)ptr, (u64)x); +#else + return __xchg_llsc_asm("ll.d", "sc.d", (volatile u64 *)ptr, (u64)x); +#endif /* 
CONFIG_CPU_HAS_AMO */ +#endif /* CONFIG_64BIT */ default: BUILD_BUG(); diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h index bd5f0457ad21d8..3745d991a99a9b 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -20,16 +20,13 @@ #define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) #ifdef CONFIG_32BIT -# define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) # define cpu_vabits 31 # define cpu_pabits 31 #endif #ifdef CONFIG_64BIT -# define cpu_has_64bits 1 # define cpu_vabits cpu_data[0].vabits # define cpu_pabits cpu_data[0].pabits -# define __NEED_ADDRBITS_PROBE #endif /* diff --git a/arch/loongarch/include/asm/dmi.h b/arch/loongarch/include/asm/dmi.h index 605493417753c8..11bb3c8a7179f6 100644 --- a/arch/loongarch/include/asm/dmi.h +++ b/arch/loongarch/include/asm/dmi.h @@ -12,7 +12,7 @@ #define dmi_early_unmap(x, l) dmi_unmap(x) #define dmi_alloc(l) memblock_alloc(l, PAGE_SIZE) -static inline void *dmi_remap(u64 phys_addr, unsigned long size) +static inline void *dmi_remap(phys_addr_t phys_addr, unsigned long size) { return ((void *)TO_CACHE(phys_addr)); } diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h index f16bd42456e4cc..912c50cdd6b7ce 100644 --- a/arch/loongarch/include/asm/elf.h +++ b/arch/loongarch/include/asm/elf.h @@ -120,6 +120,36 @@ #define R_LARCH_ADD_ULEB128 107 #define R_LARCH_SUB_ULEB128 108 #define R_LARCH_64_PCREL 109 +#define R_LARCH_CALL36 110 +#define R_LARCH_TLS_DESC_PC_HI20 111 +#define R_LARCH_TLS_DESC_PC_LO12 112 +#define R_LARCH_TLS_DESC64_PC_LO20 113 +#define R_LARCH_TLS_DESC64_PC_HI12 114 +#define R_LARCH_TLS_DESC_HI20 115 +#define R_LARCH_TLS_DESC_LO12 116 +#define R_LARCH_TLS_DESC64_LO20 117 +#define R_LARCH_TLS_DESC64_HI12 118 +#define R_LARCH_TLS_DESC_LD 119 +#define R_LARCH_TLS_DESC_CALL 120 +#define R_LARCH_TLS_LE_HI20_R 121 +#define R_LARCH_TLS_LE_ADD_R 122 +#define R_LARCH_TLS_LE_LO12_R 123 +#define R_LARCH_TLS_LD_PCREL20_S2 124 +#define R_LARCH_TLS_GD_PCREL20_S2 125 +#define R_LARCH_TLS_DESC_PCREL20_S2 126 +#define R_LARCH_CALL30 127 +#define R_LARCH_PCADD_HI20 128 +#define R_LARCH_PCADD_LO12 129 +#define R_LARCH_GOT_PCADD_HI20 130 +#define R_LARCH_GOT_PCADD_LO12 131 +#define R_LARCH_TLS_IE_PCADD_HI20 132 +#define R_LARCH_TLS_IE_PCADD_LO12 133 +#define R_LARCH_TLS_LD_PCADD_HI20 134 +#define R_LARCH_TLS_LD_PCADD_LO12 135 +#define R_LARCH_TLS_GD_PCADD_HI20 136 +#define R_LARCH_TLS_GD_PCADD_LO12 137 +#define R_LARCH_TLS_DESC_PCADD_HI20 138 +#define R_LARCH_TLS_DESC_PCADD_LO12 139 #ifndef ELF_ARCH @@ -156,6 +186,7 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef double elf_fpreg_t; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; +void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs); void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs); #ifdef CONFIG_32BIT diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 55e64a12a124a6..f9f207082d0e83 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -438,8 +438,10 @@ static inline bool is_branch_ins(union loongarch_instruction *ip) static inline bool is_ra_save_ins(union loongarch_instruction *ip) { - /* st.d $ra, $sp, offset */ - return ip->reg2i12_format.opcode == std_op && + const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? 
stw_op : std_op; + + /* st.w / st.d $ra, $sp, offset */ + return ip->reg2i12_format.opcode == opcode && ip->reg2i12_format.rj == LOONGARCH_GPR_SP && ip->reg2i12_format.rd == LOONGARCH_GPR_RA && !is_imm12_negative(ip->reg2i12_format.immediate); @@ -447,8 +449,10 @@ static inline bool is_ra_save_ins(union loongarch_instruction *ip) static inline bool is_stack_alloc_ins(union loongarch_instruction *ip) { - /* addi.d $sp, $sp, -imm */ - return ip->reg2i12_format.opcode == addid_op && + const u32 opcode = IS_ENABLED(CONFIG_32BIT) ? addiw_op : addid_op; + + /* addi.w / addi.d $sp, $sp, -imm */ + return ip->reg2i12_format.opcode == opcode && ip->reg2i12_format.rj == LOONGARCH_GPR_SP && ip->reg2i12_format.rd == LOONGARCH_GPR_SP && is_imm12_negative(ip->reg2i12_format.immediate); diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 12bd15578c3369..3943647503a958 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -50,10 +50,22 @@ void spurious_interrupt(void); #define NR_LEGACY_VECTORS 16 #define IRQ_MATRIX_BITS NR_VECTORS +#define AVEC_IRQ_SHIFT 4 +#define AVEC_IRQ_BIT 8 +#define AVEC_IRQ_MASK GENMASK(AVEC_IRQ_BIT - 1, 0) +#define AVEC_CPU_SHIFT 12 +#define AVEC_CPU_BIT 16 +#define AVEC_CPU_MASK GENMASK(AVEC_CPU_BIT - 1, 0) + #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); +#ifdef CONFIG_32BIT +#define MAX_IO_PICS 1 +#else #define MAX_IO_PICS 8 +#endif + #define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS)) struct acpi_vector_group { diff --git a/arch/loongarch/include/asm/jump_label.h b/arch/loongarch/include/asm/jump_label.h index 4000c7603d8e36..dcaecf69ea5a60 100644 --- a/arch/loongarch/include/asm/jump_label.h +++ b/arch/loongarch/include/asm/jump_label.h @@ -10,15 +10,23 @@ #ifndef __ASSEMBLER__ #include <linux/types.h> +#include <linux/stringify.h> +#include <asm/asm.h> #define JUMP_LABEL_NOP_SIZE 4 +#ifdef CONFIG_32BIT +#define JUMP_LABEL_TYPE ".long " +#else +#define JUMP_LABEL_TYPE ".quad " +#endif + /* This macro is also expanded on the Rust side. */ #define JUMP_TABLE_ENTRY(key, label) \ ".pushsection __jump_table, \"aw\" \n\t" \ - ".align 3 \n\t" \ + ".align " __stringify(PTRLOG) " \n\t" \ ".long 1b - ., " label " - . \n\t" \ - ".quad " key " - . \n\t" \ + JUMP_LABEL_TYPE key " - . 
\n\t" \ ".popsection \n\t" #define ARCH_STATIC_BRANCH_ASM(key, label) \ diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h index f53ea653af76d7..2d118b1a060e45 100644 --- a/arch/loongarch/include/asm/local.h +++ b/arch/loongarch/include/asm/local.h @@ -8,6 +8,7 @@ #include <linux/percpu.h> #include <linux/bitops.h> #include <linux/atomic.h> +#include <asm/asm.h> #include <asm/cmpxchg.h> typedef struct { @@ -27,6 +28,7 @@ typedef struct { /* * Same as above, but return the result value */ +#ifdef CONFIG_CPU_HAS_AMO static inline long local_add_return(long i, local_t *l) { unsigned long result; @@ -55,6 +57,41 @@ static inline long local_sub_return(long i, local_t *l) return result; } +#else +static inline long local_add_return(long i, local_t *l) +{ + unsigned long result, temp; + + __asm__ __volatile__( + "1:" __LL "%1, %2 # local_add_return \n" + __stringify(LONG_ADD) " %0, %1, %3 \n" + __SC "%0, %2 \n" + " beq %0, $r0, 1b \n" + __stringify(LONG_ADD) " %0, %1, %3 \n" + : "=&r" (result), "=&r" (temp), "=ZC" (l->a.counter) + : "r" (i), "ZC" (l->a.counter) + : "memory"); + + return result; +} + +static inline long local_sub_return(long i, local_t *l) +{ + unsigned long result, temp; + + __asm__ __volatile__( + "1:" __LL "%1, %2 # local_sub_return \n" + __stringify(LONG_SUB) " %0, %1, %3 \n" + __SC "%0, %2 \n" + " beq %0, $r0, 1b \n" + __stringify(LONG_SUB) " %0, %1, %3 \n" + : "=&r" (result), "=&r" (temp), "=ZC" (l->a.counter) + : "r" (i), "ZC" (l->a.counter) + : "memory"); + + return result; +} +#endif static inline long local_cmpxchg(local_t *l, long old, long new) { diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 58a4a3b6b035d4..e6b8ff61c8cc6c 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -182,6 +182,16 @@ #define csr_xchg32(val, mask, reg) __csrxchg_w(val, mask, reg) #define csr_xchg64(val, mask, reg) __csrxchg_d(val, mask, reg) +#ifdef CONFIG_32BIT +#define csr_read(reg) csr_read32(reg) +#define csr_write(val, reg) csr_write32(val, reg) +#define csr_xchg(val, mask, reg) csr_xchg32(val, mask, reg) +#else +#define csr_read(reg) csr_read64(reg) +#define csr_write(val, reg) csr_write64(val, reg) +#define csr_xchg(val, mask, reg) csr_xchg64(val, mask, reg) +#endif + /* IOCSR */ #define iocsr_read32(reg) __iocsrrd_w(reg) #define iocsr_read64(reg) __iocsrrd_d(reg) @@ -904,6 +914,26 @@ #define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */ /* Direct Map window 0/1/2/3 */ + +#ifdef CONFIG_32BIT + +#define CSR_DMW0_PLV0 (1 << 0) +#define CSR_DMW0_VSEG (0x4) +#define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) +#define CSR_DMW0_INIT (CSR_DMW0_BASE | CSR_DMW0_PLV0) + +#define CSR_DMW1_PLV0 (1 << 0) +#define CSR_DMW1_MAT (1 << 4) +#define CSR_DMW1_VSEG (0x5) +#define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS) +#define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0) + +#define CSR_DMW2_INIT 0x0 + +#define CSR_DMW3_INIT 0x0 + +#else + #define CSR_DMW0_PLV0 _CONST64_(1 << 0) #define CSR_DMW0_VSEG _CONST64_(0x8000) #define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) @@ -923,6 +953,8 @@ #define CSR_DMW3_INIT 0x0 +#endif + /* Performance Counter registers */ #define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */ #define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */ @@ -1208,7 +1240,35 @@ #ifndef __ASSEMBLER__ -static __always_inline u64 drdtime(void) +#ifdef CONFIG_32BIT + +static __always_inline u32 rdtime_h(void) 
+{ + u32 val = 0; + + __asm__ __volatile__( + "rdtimeh.w %0, $zero\n\t" + : "=r"(val) + : + ); + return val; +} + +static __always_inline u32 rdtime_l(void) +{ + u32 val = 0; + + __asm__ __volatile__( + "rdtimel.w %0, $zero\n\t" + : "=r"(val) + : + ); + return val; +} + +#else + +static __always_inline u64 rdtime_d(void) { u64 val = 0; @@ -1220,11 +1280,14 @@ static __always_inline u64 drdtime(void) return val; } +#endif + static inline unsigned int get_csr_cpuid(void) { return csr_read32(LOONGARCH_CSR_CPUID); } +#ifdef CONFIG_64BIT static inline void csr_any_send(unsigned int addr, unsigned int data, unsigned int data_mask, unsigned int cpu) { @@ -1236,6 +1299,7 @@ static inline void csr_any_send(unsigned int addr, unsigned int data, val |= ((uint64_t)data << IOCSR_ANY_SEND_BUF_SHIFT); iocsr_write64(val, LOONGARCH_IOCSR_ANY_SEND); } +#endif static inline unsigned int read_csr_excode(void) { @@ -1259,22 +1323,22 @@ static inline void write_csr_pagesize(unsigned int size) static inline unsigned int read_csr_tlbrefill_pagesize(void) { - return (csr_read64(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT; + return (csr_read(LOONGARCH_CSR_TLBREHI) & CSR_TLBREHI_PS) >> CSR_TLBREHI_PS_SHIFT; } static inline void write_csr_tlbrefill_pagesize(unsigned int size) { - csr_xchg64(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI); + csr_xchg(size << CSR_TLBREHI_PS_SHIFT, CSR_TLBREHI_PS, LOONGARCH_CSR_TLBREHI); } #define read_csr_asid() csr_read32(LOONGARCH_CSR_ASID) #define write_csr_asid(val) csr_write32(val, LOONGARCH_CSR_ASID) -#define read_csr_entryhi() csr_read64(LOONGARCH_CSR_TLBEHI) -#define write_csr_entryhi(val) csr_write64(val, LOONGARCH_CSR_TLBEHI) -#define read_csr_entrylo0() csr_read64(LOONGARCH_CSR_TLBELO0) -#define write_csr_entrylo0(val) csr_write64(val, LOONGARCH_CSR_TLBELO0) -#define read_csr_entrylo1() csr_read64(LOONGARCH_CSR_TLBELO1) -#define write_csr_entrylo1(val) csr_write64(val, LOONGARCH_CSR_TLBELO1) +#define read_csr_entryhi() csr_read(LOONGARCH_CSR_TLBEHI) +#define write_csr_entryhi(val) csr_write(val, LOONGARCH_CSR_TLBEHI) +#define read_csr_entrylo0() csr_read(LOONGARCH_CSR_TLBELO0) +#define write_csr_entrylo0(val) csr_write(val, LOONGARCH_CSR_TLBELO0) +#define read_csr_entrylo1() csr_read(LOONGARCH_CSR_TLBELO1) +#define write_csr_entrylo1(val) csr_write(val, LOONGARCH_CSR_TLBELO1) #define read_csr_ecfg() csr_read32(LOONGARCH_CSR_ECFG) #define write_csr_ecfg(val) csr_write32(val, LOONGARCH_CSR_ECFG) #define read_csr_estat() csr_read32(LOONGARCH_CSR_ESTAT) @@ -1284,20 +1348,20 @@ static inline void write_csr_tlbrefill_pagesize(unsigned int size) #define read_csr_euen() csr_read32(LOONGARCH_CSR_EUEN) #define write_csr_euen(val) csr_write32(val, LOONGARCH_CSR_EUEN) #define read_csr_cpuid() csr_read32(LOONGARCH_CSR_CPUID) -#define read_csr_prcfg1() csr_read64(LOONGARCH_CSR_PRCFG1) -#define write_csr_prcfg1(val) csr_write64(val, LOONGARCH_CSR_PRCFG1) -#define read_csr_prcfg2() csr_read64(LOONGARCH_CSR_PRCFG2) -#define write_csr_prcfg2(val) csr_write64(val, LOONGARCH_CSR_PRCFG2) -#define read_csr_prcfg3() csr_read64(LOONGARCH_CSR_PRCFG3) -#define write_csr_prcfg3(val) csr_write64(val, LOONGARCH_CSR_PRCFG3) +#define read_csr_prcfg1() csr_read(LOONGARCH_CSR_PRCFG1) +#define write_csr_prcfg1(val) csr_write(val, LOONGARCH_CSR_PRCFG1) +#define read_csr_prcfg2() csr_read(LOONGARCH_CSR_PRCFG2) +#define write_csr_prcfg2(val) csr_write(val, LOONGARCH_CSR_PRCFG2) +#define read_csr_prcfg3() csr_read(LOONGARCH_CSR_PRCFG3) +#define write_csr_prcfg3(val) 
csr_write(val, LOONGARCH_CSR_PRCFG3) #define read_csr_stlbpgsize() csr_read32(LOONGARCH_CSR_STLBPGSIZE) #define write_csr_stlbpgsize(val) csr_write32(val, LOONGARCH_CSR_STLBPGSIZE) #define read_csr_rvacfg() csr_read32(LOONGARCH_CSR_RVACFG) #define write_csr_rvacfg(val) csr_write32(val, LOONGARCH_CSR_RVACFG) #define write_csr_tintclear(val) csr_write32(val, LOONGARCH_CSR_TINTCLR) -#define read_csr_impctl1() csr_read64(LOONGARCH_CSR_IMPCTL1) -#define write_csr_impctl1(val) csr_write64(val, LOONGARCH_CSR_IMPCTL1) -#define write_csr_impctl2(val) csr_write64(val, LOONGARCH_CSR_IMPCTL2) +#define read_csr_impctl1() csr_read(LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl1(val) csr_write(val, LOONGARCH_CSR_IMPCTL1) +#define write_csr_impctl2(val) csr_write(val, LOONGARCH_CSR_IMPCTL2) #define read_csr_perfctrl0() csr_read64(LOONGARCH_CSR_PERFCTRL0) #define read_csr_perfcntr0() csr_read64(LOONGARCH_CSR_PERFCNTR0) @@ -1378,8 +1442,10 @@ __BUILD_CSR_OP(tlbidx) #define ENTRYLO_C_SHIFT 4 #define ENTRYLO_C (_ULCAST_(3) << ENTRYLO_C_SHIFT) #define ENTRYLO_G (_ULCAST_(1) << 6) +#ifdef CONFIG_64BIT #define ENTRYLO_NR (_ULCAST_(1) << 61) #define ENTRYLO_NX (_ULCAST_(1) << 62) +#endif /* Values for PageSize register */ #define PS_4K 0x0000000c diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h index f33f3fd32ecc2c..d56a968273dee9 100644 --- a/arch/loongarch/include/asm/module.h +++ b/arch/loongarch/include/asm/module.h @@ -38,8 +38,10 @@ struct got_entry { struct plt_entry { u32 inst_lu12iw; +#ifdef CONFIG_64BIT u32 inst_lu32id; u32 inst_lu52id; +#endif u32 inst_jirl; }; @@ -57,6 +59,14 @@ static inline struct got_entry emit_got_entry(Elf_Addr val) static inline struct plt_entry emit_plt_entry(unsigned long val) { +#ifdef CONFIG_32BIT + u32 lu12iw, jirl; + + lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW)); + jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI)); + + return (struct plt_entry) { lu12iw, jirl }; +#else u32 lu12iw, lu32id, lu52id, jirl; lu12iw = larch_insn_gen_lu12iw(LOONGARCH_GPR_T1, ADDR_IMM(val, LU12IW)); @@ -65,6 +75,7 @@ static inline struct plt_entry emit_plt_entry(unsigned long val) jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, ADDR_IMM(val, ORI)); return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl }; +#endif } static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val) diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h index a3aaf34fba16ab..256d1ff7a1e366 100644 --- a/arch/loongarch/include/asm/page.h +++ b/arch/loongarch/include/asm/page.h @@ -10,7 +10,7 @@ #include <vdso/page.h> -#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) +#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - PTRLOG) #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index 87be9b14e9dabf..583f2466262fcd 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -13,7 +13,7 @@ * the loading address of main kernel image, but far from where the modules are * loaded. Tell the compiler this fact when using explicit relocs. 
*/ -#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) +#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) && defined(CONFIG_64BIT) # if __has_attribute(model) # define PER_CPU_ATTRIBUTES __attribute__((model("extreme"))) # else @@ -27,7 +27,7 @@ register unsigned long __my_cpu_offset __asm__("$r21"); static inline void set_my_cpu_offset(unsigned long off) { __my_cpu_offset = off; - csr_write64(off, PERCPU_BASE_KS); + csr_write(off, PERCPU_BASE_KS); } #define __my_cpu_offset \ @@ -36,6 +36,8 @@ static inline void set_my_cpu_offset(unsigned long off) __my_cpu_offset; \ }) +#ifdef CONFIG_CPU_HAS_AMO + #define PERCPU_OP(op, asm_op, c_op) \ static __always_inline unsigned long __percpu_##op(void *ptr, \ unsigned long val, int size) \ @@ -68,25 +70,9 @@ PERCPU_OP(and, and, &) PERCPU_OP(or, or, |) #undef PERCPU_OP -static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size) -{ - switch (size) { - case 1: - case 2: - return __xchg_small((volatile void *)ptr, val, size); - - case 4: - return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val); - - case 8: - return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val); +#endif - default: - BUILD_BUG(); - } - - return 0; -} +#ifdef CONFIG_64BIT #define __pcpu_op_1(op) op ".b " #define __pcpu_op_2(op) op ".h " @@ -115,6 +101,10 @@ do { \ : "memory"); \ } while (0) +#endif + +#define __percpu_xchg __arch_xchg + /* this_cpu_cmpxchg */ #define _protect_cmpxchg_local(pcp, o, n) \ ({ \ @@ -135,6 +125,8 @@ do { \ __retval; \ }) +#ifdef CONFIG_CPU_HAS_AMO + #define _percpu_add(pcp, val) \ _pcp_protect(__percpu_add, pcp, val) @@ -146,9 +138,6 @@ do { \ #define _percpu_or(pcp, val) \ _pcp_protect(__percpu_or, pcp, val) -#define _percpu_xchg(pcp, val) ((typeof(pcp)) \ - _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))) - #define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) #define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) @@ -161,6 +150,10 @@ do { \ #define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) #define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) +#endif + +#ifdef CONFIG_64BIT + #define this_cpu_read_1(pcp) _percpu_read(1, pcp) #define this_cpu_read_2(pcp) _percpu_read(2, pcp) #define this_cpu_read_4(pcp) _percpu_read(4, pcp) @@ -171,6 +164,11 @@ do { \ #define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val) #define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val) +#endif + +#define _percpu_xchg(pcp, val) ((typeof(pcp)) \ + _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))) + #define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) #define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) #define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 2fc3789220ac6c..b565573cd82eed 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -6,6 +6,26 @@ #define _ASM_PGTABLE_BITS_H /* Page table bits */ + +#ifdef CONFIG_32BIT +#define _PAGE_VALID_SHIFT 0 +#define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */ +#define _PAGE_DIRTY_SHIFT 1 +#define _PAGE_PLV_SHIFT 2 /* 2~3, two bits */ +#define _CACHE_SHIFT 4 /* 4~5, two bits */ +#define _PAGE_GLOBAL_SHIFT 6 +#define _PAGE_HUGE_SHIFT 6 /* HUGE is a PMD bit */ +#define _PAGE_PRESENT_SHIFT 7 +#define _PAGE_PFN_SHIFT 8 +#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ +#define _PAGE_SWP_EXCLUSIVE_SHIFT 13 +#define _PAGE_PFN_END_SHIFT 28 +#define _PAGE_WRITE_SHIFT 29 
+#define _PAGE_MODIFIED_SHIFT 30 +#define _PAGE_PRESENT_INVALID_SHIFT 31 +#endif + +#ifdef CONFIG_64BIT #define _PAGE_VALID_SHIFT 0 #define _PAGE_ACCESSED_SHIFT 0 /* Reuse Valid for Accessed */ #define _PAGE_DIRTY_SHIFT 1 @@ -18,14 +38,15 @@ #define _PAGE_MODIFIED_SHIFT 9 #define _PAGE_PROTNONE_SHIFT 10 #define _PAGE_SPECIAL_SHIFT 11 -#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ #define _PAGE_PFN_SHIFT 12 +#define _PAGE_HGLOBAL_SHIFT 12 /* HGlobal is a PMD bit */ #define _PAGE_SWP_EXCLUSIVE_SHIFT 23 #define _PAGE_PFN_END_SHIFT 48 #define _PAGE_PRESENT_INVALID_SHIFT 60 #define _PAGE_NO_READ_SHIFT 61 #define _PAGE_NO_EXEC_SHIFT 62 #define _PAGE_RPLV_SHIFT 63 +#endif /* Used by software */ #define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT) @@ -33,10 +54,15 @@ #define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT) #define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT) #define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) +#ifdef CONFIG_32BIT +#define _PAGE_PROTNONE 0 +#define _PAGE_SPECIAL 0 +#else #define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT) #define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT) +#endif -/* We borrow bit 23 to store the exclusive marker in swap PTEs. */ +/* We borrow bit 13/23 to store the exclusive marker in swap PTEs. */ #define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT) /* Used by TLB hardware (placed in EntryLo*) */ @@ -46,9 +72,15 @@ #define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT) #define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT) #define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT) +#ifdef CONFIG_32BIT +#define _PAGE_NO_READ 0 +#define _PAGE_NO_EXEC 0 +#define _PAGE_RPLV 0 +#else #define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT) #define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT) #define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT) +#endif #define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT) #define PFN_PTE_SHIFT (PAGE_SHIFT - 12 + _PAGE_PFN_SHIFT) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 03fb60432fde79..f41a648a3d9e21 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -11,6 +11,7 @@ #include <linux/compiler.h> #include <asm/addrspace.h> +#include <asm/asm.h> #include <asm/page.h> #include <asm/pgtable-bits.h> @@ -23,37 +24,45 @@ #endif #if CONFIG_PGTABLE_LEVELS == 2 -#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG)) #elif CONFIG_PGTABLE_LEVELS == 3 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG)) #elif CONFIG_PGTABLE_LEVELS == 4 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTRLOG)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - PTRLOG)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - PTRLOG)) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3)) +#ifdef CONFIG_32BIT +#define VA_BITS 32 +#else +#define VA_BITS 
(PGDIR_SHIFT + (PAGE_SHIFT - PTRLOG)) +#endif -#define PTRS_PER_PGD (PAGE_SIZE >> 3) +#define PTRS_PER_PGD (PAGE_SIZE >> PTRLOG) #if CONFIG_PGTABLE_LEVELS > 3 -#define PTRS_PER_PUD (PAGE_SIZE >> 3) +#define PTRS_PER_PUD (PAGE_SIZE >> PTRLOG) #endif #if CONFIG_PGTABLE_LEVELS > 2 -#define PTRS_PER_PMD (PAGE_SIZE >> 3) +#define PTRS_PER_PMD (PAGE_SIZE >> PTRLOG) #endif -#define PTRS_PER_PTE (PAGE_SIZE >> 3) +#define PTRS_PER_PTE (PAGE_SIZE >> PTRLOG) +#ifdef CONFIG_32BIT +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +#else #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) +#endif #ifndef __ASSEMBLER__ @@ -74,11 +83,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) -/* - * TLB refill handlers may also map the vmalloc area into xkvrange. - * Avoid the first couple of pages so NULL pointer dereferences will - * still reliably trap. - */ +#ifdef CONFIG_32BIT + +#define VMALLOC_START (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE)) +#define VMALLOC_END (FIXADDR_START - (2 * PAGE_SIZE)) + +#endif + +#ifdef CONFIG_64BIT + #define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE)) #define MODULES_END (MODULES_VADDR + SZ_256M) @@ -106,6 +119,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define KFENCE_AREA_START (VMEMMAP_END + 1) #define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1) +#endif + #define ptep_get(ptep) READ_ONCE(*(ptep)) #define pmdp_get(pmdp) READ_ONCE(*(pmdp)) @@ -277,7 +292,16 @@ extern void kernel_pte_init(void *addr); * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that * are !pte_none() && !pte_present(). * - * Format of swap PTEs: + * Format of 32bit swap PTEs: + * + * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * <------------ offset -------------> E <- type -> <-- zeroes --> + * + * E is the exclusive marker that is not stored in swap entries. + * The zero'ed bits include _PAGE_PRESENT. + * + * Format of 64bit swap PTEs: * * 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 @@ -290,16 +314,27 @@ extern void kernel_pte_init(void *addr); * E is the exclusive marker that is not stored in swap entries. * The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE. */ + +#define __SWP_TYPE_BITS (IS_ENABLED(CONFIG_32BIT) ? 5 : 7) +#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) +#define __SWP_TYPE_SHIFT (IS_ENABLED(CONFIG_32BIT) ? 
8 : 16) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT + 1) + static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) -{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; } +{ + pte_t pte; + pte_val(pte) = ((type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | (offset << __SWP_OFFSET_SHIFT); + return pte; +} -#define __swp_type(x) (((x).val >> 16) & 0x7f) -#define __swp_offset(x) ((x).val >> 24) +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) #define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) }) + +#define __swp_entry_to_pte(x) __pte((x).val) +#define __swp_entry_to_pmd(x) __pmd((x).val | _PAGE_HUGE) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) }) -#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE }) static inline bool pte_swp_exclusive(pte_t pte) { diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index 5cb568a60cf8ef..ecc8e50fffa87a 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -38,22 +38,42 @@ cfi_restore \reg \offset \docfi .endm + .macro SETUP_TWINS temp + pcaddi t0, 0 + PTR_LI t1, ~TO_PHYS_MASK + and t0, t0, t1 + ori t0, t0, (1 << 4 | 1) + csrwr t0, LOONGARCH_CSR_DMWIN0 + PTR_LI t0, CSR_DMW1_INIT + csrwr t0, LOONGARCH_CSR_DMWIN1 + .endm + + .macro SETUP_MODES temp + /* Enable PG */ + li.w \temp, 0xb0 # PLV=0, IE=0, PG=1 + csrwr \temp, LOONGARCH_CSR_CRMD + li.w \temp, 0x04 # PLV=0, PIE=1, PWE=0 + csrwr \temp, LOONGARCH_CSR_PRMD + li.w \temp, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 + csrwr \temp, LOONGARCH_CSR_EUEN + .endm + .macro SETUP_DMWINS temp - li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx + PTR_LI \temp, CSR_DMW0_INIT # SUC, PLV0, LA32: 0x8xxx xxxx, LA64: 0x8000 xxxx xxxx xxxx csrwr \temp, LOONGARCH_CSR_DMWIN0 - li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx + PTR_LI \temp, CSR_DMW1_INIT # CAC, PLV0, LA32: 0xaxxx xxxx, LA64: 0x9000 xxxx xxxx xxxx csrwr \temp, LOONGARCH_CSR_DMWIN1 - li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx + PTR_LI \temp, CSR_DMW2_INIT # WUC, PLV0, LA32: unavailable, LA64: 0xa000 xxxx xxxx xxxx csrwr \temp, LOONGARCH_CSR_DMWIN2 - li.d \temp, CSR_DMW3_INIT # 0x0, unused + PTR_LI \temp, CSR_DMW3_INIT # 0x0, unused csrwr \temp, LOONGARCH_CSR_DMWIN3 .endm /* Jump to the runtime virtual address. 
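The __SWP_* macros above parameterize the swap-PTE layout for both word sizes; on 32-bit the type field occupies bits 12:8, bit 13 is the exclusive marker, and the offset starts at bit 14 (5 + 8 + 1). A small user-space sketch of that packing, with locally defined constants mirroring the CONFIG_32BIT values (illustrative only, not kernel code):

    /* Constants mirror the CONFIG_32BIT values of __SWP_TYPE_BITS etc. */
    #include <stdint.h>
    #include <stdio.h>

    #define SWP_TYPE_BITS    5
    #define SWP_TYPE_MASK    ((1u << SWP_TYPE_BITS) - 1)
    #define SWP_TYPE_SHIFT   8
    #define SWP_OFFSET_SHIFT (SWP_TYPE_BITS + SWP_TYPE_SHIFT + 1)  /* 14; bit 13 = exclusive */

    static uint32_t mk_swap_pte32(uint32_t type, uint32_t offset)
    {
        return ((type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);
    }

    int main(void)
    {
        uint32_t pte = mk_swap_pte32(3, 0x1234);

        printf("type=%u offset=0x%x\n",
               (pte >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK,
               pte >> SWP_OFFSET_SHIFT);   /* prints: type=3 offset=0x1234 */
        return 0;
    }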
*/ .macro JUMP_VIRT_ADDR temp1 temp2 - li.d \temp1, CACHE_BASE + PTR_LI \temp1, CACHE_BASE pcaddi \temp2, 0 - bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0 + PTR_BSTRINS \temp1, \temp2, (DMW_PABITS - 1), 0 jirl zero, \temp1, 0xc .endm @@ -171,7 +191,7 @@ andi t0, t0, 0x3 /* extract pplv bit */ beqz t0, 9f - li.d tp, ~_THREAD_MASK + LONG_LI tp, ~_THREAD_MASK and tp, tp, sp cfi_st u0, PT_R21, \docfi csrrd u0, PERCPU_BASE_KS diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h index 5bb5a90d268151..bfa3fd879c7f33 100644 --- a/arch/loongarch/include/asm/string.h +++ b/arch/loongarch/include/asm/string.h @@ -5,6 +5,7 @@ #ifndef _ASM_STRING_H #define _ASM_STRING_H +#ifdef CONFIG_64BIT #define __HAVE_ARCH_MEMSET extern void *memset(void *__s, int __c, size_t __count); extern void *__memset(void *__s, int __c, size_t __count); @@ -16,6 +17,7 @@ extern void *__memcpy(void *__to, __const__ void *__from, size_t __n); #define __HAVE_ARCH_MEMMOVE extern void *memmove(void *__dest, __const__ void *__src, size_t __n); extern void *__memmove(void *__dest, __const__ void *__src, size_t __n); +#endif #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) diff --git a/arch/loongarch/include/asm/timex.h b/arch/loongarch/include/asm/timex.h index fb41e9e7a222cb..9ea52fad969039 100644 --- a/arch/loongarch/include/asm/timex.h +++ b/arch/loongarch/include/asm/timex.h @@ -18,7 +18,38 @@ typedef unsigned long cycles_t; static inline cycles_t get_cycles(void) { - return drdtime(); +#ifdef CONFIG_32BIT + return rdtime_l(); +#else + return rdtime_d(); +#endif +} + +#ifdef CONFIG_32BIT + +#define get_cycles_hi get_cycles_hi + +static inline cycles_t get_cycles_hi(void) +{ + return rdtime_h(); +} + +#endif + +static inline u64 get_cycles64(void) +{ +#ifdef CONFIG_32BIT + u32 hi, lo; + + do { + hi = rdtime_h(); + lo = rdtime_l(); + } while (hi != rdtime_h()); + + return ((u64)hi << 32) | lo; +#else + return rdtime_d(); +#endif } #endif /* __KERNEL__ */ diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h index 0d22991ae430d4..4e259d490e4567 100644 --- a/arch/loongarch/include/asm/uaccess.h +++ b/arch/loongarch/include/asm/uaccess.h @@ -19,10 +19,16 @@ #include <asm/asm-extable.h> #include <asm-generic/access_ok.h> +#define __LSW 0 +#define __MSW 1 + extern u64 __ua_limit; -#define __UA_ADDR ".dword" +#ifdef CONFIG_64BIT #define __UA_LIMIT __ua_limit +#else +#define __UA_LIMIT 0x80000000UL +#endif /* * get_user: - Get a simple variable from user space. @@ -126,6 +132,7 @@ extern u64 __ua_limit; * * Returns zero on success, or -EFAULT on error. 
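For CONFIG_32BIT, get_cycles64() above samples the stable counter as two 32-bit halves and re-reads the high word so a low-word rollover between the two reads cannot produce a torn value. A self-contained sketch of the same retry pattern, using a simulated counter in place of rdtime_h()/rdtime_l():

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated 64-bit counter standing in for rdtime_h()/rdtime_l(). */
    static uint64_t fake_counter = 0xfffffffdull;

    static uint32_t read_hi(void) { return fake_counter >> 32; }
    static uint32_t read_lo(void) { fake_counter++; return (uint32_t)fake_counter; }

    static uint64_t read_counter64(void)
    {
        uint32_t hi, lo;

        do {
            hi = read_hi();
            lo = read_lo();
        } while (hi != read_hi());      /* retry if the low word wrapped between reads */

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)read_counter64());
        return 0;
    }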
*/ + #define __put_user(x, ptr) \ ({ \ int __pu_err = 0; \ @@ -146,7 +153,7 @@ do { \ case 1: __get_data_asm(val, "ld.b", ptr); break; \ case 2: __get_data_asm(val, "ld.h", ptr); break; \ case 4: __get_data_asm(val, "ld.w", ptr); break; \ - case 8: __get_data_asm(val, "ld.d", ptr); break; \ + case 8: __get_data_asm_8(val, ptr); break; \ default: BUILD_BUG(); break; \ } \ } while (0) @@ -167,13 +174,39 @@ do { \ (val) = (__typeof__(*(ptr))) __gu_tmp; \ } +#ifdef CONFIG_64BIT +#define __get_data_asm_8(val, ptr) \ + __get_data_asm(val, "ld.d", ptr) +#else /* !CONFIG_64BIT */ +#define __get_data_asm_8(val, ptr) \ +{ \ + u32 __lo, __hi; \ + u32 __user *__ptr = (u32 __user *)(ptr); \ + \ + __asm__ __volatile__ ( \ + "1:\n" \ + " ld.w %1, %3 \n" \ + "2:\n" \ + " ld.w %2, %4 \n" \ + "3:\n" \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \ + : "+r" (__gu_err), "=&r" (__lo), "=r" (__hi) \ + : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \ + if (__gu_err) \ + __hi = 0; \ + (val) = (__typeof__(val))((__typeof__((val)-(val))) \ + ((((u64)__hi << 32) | __lo))); \ +} +#endif /* CONFIG_64BIT */ + #define __put_user_common(ptr, size) \ do { \ switch (size) { \ case 1: __put_data_asm("st.b", ptr); break; \ case 2: __put_data_asm("st.h", ptr); break; \ case 4: __put_data_asm("st.w", ptr); break; \ - case 8: __put_data_asm("st.d", ptr); break; \ + case 8: __put_data_asm_8(ptr); break; \ default: BUILD_BUG(); break; \ } \ } while (0) @@ -190,6 +223,30 @@ do { \ : "Jr" (__pu_val)); \ } +#ifdef CONFIG_64BIT +#define __put_data_asm_8(ptr) \ + __put_data_asm("st.d", ptr) +#else /* !CONFIG_64BIT */ +#define __put_data_asm_8(ptr) \ +{ \ + u32 __user *__ptr = (u32 __user *)(ptr); \ + u64 __x = (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \ + \ + __asm__ __volatile__ ( \ + "1:\n" \ + " st.w %z3, %1 \n" \ + "2:\n" \ + " st.w %z4, %2 \n" \ + "3:\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \ + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \ + : "+r" (__pu_err), \ + "=m" (__ptr[__LSW]), \ + "=m" (__ptr[__MSW]) \ + : "rJ" (__x), "rJ" (__x >> 32)); \ +} +#endif /* CONFIG_64BIT */ + #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ int __gu_err = 0; \ diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h index dcafabca9bb695..bae76767c693a9 100644 --- a/arch/loongarch/include/asm/vdso/gettimeofday.h +++ b/arch/loongarch/include/asm/vdso/gettimeofday.h @@ -12,6 +12,8 @@ #include <asm/unistd.h> #include <asm/vdso/vdso.h> +#ifdef CONFIG_GENERIC_GETTIMEOFDAY + #define VDSO_HAS_CLOCK_GETRES 1 static __always_inline long gettimeofday_fallback( @@ -89,6 +91,8 @@ static inline bool loongarch_vdso_hres_capable(void) } #define __arch_vdso_hres_capable loongarch_vdso_hres_capable +#endif /* CONFIG_GENERIC_GETTIMEOFDAY */ + #endif /* !__ASSEMBLER__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild index 517761419999a8..89ac01faa5aef5 100644 --- a/arch/loongarch/include/uapi/asm/Kbuild +++ b/arch/loongarch/include/uapi/asm/Kbuild @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 +syscall-y += unistd_32.h syscall-y += unistd_64.h diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h index 215e0f9e8aa32a..b35c794323bcab 100644 --- a/arch/loongarch/include/uapi/asm/ptrace.h +++ b/arch/loongarch/include/uapi/asm/ptrace.h @@ -61,8 +61,13 @@ struct user_lbt_state { struct user_watch_state { __u64 dbg_info; 
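The !CONFIG_64BIT __get_data_asm_8/__put_data_asm_8 paths above split an 8-byte user access into two word accesses, least-significant word first (__LSW 0, __MSW 1), each with its own exception-table entry. The value plumbing reduces to the following plain C (a sketch with made-up test data, not the uaccess machinery itself):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t combine(uint32_t lo, uint32_t hi)
    {
        return ((uint64_t)hi << 32) | lo;   /* what __get_data_asm_8 builds */
    }

    static void split(uint64_t v, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)v;                  /* stored to __ptr[__LSW] */
        *hi = (uint32_t)(v >> 32);          /* stored to __ptr[__MSW] */
    }

    int main(void)
    {
        uint32_t lo, hi;

        split(0x1122334455667788ull, &lo, &hi);
        printf("lo=0x%x hi=0x%x -> 0x%llx\n", lo, hi,
               (unsigned long long)combine(lo, hi));
        return 0;
    }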
struct { +#if __BITS_PER_LONG == 32 + __u32 addr; + __u32 mask; +#else __u64 addr; __u64 mask; +#endif __u32 ctrl; __u32 pad; } dbg_regs[8]; @@ -71,8 +76,13 @@ struct user_watch_state { struct user_watch_state_v2 { __u64 dbg_info; struct { +#if __BITS_PER_LONG == 32 + __u32 addr; + __u32 mask; +#else __u64 addr; __u64 mask; +#endif __u32 ctrl; __u32 pad; } dbg_regs[14]; diff --git a/arch/loongarch/include/uapi/asm/unistd.h b/arch/loongarch/include/uapi/asm/unistd.h index 1f01980f9c9482..e19c7f2f9f87a4 100644 --- a/arch/loongarch/include/uapi/asm/unistd.h +++ b/arch/loongarch/include/uapi/asm/unistd.h @@ -1,3 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#include <asm/bitsperlong.h> + +#if __BITS_PER_LONG == 32 +#include <asm/unistd_32.h> +#else #include <asm/unistd_64.h> +#endif diff --git a/arch/loongarch/kernel/Makefile.syscalls b/arch/loongarch/kernel/Makefile.syscalls index ab7d9baa29152d..cd46c2b69c7fde 100644 --- a/arch/loongarch/kernel/Makefile.syscalls +++ b/arch/loongarch/kernel/Makefile.syscalls @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 # No special ABIs on loongarch so far +syscall_abis_32 += syscall_abis_64 += diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index a2060a24b39fd7..08a227034042df 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -106,7 +106,11 @@ EXPORT_SYMBOL(vm_map_base); static void cpu_probe_addrbits(struct cpuinfo_loongarch *c) { -#ifdef __NEED_ADDRBITS_PROBE +#ifdef CONFIG_32BIT + c->pabits = cpu_pabits; + c->vabits = cpu_vabits; + vm_map_base = KVRANGE; +#else c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4; c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12; vm_map_base = 0UL - (1UL << c->vabits); @@ -298,8 +302,15 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int return; } +#ifdef CONFIG_64BIT *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); +#else + *vendor = iocsr_read32(LOONGARCH_IOCSR_VENDOR) | + (u64)iocsr_read32(LOONGARCH_IOCSR_VENDOR + 4) << 32; + *cpuname = iocsr_read32(LOONGARCH_IOCSR_CPUNAME) | + (u64)iocsr_read32(LOONGARCH_IOCSR_CPUNAME + 4) << 32; +#endif if (!__cpu_full_name[cpu]) { if (((char *)vendor)[0] == 0) diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S index ba0bdbf86aa856..6df56241cb95a7 100644 --- a/arch/loongarch/kernel/efi-header.S +++ b/arch/loongarch/kernel/efi-header.S @@ -9,7 +9,11 @@ .macro __EFI_PE_HEADER .long IMAGE_NT_SIGNATURE .Lcoff_header: +#ifdef CONFIG_32BIT + .short IMAGE_FILE_MACHINE_LOONGARCH32 /* Machine */ +#else .short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */ +#endif .short .Lsection_count /* NumberOfSections */ .long 0 /* TimeDateStamp */ .long 0 /* PointerToSymbolTable */ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 860a3bc030e066..52c21c89531896 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -115,7 +115,9 @@ void __init efi_init(void) efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor); - set_bit(EFI_64BIT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + efi_nr_tables = efi_systab->nr_tables; efi_config_table = (unsigned long)efi_systab->tables; diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 47e1db9a1ce47b..b53d333a7c4270 100644 --- a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ 
-23,24 +23,24 @@ SYM_CODE_START(handle_syscall) UNWIND_HINT_UNDEFINED csrrd t0, PERCPU_BASE_KS la.pcrel t1, kernelsp - add.d t1, t1, t0 + PTR_ADD t1, t1, t0 move t2, sp - ld.d sp, t1, 0 + PTR_L sp, t1, 0 - addi.d sp, sp, -PT_SIZE + PTR_ADDI sp, sp, -PT_SIZE cfi_st t2, PT_R3 cfi_rel_offset sp, PT_R3 - st.d zero, sp, PT_R0 + LONG_S zero, sp, PT_R0 csrrd t2, LOONGARCH_CSR_PRMD - st.d t2, sp, PT_PRMD + LONG_S t2, sp, PT_PRMD csrrd t2, LOONGARCH_CSR_CRMD - st.d t2, sp, PT_CRMD + LONG_S t2, sp, PT_CRMD csrrd t2, LOONGARCH_CSR_EUEN - st.d t2, sp, PT_EUEN + LONG_S t2, sp, PT_EUEN csrrd t2, LOONGARCH_CSR_ECFG - st.d t2, sp, PT_ECFG + LONG_S t2, sp, PT_ECFG csrrd t2, LOONGARCH_CSR_ESTAT - st.d t2, sp, PT_ESTAT + LONG_S t2, sp, PT_ESTAT cfi_st ra, PT_R1 cfi_st a0, PT_R4 cfi_st a1, PT_R5 @@ -51,7 +51,7 @@ SYM_CODE_START(handle_syscall) cfi_st a6, PT_R10 cfi_st a7, PT_R11 csrrd ra, LOONGARCH_CSR_ERA - st.d ra, sp, PT_ERA + LONG_S ra, sp, PT_ERA cfi_rel_offset ra, PT_ERA cfi_st tp, PT_R2 @@ -67,7 +67,7 @@ SYM_CODE_START(handle_syscall) #endif move u0, t0 - li.d tp, ~_THREAD_MASK + LONG_LI tp, ~_THREAD_MASK and tp, tp, sp move a0, sp diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 23bd5ae2212c26..841206fde3ab72 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -72,9 +72,12 @@ static int __init fdt_cpu_clk_init(void) clk = of_clk_get(np, 0); of_node_put(np); + cpu_clock_freq = 200 * 1000 * 1000; - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + pr_warn("No valid CPU clock freq, assume 200MHz.\n"); return -ENODEV; + } cpu_clock_freq = clk_get_rate(clk); clk_put(clk); diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index 28caf416ae36e6..f225dcc5b530c7 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -96,6 +96,49 @@ EX fld.d $f31, \base, (31 * FPU_REG_WIDTH) .endm +#ifdef CONFIG_32BIT + .macro sc_save_fcc thread tmp0 tmp1 + movcf2gr \tmp0, $fcc0 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc1 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc2 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc3 + bstrins.w \tmp1, \tmp0, 31, 24 + EX st.w \tmp1, \thread, THREAD_FCC + movcf2gr \tmp0, $fcc4 + move \tmp1, \tmp0 + movcf2gr \tmp0, $fcc5 + bstrins.w \tmp1, \tmp0, 15, 8 + movcf2gr \tmp0, $fcc6 + bstrins.w \tmp1, \tmp0, 23, 16 + movcf2gr \tmp0, $fcc7 + bstrins.w \tmp1, \tmp0, 31, 24 + EX st.w \tmp1, \thread, (THREAD_FCC + 4) + .endm + + .macro sc_restore_fcc thread tmp0 tmp1 + EX ld.w \tmp0, \thread, THREAD_FCC + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc0, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc1, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc2, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc3, \tmp1 + EX ld.w \tmp0, \thread, (THREAD_FCC + 4) + bstrpick.w \tmp1, \tmp0, 7, 0 + movgr2cf $fcc4, \tmp1 + bstrpick.w \tmp1, \tmp0, 15, 8 + movgr2cf $fcc5, \tmp1 + bstrpick.w \tmp1, \tmp0, 23, 16 + movgr2cf $fcc6, \tmp1 + bstrpick.w \tmp1, \tmp0, 31, 24 + movgr2cf $fcc7, \tmp1 + .endm +#else .macro sc_save_fcc base, tmp0, tmp1 movcf2gr \tmp0, $fcc0 move \tmp1, \tmp0 @@ -135,6 +178,7 @@ bstrpick.d \tmp1, \tmp0, 63, 56 movgr2cf $fcc7, \tmp1 .endm +#endif .macro sc_save_fcsr base, tmp0 movfcsr2gr \tmp0, fcsr0 @@ -410,6 +454,72 @@ SYM_FUNC_START(_init_fpu) li.w t1, -1 # SNaN +#ifdef CONFIG_32BIT + movgr2fr.w $f0, t1 + movgr2frh.w $f0, t1 + movgr2fr.w $f1, t1 + movgr2frh.w $f1, t1 + movgr2fr.w $f2, t1 + movgr2frh.w $f2, t1 + movgr2fr.w $f3, t1 + movgr2frh.w $f3, t1 + movgr2fr.w $f4, t1 + 
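The CONFIG_32BIT sc_save_fcc above spreads the eight one-bit condition-flag registers across two 32-bit words, one byte per flag, so THREAD_FCC keeps the same 8-byte footprint as on 64-bit; sc_restore_fcc reverses it with bstrpick.w. A rough C equivalent of the packing step (names are local to this sketch):

    #include <stdio.h>

    /* Pack eight 1-bit FCC flags into two 32-bit words, one byte per flag,
     * mirroring the bstrins.w sequence in the 32-bit sc_save_fcc. */
    static void pack_fcc(const unsigned char fcc[8], unsigned int out[2])
    {
        out[0] = out[1] = 0;
        for (int i = 0; i < 8; i++)
            out[i / 4] |= (unsigned int)(fcc[i] & 1) << ((i % 4) * 8);
    }

    int main(void)
    {
        unsigned char fcc[8] = { 1, 0, 1, 1, 0, 0, 0, 1 };
        unsigned int w[2];

        pack_fcc(fcc, w);
        printf("%08x %08x\n", w[0], w[1]);  /* prints: 01010001 01000000 */
        return 0;
    }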
movgr2frh.w $f4, t1 + movgr2fr.w $f5, t1 + movgr2frh.w $f5, t1 + movgr2fr.w $f6, t1 + movgr2frh.w $f6, t1 + movgr2fr.w $f7, t1 + movgr2frh.w $f7, t1 + movgr2fr.w $f8, t1 + movgr2frh.w $f8, t1 + movgr2fr.w $f9, t1 + movgr2frh.w $f9, t1 + movgr2fr.w $f10, t1 + movgr2frh.w $f10, t1 + movgr2fr.w $f11, t1 + movgr2frh.w $f11, t1 + movgr2fr.w $f12, t1 + movgr2frh.w $f12, t1 + movgr2fr.w $f13, t1 + movgr2frh.w $f13, t1 + movgr2fr.w $f14, t1 + movgr2frh.w $f14, t1 + movgr2fr.w $f15, t1 + movgr2frh.w $f15, t1 + movgr2fr.w $f16, t1 + movgr2frh.w $f16, t1 + movgr2fr.w $f17, t1 + movgr2frh.w $f17, t1 + movgr2fr.w $f18, t1 + movgr2frh.w $f18, t1 + movgr2fr.w $f19, t1 + movgr2frh.w $f19, t1 + movgr2fr.w $f20, t1 + movgr2frh.w $f20, t1 + movgr2fr.w $f21, t1 + movgr2frh.w $f21, t1 + movgr2fr.w $f22, t1 + movgr2frh.w $f22, t1 + movgr2fr.w $f23, t1 + movgr2frh.w $f23, t1 + movgr2fr.w $f24, t1 + movgr2frh.w $f24, t1 + movgr2fr.w $f25, t1 + movgr2frh.w $f25, t1 + movgr2fr.w $f26, t1 + movgr2frh.w $f26, t1 + movgr2fr.w $f27, t1 + movgr2frh.w $f27, t1 + movgr2fr.w $f28, t1 + movgr2frh.w $f28, t1 + movgr2fr.w $f29, t1 + movgr2frh.w $f29, t1 + movgr2fr.w $f30, t1 + movgr2frh.w $f30, t1 + movgr2fr.w $f31, t1 + movgr2frh.w $f31, t1 +#else movgr2fr.d $f0, t1 movgr2fr.d $f1, t1 movgr2fr.d $f2, t1 @@ -442,6 +552,7 @@ SYM_FUNC_START(_init_fpu) movgr2fr.d $f29, t1 movgr2fr.d $f30, t1 movgr2fr.d $f31, t1 +#endif jr ra SYM_FUNC_END(_init_fpu) diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index e3865e92a917a7..aba548db244604 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -43,36 +43,29 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize); SYM_CODE_START(kernel_entry) # kernel entry point - /* Config direct window and set PG */ - SETUP_DMWINS t0 + SETUP_TWINS + SETUP_MODES t0 JUMP_VIRT_ADDR t0, t1 - - /* Enable PG */ - li.w t0, 0xb0 # PLV=0, IE=0, PG=1 - csrwr t0, LOONGARCH_CSR_CRMD - li.w t0, 0x04 # PLV=0, PIE=1, PWE=0 - csrwr t0, LOONGARCH_CSR_PRMD - li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 - csrwr t0, LOONGARCH_CSR_EUEN + SETUP_DMWINS t0 la.pcrel t0, __bss_start # clear .bss - st.d zero, t0, 0 + LONG_S zero, t0, 0 la.pcrel t1, __bss_stop - LONGSIZE 1: - addi.d t0, t0, LONGSIZE - st.d zero, t0, 0 + PTR_ADDI t0, t0, LONGSIZE + LONG_S zero, t0, 0 bne t0, t1, 1b la.pcrel t0, fw_arg0 - st.d a0, t0, 0 # firmware arguments + PTR_S a0, t0, 0 # firmware arguments la.pcrel t0, fw_arg1 - st.d a1, t0, 0 + PTR_S a1, t0, 0 la.pcrel t0, fw_arg2 - st.d a2, t0, 0 + PTR_S a2, t0, 0 #ifdef CONFIG_PAGE_SIZE_4KB - li.d t0, 0 - li.d t1, CSR_STFILL + LONG_LI t0, 0 + LONG_LI t1, CSR_STFILL csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1 #endif /* KSave3 used for percpu base, initialized as 0 */ @@ -98,7 +91,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point /* Jump to the new kernel: new_pc = current_pc + random_offset */ pcaddi t0, 0 - add.d t0, t0, a0 + PTR_ADD t0, t0, a0 jirl zero, t0, 0xc #endif /* CONFIG_RANDOMIZE_BASE */ @@ -121,12 +114,14 @@ SYM_CODE_END(kernel_entry) */ SYM_CODE_START(smpboot_entry) - SETUP_DMWINS t0 + SETUP_TWINS + SETUP_MODES t0 JUMP_VIRT_ADDR t0, t1 + SETUP_DMWINS t0 #ifdef CONFIG_PAGE_SIZE_4KB - li.d t0, 0 - li.d t1, CSR_STFILL + LONG_LI t0, 0 + LONG_LI t1, CSR_STFILL csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1 #endif /* Enable PG */ diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c index a43ba7f9f9872a..9fa1c9814fcc2a 100644 --- a/arch/loongarch/kernel/module-sections.c +++ b/arch/loongarch/kernel/module-sections.c @@ -93,6 
+93,7 @@ static void count_max_entries(Elf_Rela *relas, int num, (*plts)++; break; case R_LARCH_GOT_PC_HI20: + case R_LARCH_GOT_PCADD_HI20: (*gots)++; break; default: diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c index 36d6d9eeb7c721..7d4d571ee55e9f 100644 --- a/arch/loongarch/kernel/module.c +++ b/arch/loongarch/kernel/module.c @@ -22,72 +22,89 @@ #include <asm/inst.h> #include <asm/unwind.h> -static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) +/* + * reloc_rela_handler() - Apply a particular relocation to a module + * @mod: the module to apply the reloc to + * @location: the address at which the reloc is to be applied + * @v: the value of the reloc, with addend for RELA-style + * @rela_stack: the stack used for store relocation info, LOCAL to THIS module + * @rela_stac_top: where the stack operation(pop/push) applies to + * + * Return: 0 upon success, else -ERRNO + */ +typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v, + long *rela_stack, size_t *rela_stack_top, unsigned int type); + +static int rela_stack_push(long stack_value, long *rela_stack, size_t *rela_stack_top) { if (*rela_stack_top >= RELA_STACK_DEPTH) return -ENOEXEC; rela_stack[(*rela_stack_top)++] = stack_value; - pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value); + pr_debug("%s stack_value = 0x%lx\n", __func__, stack_value); return 0; } -static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top) +static int rela_stack_pop(long *stack_value, long *rela_stack, size_t *rela_stack_top) { if (*rela_stack_top == 0) return -ENOEXEC; *stack_value = rela_stack[--(*rela_stack_top)]; - pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value); + pr_debug("%s stack_value = 0x%lx\n", __func__, *stack_value); return 0; } static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { return 0; } static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type); return -EINVAL; } static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { *location = v; return 0; } +#ifdef CONFIG_32BIT +#define apply_r_larch_64 apply_r_larch_error +#else static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { *(Elf_Addr *)location = v; return 0; } +#endif static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { - return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top); + return rela_stack_push(v - (unsigned long)location, rela_stack, rela_stack_top); } static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { return rela_stack_push(v, rela_stack, 
rela_stack_top); } static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err = 0; - s64 opr1; + long opr1; err = rela_stack_pop(&opr1, rela_stack, rela_stack_top); if (err) @@ -104,7 +121,7 @@ static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Add static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; @@ -118,10 +135,10 @@ static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, } static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err = 0; - s64 opr1, opr2, opr3; + long opr1, opr2, opr3; if (type == R_LARCH_SOP_IF_ELSE) { err = rela_stack_pop(&opr3, rela_stack, rela_stack_top); @@ -164,10 +181,10 @@ static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v, } static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { int err = 0; - s64 opr1; + long opr1; union loongarch_instruction *insn = (union loongarch_instruction *)location; err = rela_stack_pop(&opr1, rela_stack, rela_stack_top); @@ -244,31 +261,33 @@ static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Ad } overflow: - pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n", + pr_err("module %s: opr1 = 0x%lx overflow! dangerous %s (%u) relocation\n", mod->name, opr1, __func__, type); return -ENOEXEC; unaligned: - pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n", + pr_err("module %s: opr1 = 0x%lx unaligned! 
dangerous %s (%u) relocation\n", mod->name, opr1, __func__, type); return -ENOEXEC; } static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { switch (type) { case R_LARCH_ADD32: *(s32 *)location += v; return 0; - case R_LARCH_ADD64: - *(s64 *)location += v; - return 0; case R_LARCH_SUB32: *(s32 *)location -= v; return 0; +#ifdef CONFIG_64BIT + case R_LARCH_ADD64: + *(s64 *)location += v; + return 0; case R_LARCH_SUB64: *(s64 *)location -= v; +#endif return 0; default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); @@ -278,7 +297,7 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, static int apply_r_larch_b26(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; union loongarch_instruction *insn = (union loongarch_instruction *)location; @@ -310,15 +329,40 @@ static int apply_r_larch_b26(struct module *mod, return 0; } +static int apply_r_larch_pcadd(struct module *mod, u32 *location, Elf_Addr v, + long *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + union loongarch_instruction *insn = (union loongarch_instruction *)location; + /* Use s32 for a sign-extension deliberately. */ + s32 offset_hi20 = (void *)((v + 0x800)) - (void *)((Elf_Addr)location); + + switch (type) { + case R_LARCH_PCADD_LO12: + insn->reg2i12_format.immediate = v & 0xfff; + break; + case R_LARCH_PCADD_HI20: + v = offset_hi20 >> 12; + insn->reg1i20_format.immediate = v & 0xfffff; + break; + default: + pr_err("%s: Unsupport relocation type %u\n", mod->name, type); + return -EINVAL; + } + + return 0; +} + static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { union loongarch_instruction *insn = (union loongarch_instruction *)location; /* Use s32 for a sign-extension deliberately. 
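The new apply_r_larch_pcadd() above and the HI20/LO12 pairing logic later in apply_relocate_add() both rely on the usual rounding split of a PC-relative offset: add 0x800 before taking the upper 20 bits, so the remaining low part is a sign-extended 12-bit value. A minimal standalone sketch of that decomposition (not the relocation handler itself):

    #include <stdint.h>
    #include <assert.h>

    static void split_hi20_lo12(int32_t offset, int32_t *hi20, int32_t *lo12)
    {
        *hi20 = (offset + 0x800) & ~0xfff;  /* upper part; instruction encodes hi20 >> 12 */
        *lo12 = offset - *hi20;             /* sign-extended 12-bit part, in [-0x800, 0x7ff] */
    }

    int main(void)
    {
        int32_t hi20, lo12;

        split_hi20_lo12(0x12fff, &hi20, &lo12);
        assert(hi20 + lo12 == 0x12fff);     /* hi20 = 0x13000, lo12 = -1 */
        assert(lo12 >= -0x800 && lo12 <= 0x7ff);
        return 0;
    }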
*/ s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) - (void *)((Elf_Addr)location & ~0xfff); +#ifdef CONFIG_64BIT Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20; ptrdiff_t offset_rem = (void *)v - (void *)anchor; +#endif switch (type) { case R_LARCH_PCALA_LO12: @@ -328,6 +372,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, v = offset_hi20 >> 12; insn->reg1i20_format.immediate = v & 0xfffff; break; +#ifdef CONFIG_64BIT case R_LARCH_PCALA64_LO20: v = offset_rem >> 32; insn->reg1i20_format.immediate = v & 0xfffff; @@ -336,6 +381,7 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, v = offset_rem >> 52; insn->reg2i12_format.immediate = v & 0xfff; break; +#endif default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); return -EINVAL; @@ -346,30 +392,43 @@ static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, static int apply_r_larch_got_pc(struct module *mod, Elf_Shdr *sechdrs, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { - Elf_Addr got = module_emit_got_entry(mod, sechdrs, v); + reloc_rela_handler got_handler; - if (!got) - return -EINVAL; + if (type != R_LARCH_GOT_PCADD_LO12) { + v = module_emit_got_entry(mod, sechdrs, v); + if (!v) + return -EINVAL; + } switch (type) { case R_LARCH_GOT_PC_LO12: type = R_LARCH_PCALA_LO12; + got_handler = apply_r_larch_pcala; break; case R_LARCH_GOT_PC_HI20: type = R_LARCH_PCALA_HI20; + got_handler = apply_r_larch_pcala; + break; + case R_LARCH_GOT_PCADD_LO12: + type = R_LARCH_PCADD_LO12; + got_handler = apply_r_larch_pcadd; + break; + case R_LARCH_GOT_PCADD_HI20: + type = R_LARCH_PCADD_HI20; + got_handler = apply_r_larch_pcadd; break; default: pr_err("%s: Unsupport relocation type %u\n", mod->name, type); return -EINVAL; } - return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type); + return got_handler(mod, location, v, rela_stack, rela_stack_top, type); } static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; @@ -377,31 +436,22 @@ static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v, return 0; } +#ifdef CONFIG_32BIT +#define apply_r_larch_64_pcrel apply_r_larch_error +#else static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type) + long *rela_stack, size_t *rela_stack_top, unsigned int type) { ptrdiff_t offset = (void *)v - (void *)location; *(u64 *)location = offset; return 0; } - -/* - * reloc_handlers_rela() - Apply a particular relocation to a module - * @mod: the module to apply the reloc to - * @location: the address at which the reloc is to be applied - * @v: the value of the reloc, with addend for RELA-style - * @rela_stack: the stack used for store relocation info, LOCAL to THIS module - * @rela_stac_top: where the stack operation(pop/push) applies to - * - * Return: 0 upon success, else -ERRNO - */ -typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v, - s64 *rela_stack, size_t *rela_stack_top, unsigned int type); +#endif /* The handlers for known reloc types */ static reloc_rela_handler reloc_rela_handlers[] = { - [R_LARCH_NONE ... 
R_LARCH_64_PCREL] = apply_r_larch_error, + [R_LARCH_NONE ... R_LARCH_TLS_DESC_PCADD_LO12] = apply_r_larch_error, [R_LARCH_NONE] = apply_r_larch_none, [R_LARCH_32] = apply_r_larch_32, @@ -414,7 +464,8 @@ static reloc_rela_handler reloc_rela_handlers[] = { [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop, [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field, [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub, - [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala, + [R_LARCH_PCADD_HI20 ... R_LARCH_PCADD_LO12] = apply_r_larch_pcadd, + [R_LARCH_PCALA_HI20 ... R_LARCH_PCALA64_HI12] = apply_r_larch_pcala, [R_LARCH_32_PCREL] = apply_r_larch_32_pcrel, [R_LARCH_64_PCREL] = apply_r_larch_64_pcrel, }; @@ -423,9 +474,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *mod) { - int i, err; - unsigned int type; - s64 rela_stack[RELA_STACK_DEPTH]; + int err; + unsigned int i, idx, type; + unsigned int num_relocations; + long rela_stack[RELA_STACK_DEPTH]; size_t rela_stack_top = 0; reloc_rela_handler handler; void *location; @@ -436,8 +488,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec, sechdrs[relsec].sh_info); + idx = 0; rela_stack_top = 0; - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); + for (i = 0; i < num_relocations; i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* This is the symbol it is referring to */ @@ -462,17 +516,59 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, return -EINVAL; } - pr_debug("type %d st_value %llx r_addend %llx loc %llx\n", + pr_debug("type %d st_value %lx r_addend %lx loc %lx\n", (int)ELF_R_TYPE(rel[i].r_info), - sym->st_value, rel[i].r_addend, (u64)location); + (unsigned long)sym->st_value, (unsigned long)rel[i].r_addend, (unsigned long)location); v = sym->st_value + rel[i].r_addend; + + if (type == R_LARCH_PCADD_LO12 || type == R_LARCH_GOT_PCADD_LO12) { + bool found = false; + unsigned int j = idx; + + do { + u32 hi20_type = ELF_R_TYPE(rel[j].r_info); + unsigned long hi20_location = + sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset; + + /* Find the corresponding HI20 relocation entry */ + if ((hi20_location == sym->st_value) && (hi20_type == type - 1)) { + s32 hi20, lo12; + Elf_Sym *hi20_sym = + (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[j].r_info); + unsigned long hi20_sym_val = hi20_sym->st_value + rel[j].r_addend; + + /* Calculate LO12 offset */ + size_t offset = hi20_sym_val - hi20_location; + if (hi20_type == R_LARCH_GOT_PCADD_HI20) { + offset = module_emit_got_entry(mod, sechdrs, hi20_sym_val); + offset = offset - hi20_location; + } + hi20 = (offset + 0x800) & 0xfffff000; + v = lo12 = offset - hi20; + found = true; + break; + } + + j = (j + 1) % num_relocations; + + } while (idx != j); + + if (!found) { + pr_err("%s: Can not find HI20 relocation information\n", mod->name); + return -EINVAL; + } + + idx = j; /* Record the previous j-loop end index */ + } + switch (type) { case R_LARCH_B26: err = apply_r_larch_b26(mod, sechdrs, location, v, rela_stack, &rela_stack_top, type); break; case R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12: + case R_LARCH_GOT_PCADD_HI20...R_LARCH_GOT_PCADD_LO12: err = apply_r_larch_got_pc(mod, sechdrs, location, v, rela_stack, 
&rela_stack_top, type); break; diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c index 63d2b7e7e844b0..a8800d20e11bd5 100644 --- a/arch/loongarch/kernel/proc.c +++ b/arch/loongarch/kernel/proc.c @@ -20,11 +20,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) unsigned int prid = cpu_data[n].processor_id; unsigned int version = cpu_data[n].processor_id & 0xff; unsigned int fp_version = cpu_data[n].fpu_vers; + u64 freq = cpu_clock_freq, bogomips = lpj_fine * cpu_clock_freq; #ifdef CONFIG_SMP if (!cpu_online(n)) return 0; #endif + do_div(freq, 10000); + do_div(bogomips, const_clock_freq * (5000/HZ)); /* * For the first processor also print the system type @@ -41,11 +44,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid); seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version); seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version); - seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n", - cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100); - seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n", - (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ), - ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100); + seq_printf(m, "CPU MHz\t\t\t: %u.%02u\n", (u32)freq / 100, (u32)freq % 100); + seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", (u32)bogomips / 100, (u32)bogomips % 100); seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize); seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n", cpu_pabits + 1, cpu_vabits + 1); diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index efd9edf65603cc..4ac1c30861526d 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -130,6 +130,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) preempt_enable(); + if (IS_ENABLED(CONFIG_RANDSTRUCT)) { + memcpy(dst, src, sizeof(struct task_struct)); + return 0; + } + if (!used_math()) memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr)); else @@ -377,8 +382,11 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace); } -#ifdef CONFIG_64BIT +#ifdef CONFIG_32BIT +void loongarch_dump_regs32(u32 *uregs, const struct pt_regs *regs) +#else void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs) +#endif { unsigned int i; @@ -395,4 +403,3 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs) uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg; uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat; } -#endif /* CONFIG_64BIT */ diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c index 8edd0954e55ab6..be38430f7e2802 100644 --- a/arch/loongarch/kernel/ptrace.c +++ b/arch/loongarch/kernel/ptrace.c @@ -650,8 +650,13 @@ static int ptrace_hbp_set_addr(unsigned int note_type, struct perf_event_attr attr; /* Kernel-space address cannot be monitored by user-space */ +#ifdef CONFIG_32BIT + if ((unsigned long)addr >= KPRANGE0) + return -EINVAL; +#else if ((unsigned long)addr >= XKPRANGE) return -EINVAL; +#endif bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); if (IS_ERR(bp)) diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index b5e2312a2fca51..82aa3f0359278e 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -68,18 +68,25 @@ static inline void __init relocate_absolute(long random_offset) for (p = begin; (void *)p 
< end; p++) { long v = p->symvalue; - uint32_t lu12iw, ori, lu32id, lu52id; + uint32_t lu12iw, ori; +#ifdef CONFIG_64BIT + uint32_t lu32id, lu52id; +#endif union loongarch_instruction *insn = (void *)p->pc; lu12iw = (v >> 12) & 0xfffff; ori = v & 0xfff; +#ifdef CONFIG_64BIT lu32id = (v >> 32) & 0xfffff; lu52id = v >> 52; +#endif insn[0].reg1i20_format.immediate = lu12iw; insn[1].reg2i12_format.immediate = ori; +#ifdef CONFIG_64BIT insn[2].reg1i20_format.immediate = lu32id; insn[3].reg2i12_format.immediate = lu52id; +#endif } } @@ -183,7 +190,7 @@ static inline void __init *determine_relocation_address(void) if (kaslr_disabled()) return destination; - kernel_length = (long)_end - (long)_text; + kernel_length = (unsigned long)_end - (unsigned long)_text; random_offset = get_random_boot() << 16; random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1); @@ -232,7 +239,7 @@ unsigned long __init relocate_kernel(void) early_memunmap(cmdline, COMMAND_LINE_SIZE); if (random_offset) { - kernel_length = (long)(_end) - (long)(_text); + kernel_length = (unsigned long)(_end) - (unsigned long)(_text); /* Copy the kernel to it's new location */ memcpy(location_new, _text, kernel_length); diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 25a87378e48e43..20cb6f30645683 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -56,6 +56,7 @@ #define SMBIOS_FREQLOW_MASK 0xFF #define SMBIOS_CORE_PACKAGE_OFFSET 0x23 #define SMBIOS_THREAD_PACKAGE_OFFSET 0x25 +#define SMBIOS_THREAD_PACKAGE_2_OFFSET 0x2E #define LOONGSON_EFI_ENABLE (1 << 3) unsigned long fw_arg0, fw_arg1, fw_arg2; @@ -126,7 +127,12 @@ static void __init parse_cpu_table(const struct dmi_header *dm) cpu_clock_freq = freq_temp * 1000000; loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]); - loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET); + loongson_sysconf.cores_per_package = *(u8 *)(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET); + if (dm->length >= 0x30 && loongson_sysconf.cores_per_package == 0xff) { + /* SMBIOS 3.0+ has ThreadCount2 for more than 255 threads */ + loongson_sysconf.cores_per_package = + *(u16 *)(dmi_data + SMBIOS_THREAD_PACKAGE_2_OFFSET); + } pr_info("CpuClock = %llu\n", cpu_clock_freq); } diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S index 9c23cb7e432f1f..f377d8f5c51a63 100644 --- a/arch/loongarch/kernel/switch.S +++ b/arch/loongarch/kernel/switch.S @@ -16,18 +16,23 @@ */ .align 5 SYM_FUNC_START(__switch_to) - csrrd t1, LOONGARCH_CSR_PRMD - stptr.d t1, a0, THREAD_CSRPRMD +#ifdef CONFIG_32BIT + PTR_ADDI a0, a0, TASK_STRUCT_OFFSET + PTR_ADDI a1, a1, TASK_STRUCT_OFFSET +#endif + csrrd t1, LOONGARCH_CSR_PRMD + LONG_SPTR t1, a0, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET) cpu_save_nonscratch a0 - stptr.d ra, a0, THREAD_REG01 - stptr.d a3, a0, THREAD_SCHED_RA - stptr.d a4, a0, THREAD_SCHED_CFA + LONG_SPTR a3, a0, (THREAD_SCHED_RA - TASK_STRUCT_OFFSET) + LONG_SPTR a4, a0, (THREAD_SCHED_CFA - TASK_STRUCT_OFFSET) + #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) - la t7, __stack_chk_guard - LONG_L t8, a1, TASK_STACK_CANARY - LONG_S t8, t7, 0 + la t7, __stack_chk_guard + LONG_LPTR t8, a1, (TASK_STACK_CANARY - TASK_STRUCT_OFFSET) + LONG_SPTR t8, t7, 0 #endif + move tp, a2 cpu_restore_nonscratch a1 @@ -35,8 +40,11 @@ SYM_FUNC_START(__switch_to) PTR_ADD t0, t0, tp set_saved_sp t0, t1, t2 - ldptr.d t1, a1, THREAD_CSRPRMD - csrwr t1, LOONGARCH_CSR_PRMD + LONG_LPTR t1, a1, (THREAD_CSRPRMD - TASK_STRUCT_OFFSET) + 
csrwr t1, LOONGARCH_CSR_PRMD +#ifdef CONFIG_32BIT + PTR_ADDI a0, a0, -TASK_STRUCT_OFFSET +#endif jr ra SYM_FUNC_END(__switch_to) diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c index 168bd97540f8cf..1249d82c1cd0ac 100644 --- a/arch/loongarch/kernel/syscall.c +++ b/arch/loongarch/kernel/syscall.c @@ -34,9 +34,22 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); } +SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long, + prot, unsigned long, flags, unsigned long, fd, unsigned long, offset) +{ + if (offset & (~PAGE_MASK >> 12)) + return -EINVAL; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - 12)); +} + void *sys_call_table[__NR_syscalls] = { [0 ... __NR_syscalls - 1] = sys_ni_syscall, +#ifdef CONFIG_32BIT +#include <asm/syscall_table_32.h> +#else #include <asm/syscall_table_64.h> +#endif }; typedef long (*sys_call_fn)(unsigned long, unsigned long, @@ -75,7 +88,7 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs) * * The resulting 6 bits of entropy is seen in SP[9:4]. */ - choose_random_kstack_offset(drdtime()); + choose_random_kstack_offset(get_cycles()); syscall_exit_to_user_mode(regs); } diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 6fb92cc1a4c92a..dbaaabcaf6f09e 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -18,6 +18,7 @@ #include <asm/loongarch.h> #include <asm/paravirt.h> #include <asm/time.h> +#include <asm/timex.h> u64 cpu_clock_freq; EXPORT_SYMBOL(cpu_clock_freq); @@ -50,10 +51,10 @@ static int constant_set_state_oneshot(struct clock_event_device *evt) raw_spin_lock(&state_lock); - timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config = csr_read(LOONGARCH_CSR_TCFG); timer_config |= CSR_TCFG_EN; timer_config &= ~CSR_TCFG_PERIOD; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -62,15 +63,15 @@ static int constant_set_state_oneshot(struct clock_event_device *evt) static int constant_set_state_periodic(struct clock_event_device *evt) { - unsigned long period; unsigned long timer_config; + u64 period = const_clock_freq; raw_spin_lock(&state_lock); - period = const_clock_freq / HZ; + do_div(period, HZ); timer_config = period & CSR_TCFG_VAL; timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN); - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -83,9 +84,9 @@ static int constant_set_state_shutdown(struct clock_event_device *evt) raw_spin_lock(&state_lock); - timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config = csr_read(LOONGARCH_CSR_TCFG); timer_config &= ~CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -98,7 +99,7 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev delta &= CSR_TCFG_VAL; timer_config = delta | CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); + csr_write(timer_config, LOONGARCH_CSR_TCFG); return 0; } @@ -120,7 +121,7 @@ static int arch_timer_dying(unsigned int cpu) static unsigned long get_loops_per_jiffy(void) { - unsigned long lpj = (unsigned long)const_clock_freq; + u64 lpj = const_clock_freq; do_div(lpj, HZ); @@ -131,13 +132,13 @@ static long init_offset; void save_counter(void) { - 
init_offset = drdtime(); + init_offset = get_cycles(); } void sync_counter(void) { /* Ensure counter begin at 0 */ - csr_write64(init_offset, LOONGARCH_CSR_CNTC); + csr_write(init_offset, LOONGARCH_CSR_CNTC); } int constant_clockevent_init(void) @@ -197,12 +198,12 @@ int constant_clockevent_init(void) static u64 read_const_counter(struct clocksource *clk) { - return drdtime(); + return get_cycles64(); } static noinstr u64 sched_clock_read(void) { - return drdtime(); + return get_cycles64(); } static struct clocksource clocksource_const = { @@ -211,7 +212,9 @@ static struct clocksource clocksource_const = { .read = read_const_counter, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, +#ifdef CONFIG_GENERIC_GETTIMEOFDAY .vdso_clock_mode = VDSO_CLOCKMODE_CPU, +#endif }; int __init constant_clocksource_init(void) @@ -235,7 +238,7 @@ void __init time_init(void) else const_clock_freq = calc_const_freq(); - init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC)); + init_offset = -(get_cycles() - csr_read(LOONGARCH_CSR_CNTC)); constant_clockevent_init(); constant_clocksource_init(); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index da5926fead4af9..004b8ebf005123 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -625,7 +625,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs) bool user = user_mode(regs); bool pie = regs_irqs_disabled(regs); unsigned long era = exception_era(regs); - u64 badv = 0, lower = 0, upper = ULONG_MAX; + unsigned long badv = 0, lower = 0, upper = ULONG_MAX; union loongarch_instruction insn; irqentry_state_t state = irqentry_enter(regs); @@ -1070,10 +1070,13 @@ asmlinkage void noinstr do_reserved(struct pt_regs *regs) asmlinkage void cache_parity_error(void) { + u32 merrctl = csr_read32(LOONGARCH_CSR_MERRCTL); + unsigned long merrera = csr_read(LOONGARCH_CSR_MERRERA); + /* For the moment, report the problem and hang. 
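A few hunks up, the new mmap2() wrapper takes its file offset in 4 KiB units and converts it to page units with offset >> (PAGE_SHIFT - 12), rejecting offsets that are not page-aligned via the (~PAGE_MASK >> 12) check. A worked sketch of that arithmetic, assuming 16 KiB pages (PAGE_SHIFT 14) purely for the example:

    #include <stdio.h>

    #define PAGE_SHIFT 14                           /* assumed 16 KiB pages for this example */
    #define PAGE_MASK  (~((1ul << PAGE_SHIFT) - 1))

    int main(void)
    {
        unsigned long off4k = 8;                    /* offset in 4 KiB units: 32 KiB */

        if (off4k & (~PAGE_MASK >> 12))
            puts("EINVAL: offset not page-aligned");
        else
            printf("page offset = %lu\n", off4k >> (PAGE_SHIFT - 12));  /* prints 2 */
        return 0;
    }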
*/ pr_err("Cache error exception:\n"); - pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL)); - pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA)); + pr_err("csr_merrctl == %08x\n", merrctl); + pr_err("csr_merrera == %016lx\n", merrera); panic("Can't handle the cache error!"); } @@ -1130,9 +1133,9 @@ static void configure_exception_vector(void) eentry = (unsigned long)exception_handlers; tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE; - csr_write64(eentry, LOONGARCH_CSR_EENTRY); - csr_write64(__pa(eentry), LOONGARCH_CSR_MERRENTRY); - csr_write64(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY); + csr_write(eentry, LOONGARCH_CSR_EENTRY); + csr_write(__pa(eentry), LOONGARCH_CSR_MERRENTRY); + csr_write(__pa(tlbrentry), LOONGARCH_CSR_TLBRENTRY); } void per_cpu_trap_init(int cpu) diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c index 487be604b96ae5..cc929c9fe7e997 100644 --- a/arch/loongarch/kernel/unaligned.c +++ b/arch/loongarch/kernel/unaligned.c @@ -27,12 +27,21 @@ static u32 unaligned_instructions_user; static u32 unaligned_instructions_kernel; #endif -static inline unsigned long read_fpr(unsigned int idx) +static inline u64 read_fpr(unsigned int idx) { +#ifdef CONFIG_64BIT #define READ_FPR(idx, __value) \ __asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value)); - - unsigned long __value; +#else +#define READ_FPR(idx, __value) \ +{ \ + u32 __value_lo, __value_hi; \ + __asm__ __volatile__("movfr2gr.s %0, $f"#idx"\n\t" : "=r"(__value_lo)); \ + __asm__ __volatile__("movfrh2gr.s %0, $f"#idx"\n\t" : "=r"(__value_hi)); \ + __value = (__value_lo | ((u64)__value_hi << 32)); \ +} +#endif + u64 __value; switch (idx) { case 0: @@ -138,11 +147,20 @@ static inline unsigned long read_fpr(unsigned int idx) return __value; } -static inline void write_fpr(unsigned int idx, unsigned long value) +static inline void write_fpr(unsigned int idx, u64 value) { +#ifdef CONFIG_64BIT #define WRITE_FPR(idx, value) \ __asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value)); - +#else +#define WRITE_FPR(idx, value) \ +{ \ + u32 value_lo = value; \ + u32 value_hi = value >> 32; \ + __asm__ __volatile__("movgr2fr.w $f"#idx", %0\n\t" :: "r"(value_lo)); \ + __asm__ __volatile__("movgr2frh.w $f"#idx", %0\n\t" :: "r"(value_hi)); \ +} +#endif switch (idx) { case 0: WRITE_FPR(0, value); @@ -252,7 +270,7 @@ void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned i bool sign, write; bool user = user_mode(regs); unsigned int res, size = 0; - unsigned long value = 0; + u64 value = 0; union loongarch_instruction insn; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 6d833599ef2e90..656b954c1134b1 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -9,6 +9,7 @@ #include <asm/loongarch.h> #include <asm/setup.h> #include <asm/time.h> +#include <asm/timex.h> #define CREATE_TRACE_POINTS #include "trace.h" @@ -814,7 +815,7 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu, case KVM_REG_LOONGARCH_KVM: switch (reg->id) { case KVM_REG_LOONGARCH_COUNTER: - *v = drdtime() + vcpu->kvm->arch.time_offset; + *v = get_cycles() + vcpu->kvm->arch.time_offset; break; case KVM_REG_LOONGARCH_DEBUG_INST: *v = INSN_HVCL | KVM_HCALL_SWDBG; @@ -909,7 +910,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, * only set for the first time for smp system */ if (vcpu->vcpu_id == 0) - vcpu->kvm->arch.time_offset = (signed long)(v - 
drdtime()); + vcpu->kvm->arch.time_offset = (signed long)(v - get_cycles()); break; case KVM_REG_LOONGARCH_VCPU_RESET: vcpu->arch.st.guest_addr = 0; diff --git a/arch/loongarch/lib/bswapdi.c b/arch/loongarch/lib/bswapdi.c new file mode 100644 index 00000000000000..88242dc7de1767 --- /dev/null +++ b/arch/loongarch/lib/bswapdi.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> +#include <linux/compiler.h> +#include <uapi/linux/swab.h> + +/* To silence -Wmissing-prototypes. */ +unsigned long long __bswapdi2(unsigned long long u); + +unsigned long long notrace __bswapdi2(unsigned long long u) +{ + return ___constant_swab64(u); +} +EXPORT_SYMBOL(__bswapdi2); diff --git a/arch/loongarch/lib/bswapsi.c b/arch/loongarch/lib/bswapsi.c new file mode 100644 index 00000000000000..2ed655497de5c5 --- /dev/null +++ b/arch/loongarch/lib/bswapsi.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/export.h> +#include <linux/compiler.h> +#include <uapi/linux/swab.h> + +/* To silence -Wmissing-prototypes. */ +unsigned int __bswapsi2(unsigned int u); + +unsigned int notrace __bswapsi2(unsigned int u) +{ + return ___constant_swab32(u); +} +EXPORT_SYMBOL(__bswapsi2); diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S index 7a0db643b2866c..58c667dde88293 100644 --- a/arch/loongarch/lib/clear_user.S +++ b/arch/loongarch/lib/clear_user.S @@ -13,11 +13,15 @@ #include <asm/unwind_hints.h> SYM_FUNC_START(__clear_user) +#ifdef CONFIG_32BIT + b __clear_user_generic +#else /* * Some CPUs support hardware unaligned access */ ALTERNATIVE "b __clear_user_generic", \ "b __clear_user_fast", CPU_FEATURE_UAL +#endif SYM_FUNC_END(__clear_user) EXPORT_SYMBOL(__clear_user) @@ -29,19 +33,20 @@ EXPORT_SYMBOL(__clear_user) * a1: size */ SYM_FUNC_START(__clear_user_generic) - beqz a1, 2f + beqz a1, 2f -1: st.b zero, a0, 0 - addi.d a0, a0, 1 - addi.d a1, a1, -1 - bgtz a1, 1b +1: st.b zero, a0, 0 + PTR_ADDI a0, a0, 1 + PTR_ADDI a1, a1, -1 + bgtz a1, 1b -2: move a0, a1 - jr ra +2: move a0, a1 + jr ra - _asm_extable 1b, 2b + _asm_extable 1b, 2b SYM_FUNC_END(__clear_user_generic) +#ifdef CONFIG_64BIT /* * unsigned long __clear_user_fast(void *addr, unsigned long size) * @@ -207,3 +212,4 @@ SYM_FUNC_START(__clear_user_fast) SYM_FUNC_END(__clear_user_fast) STACK_FRAME_NON_STANDARD __clear_user_fast +#endif diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S index 095ce9181c6c04..c7264b779f6e4a 100644 --- a/arch/loongarch/lib/copy_user.S +++ b/arch/loongarch/lib/copy_user.S @@ -13,11 +13,15 @@ #include <asm/unwind_hints.h> SYM_FUNC_START(__copy_user) +#ifdef CONFIG_32BIT + b __copy_user_generic +#else /* * Some CPUs support hardware unaligned access */ ALTERNATIVE "b __copy_user_generic", \ "b __copy_user_fast", CPU_FEATURE_UAL +#endif SYM_FUNC_END(__copy_user) EXPORT_SYMBOL(__copy_user) @@ -30,22 +34,23 @@ EXPORT_SYMBOL(__copy_user) * a2: n */ SYM_FUNC_START(__copy_user_generic) - beqz a2, 3f + beqz a2, 3f -1: ld.b t0, a1, 0 -2: st.b t0, a0, 0 - addi.d a0, a0, 1 - addi.d a1, a1, 1 - addi.d a2, a2, -1 - bgtz a2, 1b +1: ld.b t0, a1, 0 +2: st.b t0, a0, 0 + PTR_ADDI a0, a0, 1 + PTR_ADDI a1, a1, 1 + PTR_ADDI a2, a2, -1 + bgtz a2, 1b -3: move a0, a2 - jr ra +3: move a0, a2 + jr ra - _asm_extable 1b, 3b - _asm_extable 2b, 3b + _asm_extable 1b, 3b + _asm_extable 2b, 3b SYM_FUNC_END(__copy_user_generic) +#ifdef CONFIG_64BIT /* * unsigned long __copy_user_fast(void *to, const void *from, unsigned long n) * @@ -281,3 +286,4 @@ 
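The new bswapsi.c/bswapdi.c above supply the libgcc-style __bswapsi2/__bswapdi2 byte-swap helpers that 32-bit code generation can fall back to; the kernel versions simply wrap ___constant_swab32/64. An equivalent open-coded version, shown only to make the byte movement explicit:

    #include <stdio.h>

    static unsigned int bswap32(unsigned int x)
    {
        return (x << 24) | ((x & 0x0000ff00u) << 8) |
               ((x >> 8) & 0x0000ff00u) | (x >> 24);
    }

    static unsigned long long bswap64(unsigned long long x)
    {
        /* Swap each 32-bit half, then exchange the halves. */
        return ((unsigned long long)bswap32((unsigned int)x) << 32) |
               bswap32((unsigned int)(x >> 32));
    }

    int main(void)
    {
        printf("%x %llx\n", bswap32(0x11223344u),
               bswap64(0x1122334455667788ull));     /* prints: 44332211 8877665544332211 */
        return 0;
    }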
SYM_FUNC_START(__copy_user_fast) SYM_FUNC_END(__copy_user_fast) STACK_FRAME_NON_STANDARD __copy_user_fast +#endif diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c index 0b886a6e260faf..e1cdad7a676e17 100644 --- a/arch/loongarch/lib/dump_tlb.c +++ b/arch/loongarch/lib/dump_tlb.c @@ -20,9 +20,9 @@ void dump_tlb_regs(void) pr_info("Index : 0x%0x\n", read_csr_tlbidx()); pr_info("PageSize : 0x%0x\n", read_csr_pagesize()); - pr_info("EntryHi : 0x%0*lx\n", field, read_csr_entryhi()); - pr_info("EntryLo0 : 0x%0*lx\n", field, read_csr_entrylo0()); - pr_info("EntryLo1 : 0x%0*lx\n", field, read_csr_entrylo1()); + pr_info("EntryHi : 0x%0*lx\n", field, (unsigned long)read_csr_entryhi()); + pr_info("EntryLo0 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo0()); + pr_info("EntryLo1 : 0x%0*lx\n", field, (unsigned long)read_csr_entrylo1()); } static void dump_tlb(int first, int last) @@ -73,12 +73,16 @@ static void dump_tlb(int first, int last) vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask); /* NR/NX are in awkward places, so mask them off separately */ +#ifdef CONFIG_64BIT pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX); +#endif pa = pa & PAGE_MASK; pr_cont("\n\t["); +#ifdef CONFIG_64BIT pr_cont("nr=%d nx=%d ", (entrylo0 & ENTRYLO_NR) ? 1 : 0, (entrylo0 & ENTRYLO_NX) ? 1 : 0); +#endif pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [", pwidth, pa, c0, (entrylo0 & ENTRYLO_D) ? 1 : 0, @@ -86,11 +90,15 @@ static void dump_tlb(int first, int last) (entrylo0 & ENTRYLO_G) ? 1 : 0, (entrylo0 & ENTRYLO_PLV) >> ENTRYLO_PLV_SHIFT); /* NR/NX are in awkward places, so mask them off separately */ +#ifdef CONFIG_64BIT pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX); +#endif pa = pa & PAGE_MASK; +#ifdef CONFIG_64BIT pr_cont("nr=%d nx=%d ", (entrylo1 & ENTRYLO_NR) ? 1 : 0, (entrylo1 & ENTRYLO_NX) ? 1 : 0); +#endif pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n", pwidth, pa, c1, (entrylo1 & ENTRYLO_D) ? 
1 : 0, diff --git a/arch/loongarch/lib/unaligned.S b/arch/loongarch/lib/unaligned.S index 185f82d85810af..470c0bfa3463bf 100644 --- a/arch/loongarch/lib/unaligned.S +++ b/arch/loongarch/lib/unaligned.S @@ -24,35 +24,35 @@ * a3: sign */ SYM_FUNC_START(unaligned_read) - beqz a2, 5f + beqz a2, 5f - li.w t2, 0 - addi.d t0, a2, -1 - slli.d t1, t0, 3 - add.d a0, a0, t0 + li.w t2, 0 + LONG_ADDI t0, a2, -1 + PTR_SLLI t1, t0, LONGLOG + PTR_ADD a0, a0, t0 - beqz a3, 2f -1: ld.b t3, a0, 0 - b 3f + beqz a3, 2f +1: ld.b t3, a0, 0 + b 3f -2: ld.bu t3, a0, 0 -3: sll.d t3, t3, t1 - or t2, t2, t3 - addi.d t1, t1, -8 - addi.d a0, a0, -1 - addi.d a2, a2, -1 - bgtz a2, 2b -4: st.d t2, a1, 0 +2: ld.bu t3, a0, 0 +3: LONG_SLLV t3, t3, t1 + or t2, t2, t3 + LONG_ADDI t1, t1, -8 + PTR_ADDI a0, a0, -1 + PTR_ADDI a2, a2, -1 + bgtz a2, 2b +4: LONG_S t2, a1, 0 - move a0, a2 - jr ra + move a0, a2 + jr ra -5: li.w a0, -EFAULT - jr ra +5: li.w a0, -EFAULT + jr ra - _asm_extable 1b, .L_fixup_handle_unaligned - _asm_extable 2b, .L_fixup_handle_unaligned - _asm_extable 4b, .L_fixup_handle_unaligned + _asm_extable 1b, .L_fixup_handle_unaligned + _asm_extable 2b, .L_fixup_handle_unaligned + _asm_extable 4b, .L_fixup_handle_unaligned SYM_FUNC_END(unaligned_read) /* @@ -63,21 +63,21 @@ SYM_FUNC_END(unaligned_read) * a2: n */ SYM_FUNC_START(unaligned_write) - beqz a2, 3f + beqz a2, 3f - li.w t0, 0 -1: srl.d t1, a1, t0 -2: st.b t1, a0, 0 - addi.d t0, t0, 8 - addi.d a2, a2, -1 - addi.d a0, a0, 1 - bgtz a2, 1b + li.w t0, 0 +1: LONG_SRLV t1, a1, t0 +2: st.b t1, a0, 0 + LONG_ADDI t0, t0, 8 + PTR_ADDI a2, a2, -1 + PTR_ADDI a0, a0, 1 + bgtz a2, 1b - move a0, a2 - jr ra + move a0, a2 + jr ra -3: li.w a0, -EFAULT - jr ra +3: li.w a0, -EFAULT + jr ra - _asm_extable 2b, .L_fixup_handle_unaligned + _asm_extable 2b, .L_fixup_handle_unaligned SYM_FUNC_END(unaligned_write) diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 6bfd4b8dad1b65..0946662afdd644 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -224,7 +224,7 @@ EXPORT_SYMBOL(invalid_pmd_table); pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; EXPORT_SYMBOL(invalid_pte_table); -#ifdef CONFIG_EXECMEM +#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR) static struct execmem_info execmem_info __ro_after_init; struct execmem_info __init *execmem_arch_setup(void) @@ -242,4 +242,4 @@ struct execmem_info __init *execmem_arch_setup(void) return &execmem_info; } -#endif /* CONFIG_EXECMEM */ +#endif /* CONFIG_EXECMEM && MODULES_VADDR */ diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S index 7ad76551d3133a..7286b804756da0 100644 --- a/arch/loongarch/mm/page.S +++ b/arch/loongarch/mm/page.S @@ -10,75 +10,75 @@ .align 5 SYM_FUNC_START(clear_page) - lu12i.w t0, 1 << (PAGE_SHIFT - 12) - add.d t0, t0, a0 + lu12i.w t0, 1 << (PAGE_SHIFT - 12) + PTR_ADD t0, t0, a0 1: - st.d zero, a0, 0 - st.d zero, a0, 8 - st.d zero, a0, 16 - st.d zero, a0, 24 - st.d zero, a0, 32 - st.d zero, a0, 40 - st.d zero, a0, 48 - st.d zero, a0, 56 - addi.d a0, a0, 128 - st.d zero, a0, -64 - st.d zero, a0, -56 - st.d zero, a0, -48 - st.d zero, a0, -40 - st.d zero, a0, -32 - st.d zero, a0, -24 - st.d zero, a0, -16 - st.d zero, a0, -8 - bne t0, a0, 1b + LONG_S zero, a0, (LONGSIZE * 0) + LONG_S zero, a0, (LONGSIZE * 1) + LONG_S zero, a0, (LONGSIZE * 2) + LONG_S zero, a0, (LONGSIZE * 3) + LONG_S zero, a0, (LONGSIZE * 4) + LONG_S zero, a0, (LONGSIZE * 5) + LONG_S zero, a0, (LONGSIZE * 6) + LONG_S zero, a0, (LONGSIZE * 7) + PTR_ADDI a0, a0, (LONGSIZE * 16) + LONG_S 
zero, a0, -(LONGSIZE * 8) + LONG_S zero, a0, -(LONGSIZE * 7) + LONG_S zero, a0, -(LONGSIZE * 6) + LONG_S zero, a0, -(LONGSIZE * 5) + LONG_S zero, a0, -(LONGSIZE * 4) + LONG_S zero, a0, -(LONGSIZE * 3) + LONG_S zero, a0, -(LONGSIZE * 2) + LONG_S zero, a0, -(LONGSIZE * 1) + bne t0, a0, 1b - jr ra + jr ra SYM_FUNC_END(clear_page) EXPORT_SYMBOL(clear_page) .align 5 SYM_FUNC_START(copy_page) - lu12i.w t8, 1 << (PAGE_SHIFT - 12) - add.d t8, t8, a0 + lu12i.w t8, 1 << (PAGE_SHIFT - 12) + PTR_ADD t8, t8, a0 1: - ld.d t0, a1, 0 - ld.d t1, a1, 8 - ld.d t2, a1, 16 - ld.d t3, a1, 24 - ld.d t4, a1, 32 - ld.d t5, a1, 40 - ld.d t6, a1, 48 - ld.d t7, a1, 56 + LONG_L t0, a1, (LONGSIZE * 0) + LONG_L t1, a1, (LONGSIZE * 1) + LONG_L t2, a1, (LONGSIZE * 2) + LONG_L t3, a1, (LONGSIZE * 3) + LONG_L t4, a1, (LONGSIZE * 4) + LONG_L t5, a1, (LONGSIZE * 5) + LONG_L t6, a1, (LONGSIZE * 6) + LONG_L t7, a1, (LONGSIZE * 7) - st.d t0, a0, 0 - st.d t1, a0, 8 - ld.d t0, a1, 64 - ld.d t1, a1, 72 - st.d t2, a0, 16 - st.d t3, a0, 24 - ld.d t2, a1, 80 - ld.d t3, a1, 88 - st.d t4, a0, 32 - st.d t5, a0, 40 - ld.d t4, a1, 96 - ld.d t5, a1, 104 - st.d t6, a0, 48 - st.d t7, a0, 56 - ld.d t6, a1, 112 - ld.d t7, a1, 120 - addi.d a0, a0, 128 - addi.d a1, a1, 128 + LONG_S t0, a0, (LONGSIZE * 0) + LONG_S t1, a0, (LONGSIZE * 1) + LONG_L t0, a1, (LONGSIZE * 8) + LONG_L t1, a1, (LONGSIZE * 9) + LONG_S t2, a0, (LONGSIZE * 2) + LONG_S t3, a0, (LONGSIZE * 3) + LONG_L t2, a1, (LONGSIZE * 10) + LONG_L t3, a1, (LONGSIZE * 11) + LONG_S t4, a0, (LONGSIZE * 4) + LONG_S t5, a0, (LONGSIZE * 5) + LONG_L t4, a1, (LONGSIZE * 12) + LONG_L t5, a1, (LONGSIZE * 13) + LONG_S t6, a0, (LONGSIZE * 6) + LONG_S t7, a0, (LONGSIZE * 7) + LONG_L t6, a1, (LONGSIZE * 14) + LONG_L t7, a1, (LONGSIZE * 15) + PTR_ADDI a0, a0, (LONGSIZE * 16) + PTR_ADDI a1, a1, (LONGSIZE * 16) - st.d t0, a0, -64 - st.d t1, a0, -56 - st.d t2, a0, -48 - st.d t3, a0, -40 - st.d t4, a0, -32 - st.d t5, a0, -24 - st.d t6, a0, -16 - st.d t7, a0, -8 + LONG_S t0, a0, -(LONGSIZE * 8) + LONG_S t1, a0, -(LONGSIZE * 7) + LONG_S t2, a0, -(LONGSIZE * 6) + LONG_S t3, a0, -(LONGSIZE * 5) + LONG_S t4, a0, -(LONGSIZE * 4) + LONG_S t5, a0, -(LONGSIZE * 3) + LONG_S t6, a0, -(LONGSIZE * 2) + LONG_S t7, a0, -(LONGSIZE * 1) - bne t8, a0, 1b - jr ra + bne t8, a0, 1b + jr ra SYM_FUNC_END(copy_page) EXPORT_SYMBOL(copy_page) diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 3b427b319db21d..6a3c91b9cacdcf 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -229,11 +229,11 @@ static void setup_ptwalker(void) if (cpu_has_ptw) pwctl1 |= CSR_PWCTL1_PTW; - csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0); - csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1); - csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH); - csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); - csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID); + csr_write(pwctl0, LOONGARCH_CSR_PWCTL0); + csr_write(pwctl1, LOONGARCH_CSR_PWCTL1); + csr_write((long)swapper_pg_dir, LOONGARCH_CSR_PGDH); + csr_write((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + csr_write((long)smp_processor_id(), LOONGARCH_CSR_TMID); } static void output_pgtable_bits_defines(void) @@ -251,8 +251,10 @@ static void output_pgtable_bits_defines(void) pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT); pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT); pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT); +#ifdef CONFIG_64BIT pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT); pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT); 
+#endif pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT); pr_debug("\n"); } diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index c08682a89c5824..84a881a339a7c5 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -11,10 +11,18 @@ #define INVTLB_ADDR_GFALSE_AND_ASID 5 -#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3) -#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3) -#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3) -#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PGD_BITS (PAGE_SHIFT - PTRLOG) +#define PTRS_PER_PUD_BITS (PAGE_SHIFT - PTRLOG) +#define PTRS_PER_PMD_BITS (PAGE_SHIFT - PTRLOG) +#define PTRS_PER_PTE_BITS (PAGE_SHIFT - PTRLOG) + +#ifdef CONFIG_32BIT +#define PTE_LL ll.w +#define PTE_SC sc.w +#else +#define PTE_LL ll.d +#define PTE_SC sc.d +#endif .macro tlb_do_page_fault, write SYM_CODE_START(tlb_do_page_fault_\write) @@ -60,52 +68,61 @@ SYM_CODE_START(handle_tlb_load) vmalloc_done_load: /* Get PGD offset in bytes */ - bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT - alsl.d t1, ra, t1, 3 +#ifdef CONFIG_32BIT + PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT +#else + PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT +#endif + PTR_ALSL t1, ra, t1, _PGD_T_LOG2 + #if CONFIG_PGTABLE_LEVELS > 3 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 + #endif #if CONFIG_PGTABLE_LEVELS > 2 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 + #endif - ld.d ra, t1, 0 + PTR_L ra, t1, 0 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. */ - rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1 bltz ra, tlb_huge_update_load - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) - bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT - alsl.d t1, t0, ra, _PTE_T_LOG2 + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) + PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + PTR_ALSL t1, t0, ra, _PTE_T_LOG2 #ifdef CONFIG_SMP smp_pgtable_change_load: - ll.d t0, t1, 0 + PTE_LL t0, t1, 0 #else - ld.d t0, t1, 0 + PTR_L t0, t1, 0 #endif andi ra, t0, _PAGE_PRESENT beqz ra, nopage_tlb_load ori t0, t0, _PAGE_VALID + #ifdef CONFIG_SMP - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, smp_pgtable_change_load #else - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif + tlbsrch - bstrins.d t1, zero, 3, 3 - ld.d t0, t1, 0 - ld.d t1, t1, 8 + PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2 + PTR_L t0, t1, 0 + PTR_L t1, t1, _PTE_T_SIZE csrwr t0, LOONGARCH_CSR_TLBELO0 csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr @@ -115,30 +132,28 @@ smp_pgtable_change_load: csrrd ra, EXCEPTION_KS2 ertn -#ifdef CONFIG_64BIT vmalloc_load: la_abs t1, swapper_pg_dir b vmalloc_done_load -#endif /* This is the entry point of a huge page. 
*/ tlb_huge_update_load: #ifdef CONFIG_SMP - ll.d ra, t1, 0 + PTE_LL ra, t1, 0 #else - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) #endif andi t0, ra, _PAGE_PRESENT beqz t0, nopage_tlb_load #ifdef CONFIG_SMP ori t0, ra, _PAGE_VALID - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, tlb_huge_update_load ori t0, ra, _PAGE_VALID #else ori t0, ra, _PAGE_VALID - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif csrrd ra, LOONGARCH_CSR_ASID csrrd t1, LOONGARCH_CSR_BADV @@ -158,27 +173,27 @@ tlb_huge_update_load: xori t0, t0, _PAGE_HUGE lu12i.w t1, _PAGE_HGLOBAL >> 12 and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) or t0, t0, t1 move ra, t0 csrwr ra, LOONGARCH_CSR_TLBELO0 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 + PTR_ADDI t1, zero, 1 + PTR_SLLI t1, t1, (HPAGE_SHIFT - 1) + PTR_ADD t0, t0, t1 csrwr t0, LOONGARCH_CSR_TLBELO1 /* Set huge page tlb entry size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX tlbfill - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX csrrd t0, EXCEPTION_KS0 @@ -216,53 +231,71 @@ SYM_CODE_START(handle_tlb_store) vmalloc_done_store: /* Get PGD offset in bytes */ - bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT - alsl.d t1, ra, t1, 3 +#ifdef CONFIG_32BIT + PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT +#else + PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT +#endif + PTR_ALSL t1, ra, t1, _PGD_T_LOG2 + #if CONFIG_PGTABLE_LEVELS > 3 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif #if CONFIG_PGTABLE_LEVELS > 2 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif - ld.d ra, t1, 0 + PTR_L ra, t1, 0 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. 
*/ - rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1 bltz ra, tlb_huge_update_store - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) - bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT - alsl.d t1, t0, ra, _PTE_T_LOG2 + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) + PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + PTR_ALSL t1, t0, ra, _PTE_T_LOG2 #ifdef CONFIG_SMP smp_pgtable_change_store: - ll.d t0, t1, 0 + PTE_LL t0, t1, 0 #else - ld.d t0, t1, 0 + PTR_L t0, t1, 0 #endif + +#ifdef CONFIG_64BIT andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE +#else + PTR_LI ra, _PAGE_PRESENT | _PAGE_WRITE + and ra, ra, t0 + nor ra, ra, zero +#endif bnez ra, nopage_tlb_store +#ifdef CONFIG_64BIT ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#else + PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + #ifdef CONFIG_SMP - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, smp_pgtable_change_store #else - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif tlbsrch - bstrins.d t1, zero, 3, 3 - ld.d t0, t1, 0 - ld.d t1, t1, 8 + PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2 + PTR_L t0, t1, 0 + PTR_L t1, t1, _PTE_T_SIZE csrwr t0, LOONGARCH_CSR_TLBELO0 csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr @@ -272,31 +305,42 @@ smp_pgtable_change_store: csrrd ra, EXCEPTION_KS2 ertn -#ifdef CONFIG_64BIT vmalloc_store: la_abs t1, swapper_pg_dir b vmalloc_done_store -#endif /* This is the entry point of a huge page. */ tlb_huge_update_store: #ifdef CONFIG_SMP - ll.d ra, t1, 0 + PTE_LL ra, t1, 0 #else - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) #endif + +#ifdef CONFIG_64BIT andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE +#else + PTR_LI t0, _PAGE_PRESENT | _PAGE_WRITE + and t0, t0, ra + nor t0, t0, zero +#endif + bnez t0, nopage_tlb_store #ifdef CONFIG_SMP ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, tlb_huge_update_store ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #else +#ifdef CONFIG_64BIT ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - st.d t0, t1, 0 +#else + PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + PTR_S t0, t1, 0 #endif csrrd ra, LOONGARCH_CSR_ASID csrrd t1, LOONGARCH_CSR_BADV @@ -316,28 +360,28 @@ tlb_huge_update_store: xori t0, t0, _PAGE_HUGE lu12i.w t1, _PAGE_HGLOBAL >> 12 and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) or t0, t0, t1 move ra, t0 csrwr ra, LOONGARCH_CSR_TLBELO0 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 + PTR_ADDI t1, zero, 1 + PTR_SLLI t1, t1, (HPAGE_SHIFT - 1) + PTR_ADD t0, t0, t1 csrwr t0, LOONGARCH_CSR_TLBELO1 /* Set huge page tlb entry size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX tlbfill /* Reset default page size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX csrrd t0, EXCEPTION_KS0 @@ -375,52 +419,69 @@ 
SYM_CODE_START(handle_tlb_modify) vmalloc_done_modify: /* Get PGD offset in bytes */ - bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT - alsl.d t1, ra, t1, 3 +#ifdef CONFIG_32BIT + PTR_BSTRPICK ra, t0, 31, PGDIR_SHIFT +#else + PTR_BSTRPICK ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT +#endif + PTR_ALSL t1, ra, t1, _PGD_T_LOG2 + #if CONFIG_PGTABLE_LEVELS > 3 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif #if CONFIG_PGTABLE_LEVELS > 2 - ld.d t1, t1, 0 - bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT - alsl.d t1, ra, t1, 3 + PTR_L t1, t1, 0 + PTR_BSTRPICK ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + PTR_ALSL t1, ra, t1, _PMD_T_LOG2 #endif - ld.d ra, t1, 0 + PTR_L ra, t1, 0 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. */ - rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + PTR_ROTRI ra, ra, _PAGE_HUGE_SHIFT + 1 bltz ra, tlb_huge_update_modify - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) - bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT - alsl.d t1, t0, ra, _PTE_T_LOG2 + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) + PTR_BSTRPICK t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + PTR_ALSL t1, t0, ra, _PTE_T_LOG2 #ifdef CONFIG_SMP smp_pgtable_change_modify: - ll.d t0, t1, 0 + PTE_LL t0, t1, 0 #else - ld.d t0, t1, 0 + PTR_L t0, t1, 0 #endif +#ifdef CONFIG_64BIT andi ra, t0, _PAGE_WRITE +#else + PTR_LI ra, _PAGE_WRITE + and ra, t0, ra +#endif + beqz ra, nopage_tlb_modify +#ifdef CONFIG_64BIT ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) +#else + PTR_LI ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + #ifdef CONFIG_SMP - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, smp_pgtable_change_modify #else - st.d t0, t1, 0 + PTR_S t0, t1, 0 #endif tlbsrch - bstrins.d t1, zero, 3, 3 - ld.d t0, t1, 0 - ld.d t1, t1, 8 + PTR_BSTRINS t1, zero, _PTE_T_LOG2, _PTE_T_LOG2 + PTR_L t0, t1, 0 + PTR_L t1, t1, _PTE_T_SIZE csrwr t0, LOONGARCH_CSR_TLBELO0 csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr @@ -430,30 +491,40 @@ smp_pgtable_change_modify: csrrd ra, EXCEPTION_KS2 ertn -#ifdef CONFIG_64BIT vmalloc_modify: la_abs t1, swapper_pg_dir b vmalloc_done_modify -#endif /* This is the entry point of a huge page. 
*/ tlb_huge_update_modify: #ifdef CONFIG_SMP - ll.d ra, t1, 0 + PTE_LL ra, t1, 0 #else - rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + PTR_ROTRI ra, ra, BITS_PER_LONG - (_PAGE_HUGE_SHIFT + 1) #endif + +#ifdef CONFIG_64BIT andi t0, ra, _PAGE_WRITE +#else + PTR_LI t0, _PAGE_WRITE + and t0, ra, t0 +#endif + beqz t0, nopage_tlb_modify #ifdef CONFIG_SMP ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - sc.d t0, t1, 0 + PTE_SC t0, t1, 0 beqz t0, tlb_huge_update_modify ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #else +#ifdef CONFIG_64BIT ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) - st.d t0, t1, 0 +#else + PTR_LI t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + or t0, ra, t0 +#endif + PTR_S t0, t1, 0 #endif csrrd ra, LOONGARCH_CSR_ASID csrrd t1, LOONGARCH_CSR_BADV @@ -473,28 +544,28 @@ tlb_huge_update_modify: xori t0, t0, _PAGE_HUGE lu12i.w t1, _PAGE_HGLOBAL >> 12 and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + PTR_SRLI t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) or t0, t0, t1 move ra, t0 csrwr ra, LOONGARCH_CSR_TLBELO0 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 + PTR_ADDI t1, zero, 1 + PTR_SLLI t1, t1, (HPAGE_SHIFT - 1) + PTR_ADD t0, t0, t1 csrwr t0, LOONGARCH_CSR_TLBELO1 /* Set huge page tlb entry size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX tlbfill /* Reset default page size */ - addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) - addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) + PTR_LI t0, (CSR_TLBIDX_PS >> 16) << 16 + PTR_LI t1, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX csrrd t0, EXCEPTION_KS0 @@ -517,6 +588,44 @@ SYM_CODE_START(handle_tlb_modify_ptw) jr t0 SYM_CODE_END(handle_tlb_modify_ptw) +#ifdef CONFIG_32BIT +SYM_CODE_START(handle_tlb_refill) + UNWIND_HINT_UNDEFINED + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 + li.w ra, 0x1fffffff + + csrrd t0, LOONGARCH_CSR_PGD + csrrd t1, LOONGARCH_CSR_TLBRBADV + srli.w t1, t1, PGDIR_SHIFT + slli.w t1, t1, 0x2 + add.w t0, t0, t1 + and t0, t0, ra + + ld.w t0, t0, 0 + csrrd t1, LOONGARCH_CSR_TLBRBADV + slli.w t1, t1, (32 - PGDIR_SHIFT) + srli.w t1, t1, (32 - PGDIR_SHIFT + PAGE_SHIFT + 1) + slli.w t1, t1, (0x2 + 1) + add.w t0, t0, t1 + and t0, t0, ra + + ld.w t1, t0, 0x0 + csrwr t1, LOONGARCH_CSR_TLBRELO0 + + ld.w t1, t0, 0x4 + csrwr t1, LOONGARCH_CSR_TLBRELO1 + + tlbfill + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn +SYM_CODE_END(handle_tlb_refill) +#endif + +#ifdef CONFIG_64BIT SYM_CODE_START(handle_tlb_refill) UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_TLBRSAVE @@ -534,3 +643,4 @@ SYM_CODE_START(handle_tlb_refill) csrrd t0, LOONGARCH_CSR_TLBRSAVE ertn SYM_CODE_END(handle_tlb_refill) +#endif diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c index d9fc5d520b3778..d923295ab8c665 100644 --- a/arch/loongarch/pci/pci.c +++ b/arch/loongarch/pci/pci.c @@ -14,6 +14,7 @@ #define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00 #define PCI_DEVICE_ID_LOONGSON_DC1 0x7a06 #define PCI_DEVICE_ID_LOONGSON_DC2 0x7a36 +#define PCI_DEVICE_ID_LOONGSON_DC3 0x7a46 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val) @@ -97,3 +98,4 @@ static void pci_fixup_vgadev(struct 
pci_dev *pdev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC1, pci_fixup_vgadev); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC2, pci_fixup_vgadev); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DC3, pci_fixup_vgadev); diff --git a/arch/loongarch/power/hibernate.c b/arch/loongarch/power/hibernate.c index e7b7346592cb2a..817270410ef987 100644 --- a/arch/loongarch/power/hibernate.c +++ b/arch/loongarch/power/hibernate.c @@ -10,7 +10,7 @@ static u32 saved_crmd; static u32 saved_prmd; static u32 saved_euen; static u32 saved_ecfg; -static u64 saved_pcpu_base; +static unsigned long saved_pcpu_base; struct pt_regs saved_regs; void save_processor_state(void) @@ -20,7 +20,7 @@ void save_processor_state(void) saved_prmd = csr_read32(LOONGARCH_CSR_PRMD); saved_euen = csr_read32(LOONGARCH_CSR_EUEN); saved_ecfg = csr_read32(LOONGARCH_CSR_ECFG); - saved_pcpu_base = csr_read64(PERCPU_BASE_KS); + saved_pcpu_base = csr_read(PERCPU_BASE_KS); if (is_fpu_owner()) save_fp(current); @@ -33,7 +33,7 @@ void restore_processor_state(void) csr_write32(saved_prmd, LOONGARCH_CSR_PRMD); csr_write32(saved_euen, LOONGARCH_CSR_EUEN); csr_write32(saved_ecfg, LOONGARCH_CSR_ECFG); - csr_write64(saved_pcpu_base, PERCPU_BASE_KS); + csr_write(saved_pcpu_base, PERCPU_BASE_KS); if (is_fpu_owner()) restore_fp(current); diff --git a/arch/loongarch/power/platform.c b/arch/loongarch/power/platform.c index 5bbdb9fd76e5d0..faa4fe4e7461ce 100644 --- a/arch/loongarch/power/platform.c +++ b/arch/loongarch/power/platform.c @@ -72,10 +72,10 @@ static int __init loongson3_acpi_suspend_init(void) status = acpi_evaluate_integer(NULL, "\\SADR", NULL, &suspend_addr); if (ACPI_FAILURE(status) || !suspend_addr) { pr_info("ACPI S3 supported with hardware register default\n"); - loongson_sysconf.suspend_addr = (u64)default_suspend_addr; + loongson_sysconf.suspend_addr = (unsigned long)default_suspend_addr; } else { pr_info("ACPI S3 supported with Loongson ACPI SADR extension\n"); - loongson_sysconf.suspend_addr = (u64)phys_to_virt(PHYSADDR(suspend_addr)); + loongson_sysconf.suspend_addr = (unsigned long)phys_to_virt(PHYSADDR(suspend_addr)); } #endif return 0; diff --git a/arch/loongarch/power/suspend.c b/arch/loongarch/power/suspend.c index c9e594925c4737..7e3d79f8bbd4ef 100644 --- a/arch/loongarch/power/suspend.c +++ b/arch/loongarch/power/suspend.c @@ -20,24 +20,24 @@ u64 loongarch_suspend_addr; struct saved_registers { u32 ecfg; u32 euen; - u64 pgd; - u64 kpgd; u32 pwctl0; u32 pwctl1; - u64 pcpu_base; + unsigned long pgd; + unsigned long kpgd; + unsigned long pcpu_base; }; static struct saved_registers saved_regs; void loongarch_common_suspend(void) { save_counter(); - saved_regs.pgd = csr_read64(LOONGARCH_CSR_PGDL); - saved_regs.kpgd = csr_read64(LOONGARCH_CSR_PGDH); + saved_regs.pgd = csr_read(LOONGARCH_CSR_PGDL); + saved_regs.kpgd = csr_read(LOONGARCH_CSR_PGDH); saved_regs.pwctl0 = csr_read32(LOONGARCH_CSR_PWCTL0); saved_regs.pwctl1 = csr_read32(LOONGARCH_CSR_PWCTL1); saved_regs.ecfg = csr_read32(LOONGARCH_CSR_ECFG); saved_regs.euen = csr_read32(LOONGARCH_CSR_EUEN); - saved_regs.pcpu_base = csr_read64(PERCPU_BASE_KS); + saved_regs.pcpu_base = csr_read(PERCPU_BASE_KS); loongarch_suspend_addr = loongson_sysconf.suspend_addr; } @@ -46,17 +46,17 @@ void loongarch_common_resume(void) { sync_counter(); local_flush_tlb_all(); - csr_write64(eentry, LOONGARCH_CSR_EENTRY); - csr_write64(eentry, LOONGARCH_CSR_MERRENTRY); - csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY); 
+ csr_write(eentry, LOONGARCH_CSR_EENTRY); + csr_write(eentry, LOONGARCH_CSR_MERRENTRY); + csr_write(tlbrentry, LOONGARCH_CSR_TLBRENTRY); - csr_write64(saved_regs.pgd, LOONGARCH_CSR_PGDL); - csr_write64(saved_regs.kpgd, LOONGARCH_CSR_PGDH); + csr_write(saved_regs.pgd, LOONGARCH_CSR_PGDL); + csr_write(saved_regs.kpgd, LOONGARCH_CSR_PGDH); csr_write32(saved_regs.pwctl0, LOONGARCH_CSR_PWCTL0); csr_write32(saved_regs.pwctl1, LOONGARCH_CSR_PWCTL1); csr_write32(saved_regs.ecfg, LOONGARCH_CSR_ECFG); csr_write32(saved_regs.euen, LOONGARCH_CSR_EUEN); - csr_write64(saved_regs.pcpu_base, PERCPU_BASE_KS); + csr_write(saved_regs.pcpu_base, PERCPU_BASE_KS); } int loongarch_acpi_suspend(void) diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S index df0865df26fae5..c8119ad5fb2c8a 100644 --- a/arch/loongarch/power/suspend_asm.S +++ b/arch/loongarch/power/suspend_asm.S @@ -14,41 +14,41 @@ /* preparatory stuff */ .macro SETUP_SLEEP - addi.d sp, sp, -PT_SIZE - st.d $r1, sp, PT_R1 - st.d $r2, sp, PT_R2 - st.d $r3, sp, PT_R3 - st.d $r4, sp, PT_R4 - st.d $r21, sp, PT_R21 - st.d $r22, sp, PT_R22 - st.d $r23, sp, PT_R23 - st.d $r24, sp, PT_R24 - st.d $r25, sp, PT_R25 - st.d $r26, sp, PT_R26 - st.d $r27, sp, PT_R27 - st.d $r28, sp, PT_R28 - st.d $r29, sp, PT_R29 - st.d $r30, sp, PT_R30 - st.d $r31, sp, PT_R31 + PTR_ADDI sp, sp, -PT_SIZE + REG_S $r1, sp, PT_R1 + REG_S $r2, sp, PT_R2 + REG_S $r3, sp, PT_R3 + REG_S $r4, sp, PT_R4 + REG_S $r21, sp, PT_R21 + REG_S $r22, sp, PT_R22 + REG_S $r23, sp, PT_R23 + REG_S $r24, sp, PT_R24 + REG_S $r25, sp, PT_R25 + REG_S $r26, sp, PT_R26 + REG_S $r27, sp, PT_R27 + REG_S $r28, sp, PT_R28 + REG_S $r29, sp, PT_R29 + REG_S $r30, sp, PT_R30 + REG_S $r31, sp, PT_R31 .endm .macro SETUP_WAKEUP - ld.d $r1, sp, PT_R1 - ld.d $r2, sp, PT_R2 - ld.d $r3, sp, PT_R3 - ld.d $r4, sp, PT_R4 - ld.d $r21, sp, PT_R21 - ld.d $r22, sp, PT_R22 - ld.d $r23, sp, PT_R23 - ld.d $r24, sp, PT_R24 - ld.d $r25, sp, PT_R25 - ld.d $r26, sp, PT_R26 - ld.d $r27, sp, PT_R27 - ld.d $r28, sp, PT_R28 - ld.d $r29, sp, PT_R29 - ld.d $r30, sp, PT_R30 - ld.d $r31, sp, PT_R31 - addi.d sp, sp, PT_SIZE + REG_L $r1, sp, PT_R1 + REG_L $r2, sp, PT_R2 + REG_L $r3, sp, PT_R3 + REG_L $r4, sp, PT_R4 + REG_L $r21, sp, PT_R21 + REG_L $r22, sp, PT_R22 + REG_L $r23, sp, PT_R23 + REG_L $r24, sp, PT_R24 + REG_L $r25, sp, PT_R25 + REG_L $r26, sp, PT_R26 + REG_L $r27, sp, PT_R27 + REG_L $r28, sp, PT_R28 + REG_L $r29, sp, PT_R29 + REG_L $r30, sp, PT_R30 + REG_L $r31, sp, PT_R31 + PTR_ADDI sp, sp, PT_SIZE .endm .text @@ -59,15 +59,15 @@ SYM_FUNC_START(loongarch_suspend_enter) SETUP_SLEEP la.pcrel t0, acpi_saved_sp - st.d sp, t0, 0 + REG_S sp, t0, 0 bl __flush_cache_all /* Pass RA and SP to BIOS */ - addi.d a1, sp, 0 + PTR_ADDI a1, sp, 0 la.pcrel a0, loongarch_wakeup_start la.pcrel t0, loongarch_suspend_addr - ld.d t0, t0, 0 + REG_L t0, t0, 0 jirl ra, t0, 0 /* Call BIOS's STR sleep routine */ /* @@ -83,7 +83,7 @@ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) csrwr t0, LOONGARCH_CSR_CRMD la.pcrel t0, acpi_saved_sp - ld.d sp, t0, 0 + REG_L sp, t0, 0 SETUP_WAKEUP jr ra diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index c0cc3ca5da9f4b..520f1513f07ddb 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -4,8 +4,9 @@ # Include the generic Makefile to check the built vdso. 
include $(srctree)/lib/vdso/Makefile.include -obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \ +obj-vdso-y := elf.o vgetcpu.o vgetrandom.o \ vgetrandom-chacha.o sigreturn.o +obj-vdso-$(CONFIG_GENERIC_GETTIMEOFDAY) += vgettimeofday.o # Common compiler flags between ABIs. ccflags-vdso := \ @@ -16,6 +17,10 @@ ccflags-vdso := \ $(CLANG_FLAGS) \ -D__VDSO__ +ifdef CONFIG_32BIT +ccflags-vdso += -DBUILD_VDSO32 +endif + cflags-vdso := $(ccflags-vdso) \ -isystem $(shell $(CC) -print-file-name=include) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S index 8ff98649994750..ac537e02beb183 100644 --- a/arch/loongarch/vdso/vdso.lds.S +++ b/arch/loongarch/vdso/vdso.lds.S @@ -7,8 +7,6 @@ #include <generated/asm-offsets.h> #include <vdso/datapage.h> -OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch") - OUTPUT_ARCH(loongarch) SECTIONS @@ -63,9 +61,11 @@ VERSION LINUX_5.10 { global: __vdso_getcpu; +#ifdef CONFIG_GENERIC_GETTIMEOFDAY __vdso_clock_getres; __vdso_clock_gettime; __vdso_gettimeofday; +#endif __vdso_getrandom; __vdso_rt_sigreturn; local: *; diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c index 5301cd9d0f839e..73af49242ecdc8 100644 --- a/arch/loongarch/vdso/vgetcpu.c +++ b/arch/loongarch/vdso/vgetcpu.c @@ -10,11 +10,19 @@ static __always_inline int read_cpu_id(void) { int cpu_id; +#ifdef CONFIG_64BIT __asm__ __volatile__( " rdtime.d $zero, %0\n" : "=r" (cpu_id) : : "memory"); +#else + __asm__ __volatile__( + " rdtimel.w $zero, %0\n" + : "=r" (cpu_id) + : + : "memory"); +#endif return cpu_id; } diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig index fda9971bdd8d9c..adb9fd62ddb0da 100644 --- a/arch/mips/configs/gcw0_defconfig +++ b/arch/mips/configs/gcw0_defconfig @@ -79,7 +79,6 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_PROC_FS is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_SPI is not set diff --git a/arch/mips/configs/loongson1_defconfig b/arch/mips/configs/loongson1_defconfig index 02d29110f7024b..1d9781ff969861 100644 --- a/arch/mips/configs/loongson1_defconfig +++ b/arch/mips/configs/loongson1_defconfig @@ -119,7 +119,6 @@ CONFIG_WATCHDOG_SYSFS=y CONFIG_LOONGSON1_WDT=y CONFIG_SOUND=y CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_MIPS is not set # CONFIG_SND_USB is not set diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig index 5f5b0254d75e78..a1bb0792f6eb1e 100644 --- a/arch/mips/configs/qi_lb60_defconfig +++ b/arch/mips/configs/qi_lb60_defconfig @@ -81,7 +81,6 @@ CONFIG_LOGO=y # CONFIG_LOGO_LINUX_CLUT224 is not set CONFIG_SOUND=y CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_SPI is not set diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig index 03a7bbe28a532d..49c709d663bebc 100644 --- a/arch/mips/configs/rbtx49xx_defconfig +++ b/arch/mips/configs/rbtx49xx_defconfig @@ -53,7 +53,6 @@ CONFIG_TXX9_WDT=m # CONFIG_VGA_ARB is not set CONFIG_SOUND=m CONFIG_SND=m -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_PCI is not set diff --git a/arch/mips/configs/rs90_defconfig b/arch/mips/configs/rs90_defconfig index 
a53dd66e9b8644..8382d535e6dc10 100644 --- a/arch/mips/configs/rs90_defconfig +++ b/arch/mips/configs/rs90_defconfig @@ -105,7 +105,6 @@ CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y # CONFIG_SND_PCM_TIMER is not set -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_PROC_FS is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_MIPS is not set diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config index 8aff8321739778..2b19c20a9a2c48 100644 --- a/arch/powerpc/configs/85xx-hw.config +++ b/arch/powerpc/configs/85xx-hw.config @@ -117,7 +117,6 @@ CONFIG_SND_INTEL8X0=y CONFIG_SND_POWERPC_SOC=y # CONFIG_SND_PPC is not set CONFIG_SND_SOC=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_USB is not set CONFIG_SND=y CONFIG_SOUND=y diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config index e7bd265fae5a4a..07f30ab881e59d 100644 --- a/arch/powerpc/configs/86xx-hw.config +++ b/arch/powerpc/configs/86xx-hw.config @@ -80,7 +80,6 @@ CONFIG_SERIO_LIBPS2=y CONFIG_SND_INTEL8X0=y CONFIG_SND_MIXER_OSS=y CONFIG_SND_PCM_OSS=y -# CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND=y CONFIG_SOUND=y CONFIG_ULI526X=y diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index c0fe5e76604a04..617650cea56a92 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig @@ -75,7 +75,6 @@ CONFIG_FB_SM501=m CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_PCI is not set # CONFIG_SND_PPC is not set diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index b082c1fae13c94..787d707f64a428 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -726,7 +726,6 @@ CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_DYNAMIC_MINORS=y -# CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_DEBUG=y CONFIG_SND_DEBUG_VERBOSE=y diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 502133979e2289..4cbbe2ee58aba7 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c @@ -532,6 +532,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info, spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_insert(b_dev_info, newpage); + __count_vm_event(BALLOON_MIGRATE); b_dev_info->isolated_pages--; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); @@ -550,7 +551,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info, static void cmm_balloon_compaction_init(void) { - balloon_devinfo_init(&b_dev_info); b_dev_info.migratepage = cmm_migratepage; } #else /* CONFIG_BALLOON_COMPACTION */ @@ -572,6 +572,7 @@ static int cmm_init(void) if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate) return -EOPNOTSUPP; + balloon_devinfo_init(&b_dev_info); cmm_balloon_compaction_init(); rc = register_oom_notifier(&cmm_oom_nb); diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig index a75d6325607b4c..14c5acb935e941 100644 --- a/arch/riscv/crypto/Kconfig +++ b/arch/riscv/crypto/Kconfig @@ -4,7 +4,8 @@ menu "Accelerated Cryptographic Algorithms for CPU (riscv)" config CRYPTO_AES_RISCV64 tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTS" - depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \ + 
RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS select CRYPTO_ALGAPI select CRYPTO_LIB_AES select CRYPTO_SKCIPHER @@ -20,7 +21,8 @@ config CRYPTO_AES_RISCV64 config CRYPTO_GHASH_RISCV64 tristate "Hash functions: GHASH" - depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \ + RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS select CRYPTO_GCM help GCM GHASH function (NIST SP 800-38D) @@ -30,7 +32,8 @@ config CRYPTO_GHASH_RISCV64 config CRYPTO_SM3_RISCV64 tristate "Hash functions: SM3 (ShangMi 3)" - depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \ + RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS select CRYPTO_HASH select CRYPTO_LIB_SM3 help @@ -42,7 +45,8 @@ config CRYPTO_SM3_RISCV64 config CRYPTO_SM4_RISCV64 tristate "Ciphers: SM4 (ShangMi 4)" - depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + depends on 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \ + RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS select CRYPTO_ALGAPI select CRYPTO_SM4 help diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig index abeae220606a3a..905fac10728457 100644 --- a/arch/sh/configs/edosk7760_defconfig +++ b/arch/sh/configs/edosk7760_defconfig @@ -79,7 +79,6 @@ CONFIG_FB_TILEBLITTING=y CONFIG_FB_SH_MOBILE_LCDC=m CONFIG_SOUND=y CONFIG_SND=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_SOC=y diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig index 9e3a54936f76f1..8ca46d704c8ba5 100644 --- a/arch/sh/configs/se7724_defconfig +++ b/arch/sh/configs/se7724_defconfig @@ -83,7 +83,6 @@ CONFIG_LOGO=y # CONFIG_LOGO_SUPERH_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=m -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_SPI is not set # CONFIG_SND_SUPERH is not set diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index eb63aa61b0465c..5468cc53cddb47 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -93,7 +93,6 @@ CONFIG_SND_PCM_OSS=y CONFIG_SND_SEQUENCER_OSS=y CONFIG_SND_HRTIMER=y CONFIG_SND_DYNAMIC_MINORS=y -# CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_DEBUG=y diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index e8b6af199c738e..9293ce50574dad 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -656,14 +656,11 @@ static int amd_uncore_df_event_init(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int ret = amd_uncore_event_init(event); - if (ret || pmu_version < 2) - return ret; - hwc->config = event->attr.config & (pmu_version >= 2 ? 
AMD64_PERFMON_V2_RAW_EVENT_MASK_NB : AMD64_RAW_EVENT_MASK_NB); - return 0; + return ret; } static int amd_uncore_df_add(struct perf_event *event, int flags) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 853fe073bab301..bdf3f0d0fe2167 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3378,6 +3378,9 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) if (!test_bit(bit, cpuc->active_mask)) continue; + /* Event may have already been cleared: */ + if (!event) + continue; /* * There may be unprocessed PEBS records in the PEBS buffer, diff --git a/arch/x86/hyperv/.gitignore b/arch/x86/hyperv/.gitignore new file mode 100644 index 00000000000000..333615d993b56c --- /dev/null +++ b/arch/x86/hyperv/.gitignore @@ -0,0 +1 @@ +mshv-asm-offsets.h diff --git a/block/bio.c b/block/bio.c index fa5ff36b443f91..e726c0e280a8dc 100644 --- a/block/bio.c +++ b/block/bio.c @@ -321,9 +321,13 @@ static struct bio *__bio_chain_endio(struct bio *bio) return parent; } +/* + * This function should only be used as a flag and must never be called. + * If execution reaches here, it indicates a serious programming error. + */ static void bio_chain_endio(struct bio *bio) { - bio_endio(__bio_chain_endio(bio)); + BUG(); } /** diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c index b6dbc976759613..fb018fffffdcc5 100644 --- a/block/blk-mq-dma.c +++ b/block/blk-mq-dma.c @@ -199,6 +199,7 @@ static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev, if (blk_can_dma_map_iova(req, dma_dev) && dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len)) return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec); + memset(state, 0, sizeof(*state)); return blk_dma_map_direct(req, dma_dev, iter, &vec); } diff --git a/block/blk-mq.c b/block/blk-mq.c index bd8b11c472a253..1978eef95dca3f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -336,12 +336,12 @@ void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set) { struct request_queue *q; - mutex_lock(&set->tag_list_lock); - list_for_each_entry(q, &set->tag_list, tag_set_list) { + rcu_read_lock(); + list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) { if (!blk_queue_skip_tagset_quiesce(q)) blk_mq_quiesce_queue_nowait(q); } - mutex_unlock(&set->tag_list_lock); + rcu_read_unlock(); blk_mq_wait_quiesce_done(set); } @@ -351,12 +351,12 @@ void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set) { struct request_queue *q; - mutex_lock(&set->tag_list_lock); - list_for_each_entry(q, &set->tag_list, tag_set_list) { + rcu_read_lock(); + list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) { if (!blk_queue_skip_tagset_quiesce(q)) blk_mq_unquiesce_queue(q); } - mutex_unlock(&set->tag_list_lock); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset); @@ -4311,7 +4311,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) struct blk_mq_tag_set *set = q->tag_set; mutex_lock(&set->tag_list_lock); - list_del(&q->tag_set_list); + list_del_rcu(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; @@ -4319,7 +4319,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) blk_mq_update_tag_set_shared(set, false); } mutex_unlock(&set->tag_list_lock); - INIT_LIST_HEAD(&q->tag_set_list); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, @@ -4338,7 +4337,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, } if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) 
queue_set_hctx_shared(q, true); - list_add_tail(&q->tag_set_list, &set->tag_list); + list_add_tail_rcu(&q->tag_set_list, &set->tag_list); mutex_unlock(&set->tag_list_lock); } @@ -5193,27 +5192,19 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob, unsigned int flags) { - long state = get_current_state(); int ret; do { ret = q->mq_ops->poll(hctx, iob); - if (ret > 0) { - __set_current_state(TASK_RUNNING); + if (ret > 0) return ret; - } - - if (signal_pending_state(state, current)) - __set_current_state(TASK_RUNNING); - if (task_is_running(current)) + if (task_sigpending(current)) return 1; - if (ret < 0 || (flags & BLK_POLL_ONESHOT)) break; cpu_relax(); } while (!need_resched()); - __set_current_state(TASK_RUNNING); return 0; } diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 394d8d74bba9b5..1c54678fae6b97 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -2100,7 +2100,7 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx, * we have a zone write plug for such zone if the device has a zone * write plug hash table. */ - if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash) + if (!disk->zone_wplugs_hash) return 0; wp_offset = disk_zone_wplug_sync_wp_offset(disk, zone); diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 77d6944489905b..858320b6ebb7ef 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -316,9 +316,6 @@ config BLK_DEV_RBD tristate "Rados block device (RBD)" depends on INET && BLOCK select CEPH_LIB - select CRC32 - select CRYPTO_AES - select CRYPTO help Say Y here if you want include the Rados block device, which stripes a block device over objects stored in the Ceph distributed object diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 2c715df63f23f4..df9831783a1339 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -926,6 +926,7 @@ static size_t ublk_copy_user_pages(const struct request *req, size_t done = 0; rq_for_each_segment(bv, req, iter) { + unsigned len; void *bv_buf; size_t copied; @@ -934,18 +935,17 @@ static size_t ublk_copy_user_pages(const struct request *req, continue; } - bv.bv_offset += offset; - bv.bv_len -= offset; - bv_buf = bvec_kmap_local(&bv); + len = bv.bv_len - offset; + bv_buf = kmap_local_page(bv.bv_page) + bv.bv_offset + offset; if (dir == ITER_DEST) - copied = copy_to_iter(bv_buf, bv.bv_len, uiter); + copied = copy_to_iter(bv_buf, len, uiter); else - copied = copy_from_iter(bv_buf, bv.bv_len, uiter); + copied = copy_from_iter(bv_buf, len, uiter); kunmap_local(bv_buf); done += copied; - if (copied < bv.bv_len) + if (copied < len) break; offset = 0; @@ -3673,6 +3673,19 @@ exit: return ret; } +static bool ublk_ctrl_uring_cmd_may_sleep(u32 cmd_op) +{ + switch (_IOC_NR(cmd_op)) { + case UBLK_CMD_GET_QUEUE_AFFINITY: + case UBLK_CMD_GET_DEV_INFO: + case UBLK_CMD_GET_DEV_INFO2: + case _IOC_NR(UBLK_U_CMD_GET_FEATURES): + return false; + default: + return true; + } +} + static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) { @@ -3681,7 +3694,8 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, u32 cmd_op = cmd->cmd_op; int ret = -EINVAL; - if (issue_flags & IO_URING_F_NONBLOCK) + if (ublk_ctrl_uring_cmd_may_sleep(cmd_op) && + issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ublk_ctrl_cmd_dump(cmd); diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c 
index 3782d0a187d1f5..9825f5218137f2 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -72,10 +72,10 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, desc_ver, priv.runtime_map); /* Config Direct Mapping */ - csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); - csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); - csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2); - csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3); + csr_write(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); + csr_write(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); + csr_write(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2); + csr_write(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3); real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index c74da29253e810..bd185482a7fdf1 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -737,7 +737,6 @@ config GPIO_TB10X depends on ARC_PLAT_TB10X || COMPILE_TEST select GPIO_GENERIC select GENERIC_IRQ_CHIP - select OF_GPIO config GPIO_TEGRA tristate "NVIDIA Tegra GPIO support" @@ -1568,6 +1567,7 @@ config GPIO_QIXIS_FPGA tristate "NXP QIXIS FPGA GPIO support" depends on MFD_SIMPLE_MFD_I2C || COMPILE_TEST select GPIO_REGMAP + select REGMAP_MMIO help This enables support for the GPIOs found in the QIXIS FPGA which is integrated on some NXP Layerscape boards such as LX2160ARDB and diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c index b3a26a06260bbb..5daf962b03231e 100644 --- a/drivers/gpio/gpio-mmio.c +++ b/drivers/gpio/gpio-mmio.c @@ -231,7 +231,7 @@ static int gpio_mmio_set(struct gpio_chip *gc, unsigned int gpio, int val) struct gpio_generic_chip *chip = to_gpio_generic_chip(gc); unsigned long mask = gpio_mmio_line2mask(gc, gpio); - guard(raw_spinlock)(&chip->lock); + guard(raw_spinlock_irqsave)(&chip->lock); if (val) chip->sdata |= mask; @@ -262,7 +262,7 @@ static int gpio_mmio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) struct gpio_generic_chip *chip = to_gpio_generic_chip(gc); unsigned long mask = gpio_mmio_line2mask(gc, gpio); - guard(raw_spinlock)(&chip->lock); + guard(raw_spinlock_irqsave)(&chip->lock); if (val) chip->sdata |= mask; @@ -302,7 +302,7 @@ static void gpio_mmio_set_multiple_single_reg(struct gpio_chip *gc, struct gpio_generic_chip *chip = to_gpio_generic_chip(gc); unsigned long set_mask, clear_mask; - guard(raw_spinlock)(&chip->lock); + guard(raw_spinlock_irqsave)(&chip->lock); gpio_mmio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask); @@ -391,7 +391,7 @@ static int gpio_mmio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct gpio_generic_chip *chip = to_gpio_generic_chip(gc); - scoped_guard(raw_spinlock, &chip->lock) { + scoped_guard(raw_spinlock_irqsave, &chip->lock) { chip->sdir &= ~gpio_mmio_line2mask(gc, gpio); if (chip->reg_dir_in) @@ -431,7 +431,7 @@ static void gpio_mmio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct gpio_generic_chip *chip = to_gpio_generic_chip(gc); - guard(raw_spinlock)(&chip->lock); + guard(raw_spinlock_irqsave)(&chip->lock); chip->sdir |= gpio_mmio_line2mask(gc, gpio); diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c index e5ba38e65c1002..9581bd5ca94711 100644 --- a/drivers/gpio/gpio-regmap.c +++ b/drivers/gpio/gpio-regmap.c @@ -338,7 +338,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config config->regmap_irq_line, config->regmap_irq_flags, 0, config->regmap_irq_chip, &gpio->irq_chip_data); if (ret) - 
goto err_free_bitmap; + goto err_remove_gpiochip; irq_domain = regmap_irq_get_domain(gpio->irq_chip_data); } else diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c index 7b95d1b0336149..a0116f004975ae 100644 --- a/drivers/gpio/gpiolib-acpi-quirks.c +++ b/drivers/gpio/gpiolib-acpi-quirks.c @@ -370,6 +370,28 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { .ignore_wake = "ASCP1A00:00@8", }, }, + { + /* + * Spurious wakeups, likely from touchpad controller + * Dell Precision 7780 + * Found in BIOS 1.24.1 + * + * Found in touchpad firmware, installed by Dell Touchpad Firmware Update Utility version 1160.4196.9, A01 + * ( Dell-Touchpad-Firmware-Update-Utility_VYGNN_WIN64_1160.4196.9_A00.EXE ), + * released on 11 Jul 2024 + * + * https://lore.kernel.org/linux-i2c/197ae95ffd8.dc819e60457077.7692120488609091556@zohomail.com/ + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Precision"), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7780"), + DMI_MATCH(DMI_BOARD_NAME, "0C6JVW"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_wake = "VEN_0488:00@355", + }, + }, {} /* Terminating entry */ }; diff --git a/drivers/gpio/gpiolib-shared.c b/drivers/gpio/gpiolib-shared.c index 8bdd107b1ad1d2..ba4b718d40a087 100644 --- a/drivers/gpio/gpiolib-shared.c +++ b/drivers/gpio/gpiolib-shared.c @@ -36,6 +36,8 @@ struct gpio_shared_ref { enum gpiod_flags flags; char *con_id; int dev_id; + /* Protects the auxiliary device struct and the lookup table. */ + struct mutex lock; struct auxiliary_device adev; struct gpiod_lookup_table *lookup; }; @@ -49,6 +51,7 @@ struct gpio_shared_entry { unsigned int offset; /* Index in the property value array. */ size_t index; + /* Synchronizes the modification of shared_desc. */ struct mutex lock; struct gpio_shared_desc *shared_desc; struct kref ref; @@ -56,7 +59,6 @@ struct gpio_shared_entry { }; static LIST_HEAD(gpio_shared_list); -static DEFINE_MUTEX(gpio_shared_lock); static DEFINE_IDA(gpio_shared_ida); #if IS_ENABLED(CONFIG_OF) @@ -77,6 +79,10 @@ gpio_shared_find_entry(struct fwnode_handle *controller_node, /* Handle all special nodes that we should ignore. */ static bool gpio_shared_of_node_ignore(struct device_node *node) { + /* Ignore disabled devices. */ + if (!of_device_is_available(node)) + return true; + /* * __symbols__ is a special, internal node and should not be considered * when scanning for shared GPIOs. @@ -183,6 +189,7 @@ static int gpio_shared_of_traverse(struct device_node *curr) ref->fwnode = fwnode_handle_get(of_fwnode_handle(curr)); ref->flags = args.args[1]; + mutex_init(&ref->lock); if (strends(prop->name, "gpios")) suffix = "-gpios"; @@ -254,7 +261,7 @@ static int gpio_shared_make_adev(struct gpio_device *gdev, struct auxiliary_device *adev = &ref->adev; int ret; - lockdep_assert_held(&gpio_shared_lock); + guard(mutex)(&ref->lock); memset(adev, 0, sizeof(*adev)); @@ -369,14 +376,14 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags) if (!lookup) return -ENOMEM; - guard(mutex)(&gpio_shared_lock); - list_for_each_entry(entry, &gpio_shared_list, list) { list_for_each_entry(ref, &entry->refs, list) { if (!device_match_fwnode(consumer, ref->fwnode) && !gpio_shared_dev_is_reset_gpio(consumer, entry, ref)) continue; + guard(mutex)(&ref->lock); + /* We've already done that on a previous request. 
*/ if (ref->lookup) return 0; @@ -395,7 +402,8 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags) lookup->table[0] = GPIO_LOOKUP(no_free_ptr(key), 0, ref->con_id, lflags); - gpiod_add_lookup_table(no_free_ptr(lookup)); + ref->lookup = no_free_ptr(lookup); + gpiod_add_lookup_table(ref->lookup); return 0; } @@ -408,10 +416,8 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags) static void gpio_shared_remove_adev(struct auxiliary_device *adev) { - lockdep_assert_held(&gpio_shared_lock); - - auxiliary_device_uninit(adev); auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); } int gpio_device_setup_shared(struct gpio_device *gdev) @@ -421,8 +427,6 @@ int gpio_device_setup_shared(struct gpio_device *gdev) unsigned long *flags; int ret; - guard(mutex)(&gpio_shared_lock); - list_for_each_entry(entry, &gpio_shared_list, list) { list_for_each_entry(ref, &entry->refs, list) { if (gdev->dev.parent == &ref->adev.dev) { @@ -479,19 +483,32 @@ void gpio_device_teardown_shared(struct gpio_device *gdev) struct gpio_shared_entry *entry; struct gpio_shared_ref *ref; - guard(mutex)(&gpio_shared_lock); - list_for_each_entry(entry, &gpio_shared_list, list) { if (!device_match_fwnode(&gdev->dev, entry->fwnode)) continue; + /* + * For some reason if we call synchronize_srcu() in GPIO core, + * descent here and take this mutex and then recursively call + * synchronize_srcu() again from gpiochip_remove() (which is + * totally fine) called after gpio_shared_remove_adev(), + * lockdep prints a false positive deadlock splat. Disable + * lockdep here. + */ + lockdep_off(); list_for_each_entry(ref, &entry->refs, list) { - gpiod_remove_lookup_table(ref->lookup); - kfree(ref->lookup->table[0].key); - kfree(ref->lookup); - ref->lookup = NULL; + guard(mutex)(&ref->lock); + + if (ref->lookup) { + gpiod_remove_lookup_table(ref->lookup); + kfree(ref->lookup->table[0].key); + kfree(ref->lookup); + ref->lookup = NULL; + } + gpio_shared_remove_adev(&ref->adev); } + lockdep_on(); } } @@ -515,8 +532,6 @@ static void gpiod_shared_put(void *data) { struct gpio_shared_entry *entry = data; - lockdep_assert_not_held(&gpio_shared_lock); - kref_put(&entry->ref, gpio_shared_release); } @@ -554,8 +569,6 @@ struct gpio_shared_desc *devm_gpiod_shared_get(struct device *dev) struct gpio_shared_entry *entry; int ret; - lockdep_assert_not_held(&gpio_shared_lock); - entry = dev_get_platdata(dev); if (WARN_ON(!entry)) /* Programmer bug */ @@ -590,6 +603,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_shared_get); static void gpio_shared_drop_ref(struct gpio_shared_ref *ref) { list_del(&ref->list); + mutex_destroy(&ref->lock); kfree(ref->con_id); ida_free(&gpio_shared_ida, ref->dev_id); fwnode_handle_put(ref->fwnode); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a67285118c37ba..c362d4dfb5bbb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1069,7 +1069,9 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, } /* Prepare a TLB flush fence to be attached to PTs */ - if (!params->unlocked) { + if (!params->unlocked && + /* SI doesn't support pasid or KIQ/MES */ + params->adev->family > AMDGPU_FAMILY_SI) { amdgpu_vm_tlb_fence_create(params->adev, vm, fence); /* Makes sure no PD/PT is freed before the flush */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index b107ee80e4728a..1f6a22983c0dd5 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -265,6 +265,8 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block) if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); + amdgpu_vcn_sysfs_reset_mask_fini(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { r = amdgpu_vcn_suspend(adev, i); if (r) diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 0320163b6e740f..f98c735b2905f3 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -3644,14 +3644,18 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { }; static const uint32_t cwsr_trap_gfx12_hex[] = { - 0xbfa00001, 0xbfa002a2, - 0xb0804009, 0xb8f8f804, + 0xbfa00001, 0xbfa002b2, + 0xb0804009, 0xb8eef81a, + 0xbf880000, 0xb980081a, + 0x00000000, 0xb8f8f804, + 0x9177ff77, 0x0c000000, + 0x846e9a6e, 0x8c776e77, 0x9178ff78, 0x00008c00, 0xb8fbf811, 0x8b6eff78, 0x00004000, 0xbfa10008, 0x8b6eff7b, 0x00000080, 0xbfa20018, 0x8b6ea07b, - 0xbfa20042, 0xbf830010, + 0xbfa2004a, 0xbf830010, 0xb8fbf811, 0xbfa0fffb, 0x8b6eff7b, 0x00000bd0, 0xbfa20010, 0xb8eef812, @@ -3662,28 +3666,32 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0xf0000000, 0xbfa20005, 0x8b6fff6f, 0x00000200, 0xbfa20002, 0x8b6ea07b, - 0xbfa2002c, 0xbefa4d82, + 0xbfa20034, 0xbefa4d82, 0xbf8a0000, 0x84fa887a, 0xbf0d8f7b, 0xbfa10002, 0x8c7bff7b, 0xffff0000, - 0xf4601bbd, 0xf8000010, - 0xbf8a0000, 0x846e976e, - 0x9177ff77, 0x00800000, - 0x8c776e77, 0xf4603bbd, - 0xf8000000, 0xbf8a0000, - 0xf4603ebd, 0xf8000008, - 0xbf8a0000, 0x8bee6e6e, - 0xbfa10001, 0xbe80486e, - 0x8b6eff6d, 0xf0000000, - 0xbfa20009, 0xb8eef811, - 0x8b6eff6e, 0x00000080, - 0xbfa20007, 0x8c78ff78, - 0x00004000, 0x80ec886c, - 0x82ed806d, 0xbfa00002, - 0x806c846c, 0x826d806d, - 0x8b6dff6d, 0x0000ffff, - 0x8bfe7e7e, 0x8bea6a6a, - 0x85788978, 0xb9783244, + 0x8b6eff77, 0x0c000000, + 0x916dff6d, 0x0c000000, + 0x8c6d6e6d, 0xf4601bbd, + 0xf8000010, 0xbf8a0000, + 0x846e976e, 0x9177ff77, + 0x00800000, 0x8c776e77, + 0xf4603bbd, 0xf8000000, + 0xbf8a0000, 0xf4603ebd, + 0xf8000008, 0xbf8a0000, + 0x8bee6e6e, 0xbfa10001, + 0xbe80486e, 0x8b6eff6d, + 0xf0000000, 0xbfa20009, + 0xb8eef811, 0x8b6eff6e, + 0x00000080, 0xbfa20007, + 0x8c78ff78, 0x00004000, + 0x80ec886c, 0x82ed806d, + 0xbfa00002, 0x806c846c, + 0x826d806d, 0x8b6dff6d, + 0x0000ffff, 0x8bfe7e7e, + 0x8bea6a6a, 0x85788978, + 0x936eff77, 0x0002001a, + 0xb96ef81a, 0xb9783244, 0xbe804a6c, 0xb8faf802, 0xbf0d987a, 0xbfa10001, 0xbfb00000, 0x8b6dff6d, @@ -3981,7 +3989,7 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0x008ce800, 0x00000000, 0x807d817d, 0x8070ff70, 0x00000080, 0xbf0a7b7d, - 0xbfa2fff7, 0xbfa0016e, + 0xbfa2fff7, 0xbfa00171, 0xbef4007e, 0x8b75ff7f, 0x0000ffff, 0x8c75ff75, 0x00040000, 0xbef60080, @@ -4163,12 +4171,14 @@ static const uint32_t cwsr_trap_gfx12_hex[] = { 0xf8000074, 0xbf8a0000, 0x8b6dff6d, 0x0000ffff, 0x8bfe7e7e, 0x8bea6a6a, - 0xb97af804, 0xbe804ec2, - 0xbf94fffe, 0xbe804a6c, + 0x936eff77, 0x0002001a, + 0xb96ef81a, 0xb97af804, 0xbe804ec2, 0xbf94fffe, - 0xbfb10000, 0xbf9f0000, + 0xbe804a6c, 0xbe804ec2, + 0xbf94fffe, 0xbfb10000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_gfx9_5_0_hex[] = { diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm index 5a1a1b1f897fe3..07999b4649ded0 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm 
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm @@ -78,9 +78,16 @@ var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT + +var SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT = 0 +var SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE = 2 + var BARRIER_STATE_SIGNAL_OFFSET = 16 var BARRIER_STATE_VALID_OFFSET = 0 +var TTMP11_SCHED_MODE_SHIFT = 26 +var TTMP11_SCHED_MODE_SIZE = 2 +var TTMP11_SCHED_MODE_MASK = 0xC000000 var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23 var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000 @@ -160,8 +167,19 @@ L_JUMP_TO_RESTORE: s_branch L_RESTORE L_SKIP_RESTORE: + // Assume most relaxed scheduling mode is set. Save and revert to normal mode. + s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_SCHED_MODE) + s_wait_alu 0 + s_setreg_imm32_b32 hwreg(HW_REG_WAVE_SCHED_MODE, \ + SQ_WAVE_SCHED_MODE_DEP_MODE_SHIFT, SQ_WAVE_SCHED_MODE_DEP_MODE_SIZE), 0 + s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC + // Save SCHED_MODE[1:0] into ttmp11[27:26]. + s_andn2_b32 ttmp11, ttmp11, TTMP11_SCHED_MODE_MASK + s_lshl_b32 ttmp2, ttmp2, TTMP11_SCHED_MODE_SHIFT + s_or_b32 ttmp11, ttmp11, ttmp2 + // Clear SPI_PRIO: do not save with elevated priority. // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd. s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK @@ -238,6 +256,13 @@ L_FETCH_2ND_TRAP: s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA s_or_b32 ttmp15, ttmp15, 0xFFFF0000 L_NO_SIGN_EXTEND_TMA: +#if ASIC_FAMILY == CHIP_GFX12 + // Move SCHED_MODE[1:0] from ttmp11 to unused bits in ttmp1[27:26] (return PC_HI). + // The second-level trap will restore from ttmp1 for backwards compatibility. + s_and_b32 ttmp2, ttmp11, TTMP11_SCHED_MODE_MASK + s_andn2_b32 ttmp1, ttmp1, TTMP11_SCHED_MODE_MASK + s_or_b32 ttmp1, ttmp1, ttmp2 +#endif s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag s_wait_idle @@ -287,6 +312,10 @@ L_EXIT_TRAP: // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it. // Only restore fields which the trap handler changes. s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT + + // Assume relaxed scheduling mode after this point. + restore_sched_mode(ttmp2) + s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv @@ -1043,6 +1072,9 @@ L_SKIP_BARRIER_RESTORE: s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + // Assume relaxed scheduling mode after this point. + restore_sched_mode(s_restore_tmp) + s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv // SCC is included, which is changed by previous salu // Make barrier and LDS state visible to all waves in the group. 
@@ -1134,3 +1166,8 @@ function valu_sgpr_hazard end #endif end + +function restore_sched_mode(s_tmp) + s_bfe_u32 s_tmp, ttmp11, (TTMP11_SCHED_MODE_SHIFT | (TTMP11_SCHED_MODE_SIZE << 0x10)) + s_setreg_b32 hwreg(HW_REG_WAVE_SCHED_MODE), s_tmp +end diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index f1e7583650c416..80c4fa2b0975dc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -409,6 +409,7 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv) vgpr_size = 0x80000; else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */ gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */ + gfxv == 110501 || /* GFX_VERSION_GFX1151 */ gfxv == 120000 || /* GFX_VERSION_GFX1200 */ gfxv == 120001) /* GFX_VERSION_GFX1201 */ vgpr_size = 0x60000; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 97c2270f278fd3..79ea138897fcf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1144,30 +1144,48 @@ static int svm_range_split_tail(struct svm_range *prange, uint64_t new_last, struct list_head *insert_list, struct list_head *remap_list) { + unsigned long last_align_down = ALIGN_DOWN(prange->last, 512); + unsigned long start_align = ALIGN(prange->start, 512); + bool huge_page_mapping = last_align_down > start_align; struct svm_range *tail = NULL; - int r = svm_range_split(prange, prange->start, new_last, &tail); + int r; - if (!r) { - list_add(&tail->list, insert_list); - if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity)) - list_add(&tail->update_list, remap_list); - } - return r; + r = svm_range_split(prange, prange->start, new_last, &tail); + + if (r) + return r; + + list_add(&tail->list, insert_list); + + if (huge_page_mapping && tail->start > start_align && + tail->start < last_align_down && (!IS_ALIGNED(tail->start, 512))) + list_add(&tail->update_list, remap_list); + + return 0; } static int svm_range_split_head(struct svm_range *prange, uint64_t new_start, struct list_head *insert_list, struct list_head *remap_list) { + unsigned long last_align_down = ALIGN_DOWN(prange->last, 512); + unsigned long start_align = ALIGN(prange->start, 512); + bool huge_page_mapping = last_align_down > start_align; struct svm_range *head = NULL; - int r = svm_range_split(prange, new_start, prange->last, &head); + int r; - if (!r) { - list_add(&head->list, insert_list); - if (!IS_ALIGNED(new_start, 1UL << prange->granularity)) - list_add(&head->update_list, remap_list); - } - return r; + r = svm_range_split(prange, new_start, prange->last, &head); + + if (r) + return r; + + list_add(&head->list, insert_list); + + if (huge_page_mapping && head->last + 1 > start_align && + head->last + 1 < last_align_down && (!IS_ALIGNED(head->last, 512))) + list_add(&head->update_list, remap_list); + + return 0; } static void diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 811636af14eaac..3eb32d58a12007 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -491,6 +491,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.num_sdma_queues_per_engine); sysfs_show_32bit_prop(buffer, offs, "num_cp_queues", dev->node_props.num_cp_queues); + sysfs_show_32bit_prop(buffer, offs, "cwsr_size", + dev->node_props.cwsr_size); + sysfs_show_32bit_prop(buffer, offs, "ctl_stack_size", + dev->node_props.ctl_stack_size); if (dev->gpu) { 
log_max_watch_addr = diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index ef97cede99269c..bd0403005f370e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -1063,6 +1063,9 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, void amdgpu_dm_update_connector_after_detect( struct amdgpu_dm_connector *aconnector); +void populate_hdmi_info_from_connector(struct drm_hdmi_info *info, + struct dc_edid_caps *edid_caps); + extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ac98c746c3dec8..e5e993d3ef74a2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -139,6 +139,9 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->edid_hdmi = connector->display_info.is_hdmi; + if (edid_caps->edid_hdmi) + populate_hdmi_info_from_connector(&connector->display_info.hdmi, edid_caps); + apply_edid_quirks(dev, edid_buf, edid_caps); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); @@ -990,6 +993,11 @@ dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector) return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector); } +void populate_hdmi_info_from_connector(struct drm_hdmi_info *hdmi, struct dc_edid_caps *edid_caps) +{ + edid_caps->scdc_present = hdmi->scdc.supported; +} + enum dc_edid_status dm_helpers_read_local_edid( struct dc_context *ctx, struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index dbd1da4d85d323..5e92eaa67aa33d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -884,28 +884,26 @@ struct dsc_mst_fairness_params { }; #if defined(CONFIG_DRM_AMD_DC_FP) -static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn) +static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) { - uint64_t effective_kbps = (uint64_t)kbps; + u8 link_coding_cap; + uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; - if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps - effective_kbps *= 1006; - effective_kbps = div_u64(effective_kbps, 1000); - } + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); + if (link_coding_cap == DP_128b_132b_ENCODING) + fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; - return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000)); + return fec_overhead_multiplier_x1000; } -static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin) +static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) { - uint64_t pbn_effective = (uint64_t)pbn; - - if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn - pbn_effective *= (1000000 / PEAK_FACTOR_X1000); - else - pbn_effective *= 1000; + u64 peak_kbps = kbps; - return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64); + peak_kbps *= 1006; + peak_kbps *= fec_overhead_multiplier_x1000; + peak_kbps = div_u64(peak_kbps, 1000 * 1000); + return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 
1000)); } static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, @@ -976,7 +974,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; - kbps = pbn_to_kbps(pbn, false); + kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], ¶m.sink->dsc_caps.dsc_dec_caps, @@ -1005,11 +1003,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, int link_timeslots_used; int fair_pbn_alloc; int ret = 0; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = - kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn; + kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -1105,6 +1104,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, int next_index; int remaining_to_try = 0; int ret; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); int var_pbn; for (i = 0; i < count; i++) { @@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index); var_pbn = vars[next_index].pbn; - vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true); + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1197,6 +1197,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int count = 0; int i, k, ret; bool debugfs_overwrite = false; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); struct drm_connector_state *new_conn_state; memset(params, 0, sizeof(params)); @@ -1277,7 +1278,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC Try no compression\n"); for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, @@ -1299,7 +1300,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, DRM_DEBUG_DRIVER("MST_DSC Try max compression\n"); for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1307,7 +1308,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret < 0) return ret; } else { - vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, 
fec_overhead_multiplier_x1000); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1762,6 +1763,18 @@ clean_exit: return ret; } +static uint32_t kbps_from_pbn(unsigned int pbn) +{ + uint64_t kbps = (uint64_t)pbn; + + kbps *= (1000000 / PEAK_FACTOR_X1000); + kbps *= 8; + kbps *= 54; + kbps /= 64; + + return (uint32_t)kbps; +} + static bool is_dsc_common_config_possible(struct dc_stream_state *stream, struct dc_dsc_bw_range *bw_range) { @@ -1860,7 +1873,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( dc_link_get_highest_encoding_format(stream->link)); cur_link_settings = stream->link->verified_link_cap; root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); - virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true); + virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); /* pick the end to end bw bottleneck */ end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); @@ -1913,7 +1926,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( immediate_upstream_port = aconnector->mst_output_port->parent->port_parent; if (immediate_upstream_port) { - virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true); + virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); } else { /* For topology LCT 1 case - only one mstb*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 922f23557f5d9e..0971dfa258454e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -86,7 +86,7 @@ uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane struct dc_plane_state *dc_create_plane_state(const struct dc *dc) { struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), - GFP_KERNEL); + GFP_ATOMIC); if (NULL == plane_state) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c index c468f492b87687..09303c282495b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c @@ -6711,6 +6711,76 @@ static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mo } // for j } +static noinline_for_stack void set_vm_row_and_swath_parameters(struct display_mode_lib_st *mode_lib) +{ + struct CalculateVMRowAndSwath_params_st *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params; + struct dml_core_mode_support_locals_st *s = &mode_lib->scratch.dml_core_mode_support_locals; + + CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; + CalculateVMRowAndSwath_params->myPipe = s->SurfParameters; + CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->ms.SurfaceSizeInMALL; + CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_luma; + CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_chroma; + CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ms.ip.dcc_meta_buffer_size_bytes; + CalculateVMRowAndSwath_params->UseMALLForStaticScreen = mode_lib->ms.cache_display_cfg.plane.UseMALLForStaticScreen; 
+ CalculateVMRowAndSwath_params->UseMALLForPStateChange = mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange; + CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->ms.soc.mall_allocated_for_dcn_mbytes; + CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthYThisState; + CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->ms.SwathWidthCThisState; + CalculateVMRowAndSwath_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable; + CalculateVMRowAndSwath_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; + CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; + CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; + CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; + CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; + CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; + CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState; + CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = mode_lib->ms.DCCMetaBufferSizeNotExceededPerState; + CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = s->dummy_integer_array[0]; + CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = s->dummy_integer_array[1]; + CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->ms.dpte_row_height; + CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->ms.dpte_row_height_chroma; + CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = s->dummy_integer_array[2]; // VBA_DELTA + CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = s->dummy_integer_array[3]; // VBA_DELTA + CalculateVMRowAndSwath_params->meta_req_width = s->dummy_integer_array[4]; + CalculateVMRowAndSwath_params->meta_req_width_chroma = s->dummy_integer_array[5]; + CalculateVMRowAndSwath_params->meta_req_height = s->dummy_integer_array[6]; + CalculateVMRowAndSwath_params->meta_req_height_chroma = s->dummy_integer_array[7]; + CalculateVMRowAndSwath_params->meta_row_width = s->dummy_integer_array[8]; + CalculateVMRowAndSwath_params->meta_row_width_chroma = s->dummy_integer_array[9]; + CalculateVMRowAndSwath_params->meta_row_height = mode_lib->ms.meta_row_height; + CalculateVMRowAndSwath_params->meta_row_height_chroma = mode_lib->ms.meta_row_height_chroma; + CalculateVMRowAndSwath_params->vm_group_bytes = s->dummy_integer_array[10]; + CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes; + CalculateVMRowAndSwath_params->PixelPTEReqWidthY = s->dummy_integer_array[11]; + CalculateVMRowAndSwath_params->PixelPTEReqHeightY = s->dummy_integer_array[12]; + CalculateVMRowAndSwath_params->PTERequestSizeY = s->dummy_integer_array[13]; + CalculateVMRowAndSwath_params->PixelPTEReqWidthC = s->dummy_integer_array[14]; + CalculateVMRowAndSwath_params->PixelPTEReqHeightC = s->dummy_integer_array[15]; + CalculateVMRowAndSwath_params->PTERequestSizeC = s->dummy_integer_array[16]; + CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = s->dummy_integer_array[17]; + CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = 
s->dummy_integer_array[18]; + CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = s->dummy_integer_array[19]; + CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = s->dummy_integer_array[20]; + CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesYThisState; + CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesCThisState; + CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->ms.PrefillY; + CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->ms.PrefillC; + CalculateVMRowAndSwath_params->MaxNumSwathY = mode_lib->ms.MaxNumSwY; + CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->ms.MaxNumSwC; + CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->ms.meta_row_bandwidth_this_state; + CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->ms.dpte_row_bandwidth_this_state; + CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRowThisState; + CalculateVMRowAndSwath_params->PDEAndMetaPTEBytesFrame = mode_lib->ms.PDEAndMetaPTEBytesPerFrameThisState; + CalculateVMRowAndSwath_params->MetaRowByte = mode_lib->ms.MetaRowBytesThisState; + CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->ms.use_one_row_for_frame_this_state; + CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->ms.use_one_row_for_frame_flip_this_state; + CalculateVMRowAndSwath_params->UsesMALLForStaticScreen = s->dummy_boolean_array[0]; + CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = s->dummy_boolean_array[1]; + CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = s->dummy_integer_array[21]; +} + /// @brief The Mode Support function. dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib) { @@ -7683,69 +7753,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib) s->SurfParameters[k].SwathHeightC = mode_lib->ms.SwathHeightCThisState[k]; } - CalculateVMRowAndSwath_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; - CalculateVMRowAndSwath_params->myPipe = s->SurfParameters; - CalculateVMRowAndSwath_params->SurfaceSizeInMALL = mode_lib->ms.SurfaceSizeInMALL; - CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsLuma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_luma; - CalculateVMRowAndSwath_params->PTEBufferSizeInRequestsChroma = mode_lib->ms.ip.dpte_buffer_size_in_pte_reqs_chroma; - CalculateVMRowAndSwath_params->DCCMetaBufferSizeBytes = mode_lib->ms.ip.dcc_meta_buffer_size_bytes; - CalculateVMRowAndSwath_params->UseMALLForStaticScreen = mode_lib->ms.cache_display_cfg.plane.UseMALLForStaticScreen; - CalculateVMRowAndSwath_params->UseMALLForPStateChange = mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange; - CalculateVMRowAndSwath_params->MALLAllocatedForDCN = mode_lib->ms.soc.mall_allocated_for_dcn_mbytes; - CalculateVMRowAndSwath_params->SwathWidthY = mode_lib->ms.SwathWidthYThisState; - CalculateVMRowAndSwath_params->SwathWidthC = mode_lib->ms.SwathWidthCThisState; - CalculateVMRowAndSwath_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable; - CalculateVMRowAndSwath_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; - CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; - CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; - CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; - 
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; - CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; - CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; - CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState; - CalculateVMRowAndSwath_params->DCCMetaBufferSizeNotExceeded = mode_lib->ms.DCCMetaBufferSizeNotExceededPerState; - CalculateVMRowAndSwath_params->dpte_row_width_luma_ub = s->dummy_integer_array[0]; - CalculateVMRowAndSwath_params->dpte_row_width_chroma_ub = s->dummy_integer_array[1]; - CalculateVMRowAndSwath_params->dpte_row_height_luma = mode_lib->ms.dpte_row_height; - CalculateVMRowAndSwath_params->dpte_row_height_chroma = mode_lib->ms.dpte_row_height_chroma; - CalculateVMRowAndSwath_params->dpte_row_height_linear_luma = s->dummy_integer_array[2]; // VBA_DELTA - CalculateVMRowAndSwath_params->dpte_row_height_linear_chroma = s->dummy_integer_array[3]; // VBA_DELTA - CalculateVMRowAndSwath_params->meta_req_width = s->dummy_integer_array[4]; - CalculateVMRowAndSwath_params->meta_req_width_chroma = s->dummy_integer_array[5]; - CalculateVMRowAndSwath_params->meta_req_height = s->dummy_integer_array[6]; - CalculateVMRowAndSwath_params->meta_req_height_chroma = s->dummy_integer_array[7]; - CalculateVMRowAndSwath_params->meta_row_width = s->dummy_integer_array[8]; - CalculateVMRowAndSwath_params->meta_row_width_chroma = s->dummy_integer_array[9]; - CalculateVMRowAndSwath_params->meta_row_height = mode_lib->ms.meta_row_height; - CalculateVMRowAndSwath_params->meta_row_height_chroma = mode_lib->ms.meta_row_height_chroma; - CalculateVMRowAndSwath_params->vm_group_bytes = s->dummy_integer_array[10]; - CalculateVMRowAndSwath_params->dpte_group_bytes = mode_lib->ms.dpte_group_bytes; - CalculateVMRowAndSwath_params->PixelPTEReqWidthY = s->dummy_integer_array[11]; - CalculateVMRowAndSwath_params->PixelPTEReqHeightY = s->dummy_integer_array[12]; - CalculateVMRowAndSwath_params->PTERequestSizeY = s->dummy_integer_array[13]; - CalculateVMRowAndSwath_params->PixelPTEReqWidthC = s->dummy_integer_array[14]; - CalculateVMRowAndSwath_params->PixelPTEReqHeightC = s->dummy_integer_array[15]; - CalculateVMRowAndSwath_params->PTERequestSizeC = s->dummy_integer_array[16]; - CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_l = s->dummy_integer_array[17]; - CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_l = s->dummy_integer_array[18]; - CalculateVMRowAndSwath_params->dpde0_bytes_per_frame_ub_c = s->dummy_integer_array[19]; - CalculateVMRowAndSwath_params->meta_pte_bytes_per_frame_ub_c = s->dummy_integer_array[20]; - CalculateVMRowAndSwath_params->PrefetchSourceLinesY = mode_lib->ms.PrefetchLinesYThisState; - CalculateVMRowAndSwath_params->PrefetchSourceLinesC = mode_lib->ms.PrefetchLinesCThisState; - CalculateVMRowAndSwath_params->VInitPreFillY = mode_lib->ms.PrefillY; - CalculateVMRowAndSwath_params->VInitPreFillC = mode_lib->ms.PrefillC; - CalculateVMRowAndSwath_params->MaxNumSwathY = mode_lib->ms.MaxNumSwY; - CalculateVMRowAndSwath_params->MaxNumSwathC = mode_lib->ms.MaxNumSwC; - CalculateVMRowAndSwath_params->meta_row_bw = mode_lib->ms.meta_row_bandwidth_this_state; - CalculateVMRowAndSwath_params->dpte_row_bw = mode_lib->ms.dpte_row_bandwidth_this_state; - CalculateVMRowAndSwath_params->PixelPTEBytesPerRow = mode_lib->ms.DPTEBytesPerRowThisState; - 
CalculateVMRowAndSwath_params->PDEAndMetaPTEBytesFrame = mode_lib->ms.PDEAndMetaPTEBytesPerFrameThisState; - CalculateVMRowAndSwath_params->MetaRowByte = mode_lib->ms.MetaRowBytesThisState; - CalculateVMRowAndSwath_params->use_one_row_for_frame = mode_lib->ms.use_one_row_for_frame_this_state; - CalculateVMRowAndSwath_params->use_one_row_for_frame_flip = mode_lib->ms.use_one_row_for_frame_flip_this_state; - CalculateVMRowAndSwath_params->UsesMALLForStaticScreen = s->dummy_boolean_array[0]; - CalculateVMRowAndSwath_params->PTE_BUFFER_MODE = s->dummy_boolean_array[1]; - CalculateVMRowAndSwath_params->BIGK_FRAGMENT_SIZE = s->dummy_integer_array[21]; + set_vm_row_and_swath_parameters(mode_lib); CalculateVMRowAndSwath(&mode_lib->scratch, CalculateVMRowAndSwath_params); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 8fe39993922029..4986f12dc9dfd3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1484,9 +1484,6 @@ void build_audio_output( state->clk_mgr); } - audio_output->pll_info.feed_back_divider = - pipe_ctx->pll_settings.feedback_divider; - audio_output->pll_info.dto_source = translate_to_dto_source( pipe_ctx->stream_res.tg->inst + 1); diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h index e4a26143f14c94..6699ad4fa825e3 100644 --- a/drivers/gpu/drm/amd/display/include/audio_types.h +++ b/drivers/gpu/drm/amd/display/include/audio_types.h @@ -47,15 +47,15 @@ struct audio_crtc_info { uint32_t h_total; uint32_t h_active; uint32_t v_active; - uint32_t pixel_repetition; uint32_t requested_pixel_clock_100Hz; /* in 100Hz */ uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */ - uint32_t refresh_rate; + uint32_t dsc_bits_per_pixel; + uint32_t dsc_num_slices; enum dc_color_depth color_depth; enum dc_pixel_encoding pixel_encoding; + uint16_t refresh_rate; + uint8_t pixel_repetition; bool interlaced; - uint32_t dsc_bits_per_pixel; - uint32_t dsc_num_slices; }; struct azalia_clock_info { uint32_t pixel_clock_in_10khz; @@ -78,11 +78,9 @@ enum audio_dto_source { struct audio_pll_info { uint32_t audio_dto_source_clock_in_khz; - uint32_t feed_back_divider; + uint32_t ss_percentage; enum audio_dto_source dto_source; bool ss_enabled; - uint32_t ss_percentage; - uint32_t ss_percentage_divider; }; struct audio_channel_associate_info { diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 033c44326552ab..fffb47b62f437b 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -429,7 +429,14 @@ static void sn65dsi83_handle_errors(struct sn65dsi83 *ctx) */ ret = regmap_read(ctx->regmap, REG_IRQ_STAT, &irq_stat); - if (ret || irq_stat) { + + /* + * Some hardware (Toradex Verdin AM62) is known to report the + * PLL_UNLOCK error interrupt while working without visible + * problems. In lack of a reliable way to discriminate such cases + * from user-visible PLL_UNLOCK cases, ignore that bit entirely. + */ + if (ret || irq_stat & ~REG_IRQ_STAT_CHA_PLL_UNLOCK) { /* * IRQ acknowledged is not always possible (the bridge can be in * a state where it doesn't answer anymore). 
To prevent an @@ -654,7 +661,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge, if (ctx->irq) { /* Enable irq to detect errors */ regmap_write(ctx->regmap, REG_IRQ_GLOBAL, REG_IRQ_GLOBAL_IRQ_EN); - regmap_write(ctx->regmap, REG_IRQ_EN, 0xff); + regmap_write(ctx->regmap, REG_IRQ_EN, 0xff & ~REG_IRQ_EN_CHA_PLL_UNLOCK_EN); } else { /* Use the polling task */ sn65dsi83_monitor_start(ctx); diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 12d8307997a0a9..eb56ba23479660 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -308,7 +308,7 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv, struct drm_gem_dma_object *dma_obj; int ret; - ret = drm_mode_size_dumb(drm, args, SZ_8, 0); + ret = drm_mode_size_dumb(drm, args, 0, 0); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index dc94a27710e5f4..93b9cff89080f9 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -559,7 +559,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, { int ret; - ret = drm_mode_size_dumb(dev, args, SZ_8, 0); + ret = drm_mode_size_dumb(dev, args, 0, 0); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index ce76c55913f725..b143589717e648 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -338,14 +338,14 @@ static int drm_plane_create_hotspot_properties(struct drm_plane *plane) prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X", INT_MIN, INT_MAX); - if (IS_ERR(prop_x)) - return PTR_ERR(prop_x); + if (!prop_x) + return -ENOMEM; prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y", INT_MIN, INT_MAX); - if (IS_ERR(prop_y)) { + if (!prop_y) { drm_property_destroy(plane->dev, prop_x); - return PTR_ERR(prop_y); + return -ENOMEM; } drm_object_attach_property(&plane->base, prop_x, 0); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 9cd03e2adeb26b..44f4fcce526e1d 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -288,13 +288,18 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, drm_framebuffer_put(&fb->base); fb = NULL; } + + wakeref = intel_display_rpm_get(display); + if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) { drm_dbg_kms(display->drm, "no BIOS fb, allocating a new one\n"); fb = __intel_fbdev_fb_alloc(display, sizes); - if (IS_ERR(fb)) - return PTR_ERR(fb); + if (IS_ERR(fb)) { + ret = PTR_ERR(fb); + goto out_unlock; + } } else { drm_dbg_kms(display->drm, "re-using BIOS fb\n"); prealloc = true; @@ -302,8 +307,6 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, sizes->fb_height = fb->base.height; } - wakeref = intel_display_rpm_get(display); - /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the * BIOS is suitable for own access. 
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index b3b75be9ced5d3..e9a4e6090fe0a0 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -72,7 +72,7 @@ struct intel_memory_region { u16 instance; enum intel_region_id id; char name[16]; - char uabi_name[16]; + char uabi_name[20]; bool private; /* not for userspace */ struct { diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 951d715dea3011..d019177462cf7f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -161,6 +161,30 @@ static void mgag200_set_startadd(struct mga_device *mdev, WREG_ECRT(0x00, crtcext0); } +/* + * Set the opmode for the hardware swapper for Big-Endian processor + * support for the frame buffer aperture and DMAWIN space. + */ +static void mgag200_set_datasiz(struct mga_device *mdev, u32 format) +{ +#if defined(__BIG_ENDIAN) + u32 opmode = RREG32(MGAREG_OPMODE); + + opmode &= ~(GENMASK(17, 16) | GENMASK(9, 8) | GENMASK(3, 2)); + + /* Big-endian byte-swapping */ + switch (format) { + case DRM_FORMAT_RGB565: + opmode |= 0x10100; + break; + case DRM_FORMAT_XRGB8888: + opmode |= 0x20200; + break; + } + WREG32(MGAREG_OPMODE, opmode); +#endif +} + void mgag200_init_registers(struct mga_device *mdev) { u8 crtc11, misc; @@ -496,6 +520,7 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_helper_damage_iter iter; struct drm_rect damage; + mgag200_set_datasiz(mdev, fb->format->format); drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); drm_atomic_for_each_plane_damage(&iter, &damage) { mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage); diff --git a/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c index e2bf99c433366b..a60209097a20a6 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c +++ b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c @@ -94,26 +94,6 @@ fail_unregister: return err; } -/** - * nouveau_i2c_encoder_destroy - Unregister the I2C device backing an encoder - * @drm_encoder: Encoder to be unregistered. - * - * This should be called from the @destroy method of an I2C slave - * encoder driver once I2C access is no longer needed. - */ -void nouveau_i2c_encoder_destroy(struct drm_encoder *drm_encoder) -{ - struct nouveau_i2c_encoder *encoder = to_encoder_i2c(drm_encoder); - struct i2c_client *client = nouveau_i2c_encoder_get_client(drm_encoder); - struct module *module = client->dev.driver->owner; - - i2c_unregister_device(client); - encoder->i2c_client = NULL; - - module_put(module); -} -EXPORT_SYMBOL(nouveau_i2c_encoder_destroy); - /* * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: */ diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h index 31334aa90781b3..869820701a56eb 100644 --- a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h +++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h @@ -202,7 +202,24 @@ static inline struct i2c_client *nouveau_i2c_encoder_get_client(struct drm_encod return to_encoder_i2c(encoder)->i2c_client; } -void nouveau_i2c_encoder_destroy(struct drm_encoder *encoder); +/** + * nouveau_i2c_encoder_destroy - Unregister the I2C device backing an encoder + * @drm_encoder: Encoder to be unregistered. 
+ * + * This should be called from the @destroy method of an I2C slave + * encoder driver once I2C access is no longer needed. + */ +static __always_inline void nouveau_i2c_encoder_destroy(struct drm_encoder *drm_encoder) +{ + struct nouveau_i2c_encoder *encoder = to_encoder_i2c(drm_encoder); + struct i2c_client *client = nouveau_i2c_encoder_get_client(drm_encoder); + struct module *module = client->dev.driver->owner; + + i2c_unregister_device(client); + encoder->i2c_client = NULL; + + module_put(module); +} /* * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs: diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 226c7ec56b8ed9..b8b97e10ae83ed 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -73,6 +73,10 @@ struct nvkm_gsp { const struct firmware *bl; const struct firmware *rm; + + struct { + struct nvkm_falcon_fw sb; + } falcon; } fws; struct nvkm_firmware fw; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 869d4335c0f45c..4a193b7d6d9e40 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -183,11 +183,11 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha fctx->context = drm->runl[chan->runlist].context_base + chan->chid; if (chan == drm->cechan) - strcpy(fctx->name, "copy engine channel"); + strscpy(fctx->name, "copy engine channel"); else if (chan == drm->channel) - strcpy(fctx->name, "generic kernel channel"); + strscpy(fctx->name, "generic kernel channel"); else - strcpy(fctx->name, cli->name); + strscpy(fctx->name, cli->name); kref_init(&fctx->fence_ref); if (!priv->uevent) diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 5c07a9ee8b775f..34effe6d86ad50 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -125,7 +125,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d, if (ret < 0) return ret; - return sprintf(buf, "%i\n", ret); + return sysfs_emit(buf, "%i\n", ret); } static ssize_t @@ -141,7 +141,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d, if (ret < 0) return ret; - return sprintf(buf, "%i\n", ret); + return sysfs_emit(buf, "%i\n", ret); } static ssize_t diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c index 5b721bd9d79949..50376024666042 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c @@ -259,18 +259,16 @@ nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name, } static int -nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd) +nvkm_gsp_fwsec_init(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, const char *name, u32 init_cmd) { struct nvkm_subdev *subdev = &gsp->subdev; struct nvkm_device *device = subdev->device; struct nvkm_bios *bios = device->bios; const union nvfw_falcon_ucode_desc *desc; struct nvbios_pmuE flcn_ucode; - u8 idx, ver, hdr; u32 data; u16 size, vers; - struct nvkm_falcon_fw fw = {}; - u32 mbox0 = 0; + u8 idx, ver, hdr; int ret; /* Lookup in VBIOS. 
*/ @@ -291,8 +289,8 @@ nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd) vers = (desc->v2.Hdr & 0x0000ff00) >> 8; switch (vers) { - case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break; - case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break; + case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, fw); break; + case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, fw); break; default: nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers); return -EINVAL; @@ -303,15 +301,19 @@ nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd) return ret; } - /* Boot. */ - ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0); - nvkm_falcon_fw_dtor(&fw); - if (ret) - return ret; - return 0; } +static int +nvkm_gsp_fwsec_boot(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw) +{ + struct nvkm_subdev *subdev = &gsp->subdev; + u32 mbox0 = 0; + + /* Boot */ + return nvkm_falcon_fw_boot(fw, subdev, true, &mbox0, NULL, 0, 0); +} + int nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp) { @@ -320,7 +322,7 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp) int ret; u32 err; - ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB); + ret = nvkm_gsp_fwsec_boot(gsp, &gsp->fws.falcon.sb); if (ret) return ret; @@ -335,26 +337,47 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp) } int +nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp) +{ + return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb", + NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB); +} + +void +nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp) +{ + nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb); +} + +int nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp) { struct nvkm_subdev *subdev = &gsp->subdev; struct nvkm_device *device = subdev->device; + struct nvkm_falcon_fw fw = {}; int ret; u32 err, wpr2_lo, wpr2_hi; - ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS); + ret = nvkm_gsp_fwsec_init(gsp, &fw, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS); if (ret) return ret; + ret = nvkm_gsp_fwsec_boot(gsp, &fw); + if (ret) + goto fwsec_dtor; + /* Verify. 
*/ err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16; if (err) { nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err); - return -EIO; + ret = -EIO; + } else { + wpr2_lo = nvkm_rd32(device, 0x1fa824); + wpr2_hi = nvkm_rd32(device, 0x1fa828); + nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi); } - wpr2_lo = nvkm_rd32(device, 0x1fa824); - wpr2_hi = nvkm_rd32(device, 0x1fa828); - nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi); - return 0; +fwsec_dtor: + nvkm_falcon_fw_dtor(&fw); + return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h index c3494b7ac572bc..86bdd203bc107c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h @@ -6,7 +6,10 @@ enum nvkm_acr_lsf_id; int nvkm_gsp_fwsec_frts(struct nvkm_gsp *); + +int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *); int nvkm_gsp_fwsec_sb(struct nvkm_gsp *); +void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *); struct nvkm_gsp_fwif { int version; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index 32e6a065d6d7a5..2a7e80c6d70f39 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -1817,12 +1817,16 @@ r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) RM_RISCV_UCODE_DESC *desc; int ret; + ret = nvkm_gsp_fwsec_sb_ctor(gsp); + if (ret) + return ret; + hdr = nvfw_bin_hdr(&gsp->subdev, fw->data); desc = (void *)fw->data + hdr->header_offset; ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw); if (ret) - return ret; + goto dtor_fwsec; memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size); @@ -1831,6 +1835,9 @@ r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp) gsp->boot.manifest_offset = desc->manifestOffset; gsp->boot.app_version = desc->appVersion; return 0; +dtor_fwsec: + nvkm_gsp_fwsec_sb_dtor(gsp); + return ret; } static const struct nvkm_firmware_func @@ -2101,6 +2108,7 @@ r535_gsp_dtor(struct nvkm_gsp *gsp) mutex_destroy(&gsp->cmdq.mutex); nvkm_gsp_dtor_fws(gsp); + nvkm_gsp_fwsec_sb_dtor(gsp); nvkm_gsp_mem_dtor(&gsp->rmargs); nvkm_gsp_mem_dtor(&gsp->wpr_meta); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c index 561e6643dcbb67..6e5173f98a2264 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c @@ -213,7 +213,7 @@ static const struct backlight_properties nt35560_bl_props = { static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx) { - struct device dev = dsi_ctx->dsi->dev; + struct device *dev = &dsi_ctx->dsi->dev; u8 vendor, version, panel; u16 val; @@ -225,7 +225,7 @@ static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx) return; if (vendor == 0x00) { - dev_err(&dev, "device vendor ID is zero\n"); + dev_err(dev, "device vendor ID is zero\n"); dsi_ctx->accum_err = -ENODEV; return; } @@ -236,12 +236,12 @@ static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx) case DISPLAY_SONY_ACX424AKP_ID2: case DISPLAY_SONY_ACX424AKP_ID3: case DISPLAY_SONY_ACX424AKP_ID4: - dev_info(&dev, + dev_info(dev, "MTP vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; default: - dev_info(&dev, + dev_info(dev, "unknown vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; diff --git a/drivers/gpu/drm/panthor/panthor_sched.c 
b/drivers/gpu/drm/panthor/panthor_sched.c index b834123a6560e9..a6b8024e1a3cd5 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -779,6 +779,12 @@ struct panthor_job_profiling_data { */ #define MAX_GROUPS_PER_POOL 128 +/* + * Mark added on an entry of group pool Xarray to identify if the group has + * been fully initialized and can be accessed elsewhere in the driver code. + */ +#define GROUP_REGISTERED XA_MARK_1 + /** * struct panthor_group_pool - Group pool * @@ -3007,7 +3013,7 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile) return; xa_lock(&gpool->xa); - xa_for_each(&gpool->xa, i, group) { + xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) { guard(spinlock)(&group->fdinfo.lock); pfile->stats.cycles += group->fdinfo.data.cycles; pfile->stats.time += group->fdinfo.data.time; @@ -3727,6 +3733,8 @@ int panthor_group_create(struct panthor_file *pfile, group_init_task_info(group); + xa_set_mark(&gpool->xa, gid, GROUP_REGISTERED); + return gid; err_erase_gid: @@ -3744,6 +3752,9 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle) struct panthor_scheduler *sched = ptdev->scheduler; struct panthor_group *group; + if (!xa_get_mark(&gpool->xa, group_handle, GROUP_REGISTERED)) + return -EINVAL; + group = xa_erase(&gpool->xa, group_handle); if (!group) return -EINVAL; @@ -3769,12 +3780,12 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle) } static struct panthor_group *group_from_handle(struct panthor_group_pool *pool, - u32 group_handle) + unsigned long group_handle) { struct panthor_group *group; xa_lock(&pool->xa); - group = group_get(xa_load(&pool->xa, group_handle)); + group = group_get(xa_find(&pool->xa, &group_handle, group_handle, GROUP_REGISTERED)); xa_unlock(&pool->xa); return group; @@ -3861,7 +3872,7 @@ panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile, return; xa_lock(&gpool->xa); - xa_for_each(&gpool->xa, i, group) { + xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) { stats->resident += group->fdinfo.kbo_sizes; if (group->csg_id >= 0) stats->active += group->fdinfo.kbo_sizes; diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c index 9413b76d0bfce0..4ef2e3c129ed7e 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -492,9 +492,9 @@ static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi, /* Configuration for Video Parameters, input is always RGB888 */ vprmset0r = TXVMVPRMSET0R_BPP_24; - if (mode->flags & DRM_MODE_FLAG_NVSYNC) + if (!(mode->flags & DRM_MODE_FLAG_PVSYNC)) vprmset0r |= TXVMVPRMSET0R_VSPOL_LOW; - if (mode->flags & DRM_MODE_FLAG_NHSYNC) + if (!(mode->flags & DRM_MODE_FLAG_PHSYNC)) vprmset0r |= TXVMVPRMSET0R_HSPOL_LOW; vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 5718d9d83a49f3..52c95131af5afd 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -586,7 +586,7 @@ out: drm_modeset_unlock(&crtc->mutex); } -static void tilcdc_crtc_destroy(struct drm_crtc *crtc) +void tilcdc_crtc_destroy(struct drm_crtc *crtc) { struct tilcdc_drm_private *priv = crtc->dev->dev_private; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 7caec4d38ddf0c..3dcbec312bacb4 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ 
b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -172,8 +172,7 @@ static void tilcdc_fini(struct drm_device *dev) if (priv->crtc) tilcdc_crtc_shutdown(priv->crtc); - if (priv->is_registered) - drm_dev_unregister(dev); + drm_dev_unregister(dev); drm_kms_helper_poll_fini(dev); drm_atomic_helper_shutdown(dev); @@ -220,21 +219,21 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) priv->wq = alloc_ordered_workqueue("tilcdc", 0); if (!priv->wq) { ret = -ENOMEM; - goto init_failed; + goto put_drm; } priv->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->mmio)) { dev_err(dev, "failed to request / ioremap\n"); ret = PTR_ERR(priv->mmio); - goto init_failed; + goto free_wq; } priv->clk = clk_get(dev, "fck"); if (IS_ERR(priv->clk)) { dev_err(dev, "failed to get functional clock\n"); ret = -ENODEV; - goto init_failed; + goto free_wq; } pm_runtime_enable(dev); @@ -313,7 +312,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) ret = tilcdc_crtc_create(ddev); if (ret < 0) { dev_err(dev, "failed to create crtc\n"); - goto init_failed; + goto disable_pm; } modeset_init(ddev); @@ -324,46 +323,46 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) if (ret) { dev_err(dev, "failed to register cpufreq notifier\n"); priv->freq_transition.notifier_call = NULL; - goto init_failed; + goto destroy_crtc; } #endif if (priv->is_componentized) { ret = component_bind_all(dev, ddev); if (ret < 0) - goto init_failed; + goto unregister_cpufreq_notif; ret = tilcdc_add_component_encoder(ddev); if (ret < 0) - goto init_failed; + goto unbind_component; } else { ret = tilcdc_attach_external_device(ddev); if (ret) - goto init_failed; + goto unregister_cpufreq_notif; } if (!priv->external_connector && ((priv->num_encoders == 0) || (priv->num_connectors == 0))) { dev_err(dev, "no encoders/connectors found\n"); ret = -EPROBE_DEFER; - goto init_failed; + goto unbind_component; } ret = drm_vblank_init(ddev, 1); if (ret < 0) { dev_err(dev, "failed to initialize vblank\n"); - goto init_failed; + goto unbind_component; } ret = platform_get_irq(pdev, 0); if (ret < 0) - goto init_failed; + goto unbind_component; priv->irq = ret; ret = tilcdc_irq_install(ddev, priv->irq); if (ret < 0) { dev_err(dev, "failed to install IRQ handler\n"); - goto init_failed; + goto unbind_component; } drm_mode_config_reset(ddev); @@ -372,16 +371,34 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev) ret = drm_dev_register(ddev, 0); if (ret) - goto init_failed; - priv->is_registered = true; + goto stop_poll; drm_client_setup_with_color_mode(ddev, bpp); return 0; -init_failed: - tilcdc_fini(ddev); +stop_poll: + drm_kms_helper_poll_fini(ddev); + tilcdc_irq_uninstall(ddev); +unbind_component: + if (priv->is_componentized) + component_unbind_all(dev, ddev); +unregister_cpufreq_notif: +#ifdef CONFIG_CPU_FREQ + cpufreq_unregister_notifier(&priv->freq_transition, + CPUFREQ_TRANSITION_NOTIFIER); +destroy_crtc: +#endif + tilcdc_crtc_destroy(priv->crtc); +disable_pm: + pm_runtime_disable(dev); + clk_put(priv->clk); +free_wq: + destroy_workqueue(priv->wq); +put_drm: platform_set_drvdata(pdev, NULL); + ddev->dev_private = NULL; + drm_dev_put(ddev); return ret; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index b818448c83f612..58b276f82a669a 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -82,7 +82,6 @@ struct tilcdc_drm_private { struct drm_encoder *external_encoder; struct 
drm_connector *external_connector; - bool is_registered; bool is_componentized; bool irq_enabled; }; @@ -164,6 +163,7 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc, void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc, bool simulate_vesa_sync); void tilcdc_crtc_shutdown(struct drm_crtc *crtc); +void tilcdc_crtc_destroy(struct drm_crtc *crtc); int tilcdc_crtc_update_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index b47020fca19923..e6abc7b40b1895 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -434,6 +434,11 @@ int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, if (ret) return ret; + if (!bo->resource) { + ret = -ENODATA; + goto unlock; + } + switch (bo->resource->mem_type) { case TTM_PL_SYSTEM: fallthrough; @@ -448,6 +453,7 @@ int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset, ret = -EIO; } +unlock: ttm_bo_unreserve(bo); return ret; diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index f88f7e19203ae5..7f606c8716480c 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -1742,11 +1742,10 @@ EXPORT_SYMBOL_GPL(i3c_master_do_daa); struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *buf, size_t len, bool force_bounce, enum dma_data_direction dir) { - struct i3c_dma *dma_xfer __free(kfree) = NULL; void *bounce __free(kfree) = NULL; void *dma_buf = buf; - dma_xfer = kzalloc(sizeof(*dma_xfer), GFP_KERNEL); + struct i3c_dma *dma_xfer __free(kfree) = kzalloc(sizeof(*dma_xfer), GFP_KERNEL); if (!dma_xfer) return NULL; @@ -2819,14 +2818,10 @@ EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot); static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops) { - if (!ops || !ops->bus_init || + if (!ops || !ops->bus_init || !ops->i3c_xfers || !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers) return -EINVAL; - /* Must provide one of priv_xfers (SDR only) or i3c_xfers (all modes) */ - if (!ops->priv_xfers && !ops->i3c_xfers) - return -EINVAL; - if (ops->request_ibi && (!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi || !ops->recycle_ibi_slot)) @@ -3031,13 +3026,7 @@ int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, if (mode != I3C_SDR && !(master->this->info.hdr_cap & BIT(mode))) return -EOPNOTSUPP; - if (master->ops->i3c_xfers) - return master->ops->i3c_xfers(dev, xfers, nxfers, mode); - - if (mode != I3C_SDR) - return -EINVAL; - - return master->ops->priv_xfers(dev, xfers, nxfers); + return master->ops->i3c_xfers(dev, xfers, nxfers, mode); } int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev) diff --git a/drivers/i3c/master/adi-i3c-master.c b/drivers/i3c/master/adi-i3c-master.c index 82ac0b3d057abd..6380a38e6d294c 100644 --- a/drivers/i3c/master/adi-i3c-master.c +++ b/drivers/i3c/master/adi-i3c-master.c @@ -332,10 +332,9 @@ static int adi_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, struct i3c_ccc_cmd *cmd) { struct adi_i3c_master *master = to_adi_i3c_master(m); - struct adi_i3c_xfer *xfer __free(kfree) = NULL; struct adi_i3c_cmd *ccmd; - xfer = adi_i3c_master_alloc_xfer(master, 1); + struct adi_i3c_xfer *xfer __free(kfree) = adi_i3c_master_alloc_xfer(master, 1); if (!xfer) return -ENOMEM; @@ -365,19 +364,18 @@ static int adi_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, return 0; } -static int adi_i3c_master_priv_xfers(struct i3c_dev_desc *dev, - struct i3c_priv_xfer 
*xfers, - int nxfers) +static int adi_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, + struct i3c_xfer *xfers, + int nxfers, enum i3c_xfer_mode mode) { struct i3c_master_controller *m = i3c_dev_get_master(dev); struct adi_i3c_master *master = to_adi_i3c_master(m); - struct adi_i3c_xfer *xfer __free(kfree) = NULL; int i, ret; if (!nxfers) return 0; - xfer = adi_i3c_master_alloc_xfer(master, nxfers); + struct adi_i3c_xfer *xfer __free(kfree) = adi_i3c_master_alloc_xfer(master, nxfers); if (!xfer) return -ENOMEM; @@ -777,7 +775,6 @@ static int adi_i3c_master_i2c_xfers(struct i2c_dev_desc *dev, { struct i3c_master_controller *m = i2c_dev_get_master(dev); struct adi_i3c_master *master = to_adi_i3c_master(m); - struct adi_i3c_xfer *xfer __free(kfree) = NULL; int i; if (!nxfers) @@ -786,7 +783,8 @@ static int adi_i3c_master_i2c_xfers(struct i2c_dev_desc *dev, if (xfers[i].flags & I2C_M_TEN) return -EOPNOTSUPP; } - xfer = adi_i3c_master_alloc_xfer(master, nxfers); + + struct adi_i3c_xfer *xfer __free(kfree) = adi_i3c_master_alloc_xfer(master, nxfers); if (!xfer) return -ENOMEM; @@ -919,7 +917,7 @@ static const struct i3c_master_controller_ops adi_i3c_master_ops = { .do_daa = adi_i3c_master_do_daa, .supports_ccc_cmd = adi_i3c_master_supports_ccc_cmd, .send_ccc_cmd = adi_i3c_master_send_ccc_cmd, - .priv_xfers = adi_i3c_master_priv_xfers, + .i3c_xfers = adi_i3c_master_i3c_xfers, .i2c_xfers = adi_i3c_master_i2c_xfers, .request_ibi = adi_i3c_master_request_ibi, .enable_ibi = adi_i3c_master_enable_ibi, diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 276592a8222e7b..889e2ed5bc8303 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -902,9 +902,9 @@ rpm_out: return ret; } -static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev, - struct i3c_priv_xfer *i3c_xfers, - int i3c_nxfers) +static int dw_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, + struct i3c_xfer *i3c_xfers, + int i3c_nxfers, enum i3c_xfer_mode mode) { struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); struct i3c_master_controller *m = i3c_dev_get_master(dev); @@ -1498,7 +1498,7 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = { .do_daa = dw_i3c_master_daa, .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd, .send_ccc_cmd = dw_i3c_master_send_ccc_cmd, - .priv_xfers = dw_i3c_master_priv_xfers, + .i3c_xfers = dw_i3c_master_i3c_xfers, .attach_i2c_dev = dw_i3c_master_attach_i2c_dev, .detach_i2c_dev = dw_i3c_master_detach_i2c_dev, .i2c_xfers = dw_i3c_master_i2c_xfers, diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index 97b151564d3d31..8eb76b8ca2b00a 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -720,9 +720,9 @@ static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m, return ret; } -static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev, - struct i3c_priv_xfer *xfers, - int nxfers) +static int cdns_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, + struct i3c_xfer *xfers, + int nxfers, enum i3c_xfer_mode mode) { struct i3c_master_controller *m = i3c_dev_get_master(dev); struct cdns_i3c_master *master = to_cdns_i3c_master(m); @@ -1519,7 +1519,7 @@ static const struct i3c_master_controller_ops cdns_i3c_master_ops = { .detach_i2c_dev = cdns_i3c_master_detach_i2c_dev, .supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd, .send_ccc_cmd = cdns_i3c_master_send_ccc_cmd, - .priv_xfers = cdns_i3c_master_priv_xfers, + .i3c_xfers 
= cdns_i3c_master_i3c_xfers, .i2c_xfers = cdns_i3c_master_i2c_xfers, .enable_ibi = cdns_i3c_master_enable_ibi, .disable_ibi = cdns_i3c_master_disable_ibi, diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index 47e42cb4dbe71e..607d77ab0e5469 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -266,9 +266,9 @@ static int i3c_hci_daa(struct i3c_master_controller *m) return hci->cmd->perform_daa(hci); } -static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev, - struct i3c_priv_xfer *i3c_xfers, - int nxfers) +static int i3c_hci_i3c_xfers(struct i3c_dev_desc *dev, + struct i3c_xfer *i3c_xfers, int nxfers, + enum i3c_xfer_mode mode) { struct i3c_master_controller *m = i3c_dev_get_master(dev); struct i3c_hci *hci = to_i3c_hci(m); @@ -515,7 +515,7 @@ static const struct i3c_master_controller_ops i3c_hci_ops = { .bus_cleanup = i3c_hci_bus_cleanup, .do_daa = i3c_hci_daa, .send_ccc_cmd = i3c_hci_send_ccc_cmd, - .priv_xfers = i3c_hci_priv_xfers, + .i3c_xfers = i3c_hci_i3c_xfers, .i2c_xfers = i3c_hci_i2c_xfers, .attach_i3c_dev = i3c_hci_attach_i3c_dev, .reattach_i3c_dev = i3c_hci_reattach_i3c_dev, diff --git a/drivers/i3c/master/renesas-i3c.c b/drivers/i3c/master/renesas-i3c.c index 275f7b9242886e..426a418f29b612 100644 --- a/drivers/i3c/master/renesas-i3c.c +++ b/drivers/i3c/master/renesas-i3c.c @@ -794,8 +794,8 @@ static int renesas_i3c_send_ccc_cmd(struct i3c_master_controller *m, return ret; } -static int renesas_i3c_priv_xfers(struct i3c_dev_desc *dev, struct i3c_priv_xfer *i3c_xfers, - int i3c_nxfers) +static int renesas_i3c_i3c_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *i3c_xfers, + int i3c_nxfers, enum i3c_xfer_mode mode) { struct i3c_master_controller *m = i3c_dev_get_master(dev); struct renesas_i3c *i3c = to_renesas_i3c(m); @@ -1282,7 +1282,7 @@ static const struct i3c_master_controller_ops renesas_i3c_ops = { .do_daa = renesas_i3c_daa, .supports_ccc_cmd = renesas_i3c_supports_ccc_cmd, .send_ccc_cmd = renesas_i3c_send_ccc_cmd, - .priv_xfers = renesas_i3c_priv_xfers, + .i3c_xfers = renesas_i3c_i3c_xfers, .attach_i2c_dev = renesas_i3c_attach_i2c_dev, .detach_i2c_dev = renesas_i3c_detach_i2c_dev, .i2c_xfers = renesas_i3c_i2c_xfers, diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c index bf52dc8345f5ff..ba556c008cf32d 100644 --- a/drivers/irqchip/irq-loongarch-avec.c +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -209,8 +209,9 @@ static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) struct avecintc_data *adata = irq_data_get_irq_chip_data(d); msg->address_hi = 0x0; - msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4) - | ((cpu_logical_map(adata->cpu & 0xffff)) << 12); + msg->address_lo = (loongarch_avec.msi_base_addr | + (adata->vec & AVEC_IRQ_MASK) << AVEC_IRQ_SHIFT) | + ((cpu_logical_map(adata->cpu & AVEC_CPU_MASK)) << AVEC_CPU_SHIFT); msg->data = 0x0; } diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c index 2474fa467a059e..31093a8ab67c3a 100644 --- a/drivers/irqchip/irq-mchp-eic.c +++ b/drivers/irqchip/irq-mchp-eic.c @@ -170,7 +170,7 @@ static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq, ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); if (ret || hwirq >= MCHP_EIC_NIRQ) - return ret; + return ret ?: -EINVAL; switch (type) { case IRQ_TYPE_EDGE_RISING: diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 104aa53550905d..239c1744a9268e 
100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -299,6 +299,7 @@ config DM_CRYPT select CRYPTO select CRYPTO_CBC select CRYPTO_ESSIV + select CRYPTO_LIB_MD5 # needed by lmk IV mode help This device-mapper target allows you to create a device that transparently encrypts the data on it. You'll need to activate @@ -546,6 +547,7 @@ config DM_VERITY depends on BLK_DEV_DM select CRYPTO select CRYPTO_HASH + select CRYPTO_LIB_SHA256 select DM_BUFIO help This device-mapper target creates a read-only device that diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index af345dc6fde14f..82fdea7dea7aac 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1104,7 +1104,7 @@ static void detached_dev_end_io(struct bio *bio) } kfree(ddip); - bio->bi_end_io(bio); + bio_endio(bio); } static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, @@ -1121,7 +1121,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); if (!ddip) { bio->bi_status = BLK_STS_RESOURCE; - bio->bi_end_io(bio); + bio_endio(bio); return; } @@ -1136,7 +1136,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, if ((bio_op(bio) == REQ_OP_DISCARD) && !bdev_max_discard_sectors(dc->bdev)) - bio->bi_end_io(bio); + detached_dev_end_io(bio); else submit_bio_noacct(bio); } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index e6d28be11c5c6c..5235f3e4924b7a 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1374,7 +1374,7 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio { unsigned int n_sectors; sector_t sector; - unsigned int offset, end; + unsigned int offset, end, align; b->end_io = end_io; @@ -1388,9 +1388,11 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio b->c->write_callback(b); offset = b->write_start; end = b->write_end; - offset &= -DM_BUFIO_WRITE_ALIGN; - end += DM_BUFIO_WRITE_ALIGN - 1; - end &= -DM_BUFIO_WRITE_ALIGN; + align = max(DM_BUFIO_WRITE_ALIGN, + bdev_physical_block_size(b->c->bdev)); + offset &= -align; + end += align - 1; + end &= -align; if (unlikely(end > b->c->block_size)) end = b->c->block_size; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index a3c9f74fe2dc47..1cda8618d74d2c 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -139,7 +139,6 @@ struct mapped_device { struct srcu_struct io_barrier; #ifdef CONFIG_BLK_DEV_ZONED - unsigned int nr_zones; void *zone_revalidate_map; struct task_struct *revalidate_map_task; #endif diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 5ef43231fe77f7..79704fbc523b5a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -21,6 +21,7 @@ #include <linux/mempool.h> #include <linux/slab.h> #include <linux/crypto.h> +#include <linux/fips.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/backing-dev.h> @@ -120,7 +121,6 @@ struct iv_benbi_private { #define LMK_SEED_SIZE 64 /* hash + 0 */ struct iv_lmk_private { - struct crypto_shash *hash_tfm; u8 *seed; }; @@ -254,22 +254,15 @@ static unsigned int max_write_size = 0; module_param(max_write_size, uint, 0644); MODULE_PARM_DESC(max_write_size, "Maximum size of a write request"); -static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio) +static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio, bool no_split) { struct crypt_config *cc = 
ti->private; unsigned val, sector_align; bool wrt = op_is_write(bio_op(bio)); - if (wrt) { - /* - * For zoned devices, splitting write operations creates the - * risk of deadlocking queue freeze operations with zone write - * plugging BIO work when the reminder of a split BIO is - * issued. So always allow the entire BIO to proceed. - */ - if (ti->emulate_zone_append) - return bio_sectors(bio); - + if (no_split) { + val = -1; + } else if (wrt) { val = min_not_zero(READ_ONCE(max_write_size), DM_CRYPT_DEFAULT_MAX_WRITE_SIZE); } else { @@ -465,10 +458,6 @@ static void crypt_iv_lmk_dtr(struct crypt_config *cc) { struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; - if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) - crypto_free_shash(lmk->hash_tfm); - lmk->hash_tfm = NULL; - kfree_sensitive(lmk->seed); lmk->seed = NULL; } @@ -483,11 +472,10 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, return -EINVAL; } - lmk->hash_tfm = crypto_alloc_shash("md5", 0, - CRYPTO_ALG_ALLOCATES_MEMORY); - if (IS_ERR(lmk->hash_tfm)) { - ti->error = "Error initializing LMK hash"; - return PTR_ERR(lmk->hash_tfm); + if (fips_enabled) { + ti->error = "LMK support is disabled due to FIPS"; + /* ... because it uses MD5. */ + return -EINVAL; } /* No seed in LMK version 2 */ @@ -498,7 +486,6 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); if (!lmk->seed) { - crypt_iv_lmk_dtr(cc); ti->error = "Error kmallocing seed storage in LMK"; return -ENOMEM; } @@ -514,7 +501,7 @@ static int crypt_iv_lmk_init(struct crypt_config *cc) /* LMK seed is on the position of LMK_KEYS + 1 key */ if (lmk->seed) memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), - crypto_shash_digestsize(lmk->hash_tfm)); + MD5_DIGEST_SIZE); return 0; } @@ -529,55 +516,31 @@ static int crypt_iv_lmk_wipe(struct crypt_config *cc) return 0; } -static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, - struct dm_crypt_request *dmreq, - u8 *data) +static void crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq, u8 *data) { struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; - SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); - union { - struct md5_state md5state; - u8 state[CRYPTO_MD5_STATESIZE]; - } u; + struct md5_ctx ctx; __le32 buf[4]; - int i, r; - desc->tfm = lmk->hash_tfm; + md5_init(&ctx); - r = crypto_shash_init(desc); - if (r) - return r; - - if (lmk->seed) { - r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE); - if (r) - return r; - } + if (lmk->seed) + md5_update(&ctx, lmk->seed, LMK_SEED_SIZE); /* Sector is always 512B, block size 16, add data of blocks 1-31 */ - r = crypto_shash_update(desc, data + 16, 16 * 31); - if (r) - return r; + md5_update(&ctx, data + 16, 16 * 31); /* Sector is cropped to 56 bits here */ buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); buf[2] = cpu_to_le32(4024); buf[3] = 0; - r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); - if (r) - return r; + md5_update(&ctx, (u8 *)buf, sizeof(buf)); /* No MD5 padding here */ - r = crypto_shash_export(desc, &u.md5state); - if (r) - return r; - - for (i = 0; i < MD5_HASH_WORDS; i++) - __cpu_to_le32s(&u.md5state.hash[i]); - memcpy(iv, &u.md5state.hash, cc->iv_size); - - return 0; + cpu_to_le32_array(ctx.state.h, ARRAY_SIZE(ctx.state.h)); + memcpy(iv, ctx.state.h, cc->iv_size); } static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, @@ -585,17 +548,15 @@ 
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, { struct scatterlist *sg; u8 *src; - int r = 0; if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { sg = crypt_get_sg_data(cc, dmreq->sg_in); src = kmap_local_page(sg_page(sg)); - r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); + crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); kunmap_local(src); } else memset(iv, 0, cc->iv_size); - - return r; + return 0; } static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, @@ -603,21 +564,19 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, { struct scatterlist *sg; u8 *dst; - int r; if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) return 0; sg = crypt_get_sg_data(cc, dmreq->sg_out); dst = kmap_local_page(sg_page(sg)); - r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); + crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); /* Tweak the first block of plaintext sector */ - if (!r) - crypto_xor(dst + sg->offset, iv, cc->iv_size); + crypto_xor(dst + sg->offset, iv, cc->iv_size); kunmap_local(dst); - return r; + return 0; } static void crypt_iv_tcw_dtr(struct crypt_config *cc) @@ -1781,7 +1740,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) bio_for_each_folio_all(fi, clone) { if (folio_test_large(fi.folio)) { percpu_counter_sub(&cc->n_allocated_pages, - 1 << folio_order(fi.folio)); + folio_nr_pages(fi.folio)); folio_put(fi.folio); } else { mempool_free(&fi.folio->page, &cc->page_pool); @@ -3496,6 +3455,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) struct dm_crypt_io *io; struct crypt_config *cc = ti->private; unsigned max_sectors; + bool no_split; /* * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. @@ -3513,10 +3473,20 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) /* * Check if bio is too large, split as needed. + * + * For zoned devices, splitting write operations creates the + * risk of deadlocking queue freeze operations with zone write + * plugging BIO work when the reminder of a split BIO is + * issued. So always allow the entire BIO to proceed. 
*/ - max_sectors = get_max_request_sectors(ti, bio); - if (unlikely(bio_sectors(bio) > max_sectors)) + no_split = (ti->emulate_zone_append && op_is_write(bio_op(bio))) || + (bio->bi_opf & REQ_ATOMIC); + max_sectors = get_max_request_sectors(ti, bio, no_split); + if (unlikely(bio_sectors(bio) > max_sectors)) { + if (unlikely(no_split)) + return DM_MAPIO_KILL; dm_accept_partial_bio(bio, max_sectors); + } /* * Ensure that bio is a multiple of internal sector encryption size @@ -3762,15 +3732,20 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) if (ti->emulate_zone_append) limits->max_hw_sectors = min(limits->max_hw_sectors, BIO_MAX_VECS << PAGE_SECTORS_SHIFT); + + limits->atomic_write_hw_unit_max = min(limits->atomic_write_hw_unit_max, + BIO_MAX_VECS << PAGE_SHIFT); + limits->atomic_write_hw_max = min(limits->atomic_write_hw_max, + BIO_MAX_VECS << PAGE_SHIFT); } static struct target_type crypt_target = { .name = "crypt", - .version = {1, 28, 0}, + .version = {1, 29, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, - .features = DM_TARGET_ZONED_HM, + .features = DM_TARGET_ZONED_HM | DM_TARGET_ATOMIC_WRITES, .report_zones = crypt_report_zones, .map = crypt_map, .status = crypt_status, diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index 6abb31ca966231..b354e74a670e0f 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -103,7 +103,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, } else { flush_dcache_page(bv->bv_page); memcpy(ba, pa, cur_len); - dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len); + dm_bufio_mark_buffer_dirty(b); } dm_bufio_release(b); diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index b6797663753818..061b4d3108132c 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -29,7 +29,7 @@ typedef sector_t chunk_t; * chunk within the device. */ struct dm_exception { - struct hlist_bl_node hash_list; + struct hlist_node hash_list; chunk_t old_chunk; chunk_t new_chunk; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 7bb7174f8f4f87..f0c84e7a5daa63 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -432,6 +432,7 @@ static int log_writes_kthread(void *arg) struct log_writes_c *lc = arg; sector_t sector = 0; + set_freezable(); while (!kthread_should_stop()) { bool super = false; bool logging_enabled; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aaf4a0a4b0ebb6..d5d6ef7ba8381a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -131,7 +131,7 @@ static void queue_if_no_path_timeout_work(struct timer_list *t); #define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */ #define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */ #define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */ -#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */ +/* MPATHF_RETAIN_ATTACHED_HW_HANDLER no longer has any effect */ #define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */ #define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */ #define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? 
*/ @@ -237,16 +237,10 @@ static struct multipath *alloc_multipath(struct dm_target *ti) static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) { - if (m->queue_mode == DM_TYPE_NONE) { + if (m->queue_mode == DM_TYPE_NONE) m->queue_mode = DM_TYPE_REQUEST_BASED; - } else if (m->queue_mode == DM_TYPE_BIO_BASED) { + else if (m->queue_mode == DM_TYPE_BIO_BASED) INIT_WORK(&m->process_queued_bios, process_queued_bios); - /* - * bio-based doesn't support any direct scsi_dh management; - * it just discovers if a scsi_dh is attached. - */ - set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); - } dm_table_set_type(ti->table, m->queue_mode); @@ -887,36 +881,30 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, struct request_queue *q = bdev_get_queue(bdev); int r; - if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) { -retain: - if (*attached_handler_name) { - /* - * Clear any hw_handler_params associated with a - * handler that isn't already attached. - */ - if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) { - kfree(m->hw_handler_params); - m->hw_handler_params = NULL; - } - - /* - * Reset hw_handler_name to match the attached handler - * - * NB. This modifies the table line to show the actual - * handler instead of the original table passed in. - */ - kfree(m->hw_handler_name); - m->hw_handler_name = *attached_handler_name; - *attached_handler_name = NULL; + if (*attached_handler_name) { + /* + * Clear any hw_handler_params associated with a + * handler that isn't already attached. + */ + if (m->hw_handler_name && strcmp(*attached_handler_name, + m->hw_handler_name)) { + kfree(m->hw_handler_params); + m->hw_handler_params = NULL; } + + /* + * Reset hw_handler_name to match the attached handler + * + * NB. This modifies the table line to show the actual + * handler instead of the original table passed in. 
+ */ + kfree(m->hw_handler_name); + m->hw_handler_name = *attached_handler_name; + *attached_handler_name = NULL; } if (m->hw_handler_name) { r = scsi_dh_attach(q, m->hw_handler_name); - if (r == -EBUSY) { - DMINFO("retaining handler on device %pg", bdev); - goto retain; - } if (r < 0) { *error = "error attaching hardware handler"; return r; @@ -962,6 +950,19 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps q = bdev_get_queue(p->path.dev->bdev); attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); + if (IS_ERR(attached_handler_name)) { + if (PTR_ERR(attached_handler_name) == -ENODEV) { + if (m->hw_handler_name) { + DMERR("hardware handlers are only allowed for SCSI devices"); + kfree(m->hw_handler_name); + m->hw_handler_name = NULL; + } + attached_handler_name = NULL; + } else { + r = PTR_ERR(attached_handler_name); + goto bad; + } + } if (attached_handler_name || m->hw_handler_name) { INIT_DELAYED_WORK(&p->activate_path, activate_path_work); r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error); @@ -1138,7 +1139,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) } if (!strcasecmp(arg_name, "retain_attached_hw_handler")) { - set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); + /* no longer has any effect */ continue; } @@ -1823,7 +1824,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + (m->pg_init_retries > 0) * 2 + (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + - test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) + (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2); if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) @@ -1832,8 +1832,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("pg_init_retries %u ", m->pg_init_retries); if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); - if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) - DMEMIT("retain_attached_hw_handler "); if (m->queue_mode != DM_TYPE_REQUEST_BASED) { switch (m->queue_mode) { case DM_TYPE_BIO_BASED: @@ -2307,7 +2305,7 @@ static struct target_type multipath_target = { .name = "multipath", .version = {1, 15, 0}, .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | - DM_TARGET_PASSES_INTEGRITY, + DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ATOMIC_WRITES, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c index 698697a7a73c66..534bf07b794f14 100644 --- a/drivers/md/dm-pcache/cache.c +++ b/drivers/md/dm-pcache/cache.c @@ -10,7 +10,8 @@ struct kmem_cache *key_cache; static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache) { - return cache->cache_info_addr + cache->info_index; + return (struct pcache_cache_info *)((char *)cache->cache_info_addr + + (size_t)cache->info_index * PCACHE_CACHE_INFO_SIZE); } static void cache_info_write(struct pcache_cache *cache) @@ -21,10 +22,10 @@ static void cache_info_write(struct pcache_cache *cache) cache_info->header.crc = pcache_meta_crc(&cache_info->header, sizeof(struct pcache_cache_info)); + cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX; memcpy_flushcache(get_cache_info_addr(cache), cache_info, sizeof(struct pcache_cache_info)); - - cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX; + pmem_wmb(); } static void cache_info_init_default(struct 
pcache_cache *cache); @@ -49,6 +50,8 @@ static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_optio return -EINVAL; } + cache->info_index = ((char *)cache_info_addr - (char *)cache->cache_info_addr) / PCACHE_CACHE_INFO_SIZE; + return 0; } @@ -93,10 +96,10 @@ void cache_pos_encode(struct pcache_cache *cache, pos_onmedia.header.seq = seq; pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia); + *index = (*index + 1) % PCACHE_META_INDEX_MAX; + memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia)); pmem_wmb(); - - *index = (*index + 1) % PCACHE_META_INDEX_MAX; } int cache_pos_decode(struct pcache_cache *cache, diff --git a/drivers/md/dm-pcache/cache_segment.c b/drivers/md/dm-pcache/cache_segment.c index f0b58980806e07..9d92e2b067ed04 100644 --- a/drivers/md/dm-pcache/cache_segment.c +++ b/drivers/md/dm-pcache/cache_segment.c @@ -26,11 +26,11 @@ static void cache_seg_info_write(struct pcache_cache_segment *cache_seg) seg_info->header.seq++; seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info)); + cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX; + seg_info_addr = get_seg_info_addr(cache_seg); memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info)); pmem_wmb(); - - cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX; mutex_unlock(&cache_seg->info_lock); } @@ -56,7 +56,10 @@ static int cache_seg_info_load(struct pcache_cache_segment *cache_seg) ret = -EIO; goto out; } - cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base; + + cache_seg->info_index = + ((char *)cache_seg_info_addr - (char *)cache_seg_info_addr_base) / + PCACHE_SEG_INFO_SIZE; out: mutex_unlock(&cache_seg->info_lock); @@ -129,10 +132,10 @@ static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg) cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header, sizeof(struct pcache_cache_seg_gen)); + cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX; + memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen)); pmem_wmb(); - - cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX; } static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c6f7129e43d347..4bacdc499984b5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2287,6 +2287,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) mddev->reshape_position = le64_to_cpu(sb->reshape_position); rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); + if (!rs->raid_type) + return -EINVAL; } } else { diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f40c18da400007..dbd148967de425 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -40,10 +40,15 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ (DM_TRACKED_CHUNK_HASH_SIZE - 1)) +struct dm_hlist_head { + struct hlist_head head; + spinlock_t lock; +}; + struct dm_exception_table { uint32_t hash_mask; unsigned int hash_shift; - struct hlist_bl_head *table; + struct dm_hlist_head *table; }; struct dm_snapshot { @@ -628,8 +633,8 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk); /* Lock to protect access to the completed and pending exception hash tables. 
*/ struct dm_exception_table_lock { - struct hlist_bl_head *complete_slot; - struct hlist_bl_head *pending_slot; + spinlock_t *complete_slot; + spinlock_t *pending_slot; }; static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk, @@ -638,20 +643,20 @@ static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk, struct dm_exception_table *complete = &s->complete; struct dm_exception_table *pending = &s->pending; - lock->complete_slot = &complete->table[exception_hash(complete, chunk)]; - lock->pending_slot = &pending->table[exception_hash(pending, chunk)]; + lock->complete_slot = &complete->table[exception_hash(complete, chunk)].lock; + lock->pending_slot = &pending->table[exception_hash(pending, chunk)].lock; } static void dm_exception_table_lock(struct dm_exception_table_lock *lock) { - hlist_bl_lock(lock->complete_slot); - hlist_bl_lock(lock->pending_slot); + spin_lock_nested(lock->complete_slot, 1); + spin_lock_nested(lock->pending_slot, 2); } static void dm_exception_table_unlock(struct dm_exception_table_lock *lock) { - hlist_bl_unlock(lock->pending_slot); - hlist_bl_unlock(lock->complete_slot); + spin_unlock(lock->pending_slot); + spin_unlock(lock->complete_slot); } static int dm_exception_table_init(struct dm_exception_table *et, @@ -661,13 +666,15 @@ static int dm_exception_table_init(struct dm_exception_table *et, et->hash_shift = hash_shift; et->hash_mask = size - 1; - et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head), + et->table = kvmalloc_array(size, sizeof(struct dm_hlist_head), GFP_KERNEL); if (!et->table) return -ENOMEM; - for (i = 0; i < size; i++) - INIT_HLIST_BL_HEAD(et->table + i); + for (i = 0; i < size; i++) { + INIT_HLIST_HEAD(&et->table[i].head); + spin_lock_init(&et->table[i].lock); + } return 0; } @@ -675,16 +682,17 @@ static int dm_exception_table_init(struct dm_exception_table *et, static void dm_exception_table_exit(struct dm_exception_table *et, struct kmem_cache *mem) { - struct hlist_bl_head *slot; + struct dm_hlist_head *slot; struct dm_exception *ex; - struct hlist_bl_node *pos, *n; + struct hlist_node *pos; int i, size; size = et->hash_mask + 1; for (i = 0; i < size; i++) { slot = et->table + i; - hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) { + hlist_for_each_entry_safe(ex, pos, &slot->head, hash_list) { + hlist_del(&ex->hash_list); kmem_cache_free(mem, ex); cond_resched(); } @@ -700,7 +708,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) static void dm_remove_exception(struct dm_exception *e) { - hlist_bl_del(&e->hash_list); + hlist_del(&e->hash_list); } /* @@ -710,12 +718,11 @@ static void dm_remove_exception(struct dm_exception *e) static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et, chunk_t chunk) { - struct hlist_bl_head *slot; - struct hlist_bl_node *pos; + struct hlist_head *slot; struct dm_exception *e; - slot = &et->table[exception_hash(et, chunk)]; - hlist_bl_for_each_entry(e, pos, slot, hash_list) + slot = &et->table[exception_hash(et, chunk)].head; + hlist_for_each_entry(e, slot, hash_list) if (chunk >= e->old_chunk && chunk <= e->old_chunk + dm_consecutive_chunk_count(e)) return e; @@ -762,18 +769,17 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe) static void dm_insert_exception(struct dm_exception_table *eh, struct dm_exception *new_e) { - struct hlist_bl_head *l; - struct hlist_bl_node *pos; + struct hlist_head *l; struct dm_exception *e = NULL; - l = &eh->table[exception_hash(eh, 
new_e->old_chunk)]; + l = &eh->table[exception_hash(eh, new_e->old_chunk)].head; /* Add immediately if this table doesn't support consecutive chunks */ if (!eh->hash_shift) goto out; /* List is ordered by old_chunk */ - hlist_bl_for_each_entry(e, pos, l, hash_list) { + hlist_for_each_entry(e, l, hash_list) { /* Insert after an existing chunk? */ if (new_e->old_chunk == (e->old_chunk + dm_consecutive_chunk_count(e) + 1) && @@ -804,13 +810,13 @@ out: * Either the table doesn't support consecutive chunks or slot * l is empty. */ - hlist_bl_add_head(&new_e->hash_list, l); + hlist_add_head(&new_e->hash_list, l); } else if (new_e->old_chunk < e->old_chunk) { /* Add before an existing exception */ - hlist_bl_add_before(&new_e->hash_list, &e->hash_list); + hlist_add_before(&new_e->hash_list, &e->hash_list); } else { /* Add to l's tail: e is the last exception in this slot */ - hlist_bl_add_behind(&new_e->hash_list, &e->hash_list); + hlist_add_behind(&new_e->hash_list, &e->hash_list); } } @@ -820,7 +826,6 @@ out: */ static int dm_add_exception(void *context, chunk_t old, chunk_t new) { - struct dm_exception_table_lock lock; struct dm_snapshot *s = context; struct dm_exception *e; @@ -833,17 +838,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) /* Consecutive_count is implicitly initialised to zero */ e->new_chunk = new; - /* - * Although there is no need to lock access to the exception tables - * here, if we don't then hlist_bl_add_head(), called by - * dm_insert_exception(), will complain about accessing the - * corresponding list without locking it first. - */ - dm_exception_table_lock_init(s, old, &lock); - - dm_exception_table_lock(&lock); dm_insert_exception(&s->complete, e); - dm_exception_table_unlock(&lock); return 0; } @@ -873,7 +868,7 @@ static int calc_max_buckets(void) /* use a fixed size of 2MB */ unsigned long mem = 2 * 1024 * 1024; - mem /= sizeof(struct hlist_bl_head); + mem /= sizeof(struct dm_hlist_head); return mem; } diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index bfaef27ca79f4a..22bc70923a8392 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c @@ -86,17 +86,13 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) { - sprintf(buf, "%d\n", dm_suspended_md(md)); - - return strlen(buf); + return sysfs_emit(buf, "%d\n", dm_suspended_md(md)); } static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf) { /* Purely for userspace compatibility */ - sprintf(buf, "%d\n", true); - - return strlen(buf); + return sysfs_emit(buf, "%d\n", true); } static DM_ATTR_RO(name); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ad0a60a07b9355..0522cd700e0e2d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -2043,6 +2043,10 @@ bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size, return true; } +/* + * This function will be skipped by noflush reloads of immutable request + * based devices (dm-mpath). 
+ */ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index c84149ba4e38e9..52ffb495f5a8e6 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -395,13 +395,13 @@ static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio * op->bio = NULL; } -static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e) +static void issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e) { struct thin_c *tc = op->tc; sector_t s = block_to_sectors(tc->pool, data_b); sector_t len = block_to_sectors(tc->pool, data_e - data_b); - return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); + __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); } static void end_discard(struct discard_op *op, int r) @@ -1113,9 +1113,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m break; } - r = issue_discard(&op, b, e); - if (r) - goto out; + issue_discard(&op, b, e); b = e; } @@ -1188,8 +1186,8 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) struct discard_op op; begin_discard(&op, tc, discard_parent); - r = issue_discard(&op, m->data_block, data_end); - end_discard(&op, r); + issue_discard(&op, m->data_block, data_end); + end_discard(&op, 0); } } @@ -4383,11 +4381,8 @@ static void thin_postsuspend(struct dm_target *ti) { struct thin_c *tc = ti->private; - /* - * The dm_noflush_suspending flag has been cleared by now, so - * unfortunately we must always run this. - */ - noflush_work(tc, do_noflush_stop); + if (dm_noflush_suspending(ti)) + noflush_work(tc, do_noflush_stop); } static int thin_preresume(struct dm_target *ti) diff --git a/drivers/md/dm-vdo/action-manager.c b/drivers/md/dm-vdo/action-manager.c index a0e5e7077d1386..e3bba0b28aad87 100644 --- a/drivers/md/dm-vdo/action-manager.c +++ b/drivers/md/dm-vdo/action-manager.c @@ -43,7 +43,7 @@ struct action { * @actions: The two action slots. * @current_action: The current action slot. * @zones: The number of zones in which an action is to be applied. - * @Scheduler: A function to schedule a default next action. + * @scheduler: A function to schedule a default next action. * @get_zone_thread_id: A function to get the id of the thread on which to apply an action to a * zone. * @initiator_thread_id: The ID of the thread on which actions may be initiated. diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c index 3f9dba525154c4..da153fef085e68 100644 --- a/drivers/md/dm-vdo/admin-state.c +++ b/drivers/md/dm-vdo/admin-state.c @@ -149,7 +149,8 @@ const struct admin_state_code *VDO_ADMIN_STATE_RESUMING = &VDO_CODE_RESUMING; /** * get_next_state() - Determine the state which should be set after a given operation completes * based on the operation and the current state. - * @operation The operation to be started. + * @state: The current admin state. + * @operation: The operation to be started. * * Return: The state to set when the operation completes or NULL if the operation can not be * started in the current state. @@ -187,6 +188,8 @@ static const struct admin_state_code *get_next_state(const struct admin_state *s /** * vdo_finish_operation() - Finish the current operation. + * @state: The current admin state. + * @result: The result of the operation. * * Will notify the operation waiter if there is one. 
This method should be used for operations * started with vdo_start_operation(). For operations which were started with vdo_start_draining(), @@ -214,8 +217,10 @@ bool vdo_finish_operation(struct admin_state *state, int result) /** * begin_operation() - Begin an operation if it may be started given the current state. - * @waiter A completion to notify when the operation is complete; may be NULL. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The operation to be started. + * @waiter: A completion to notify when the operation is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: VDO_SUCCESS or an error. */ @@ -259,8 +264,10 @@ static int __must_check begin_operation(struct admin_state *state, /** * start_operation() - Start an operation if it may be started given the current state. - * @waiter A completion to notify when the operation is complete. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The operation to be started. + * @waiter: A completion to notify when the operation is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the operation was started. */ @@ -274,10 +281,10 @@ static inline bool __must_check start_operation(struct admin_state *state, /** * check_code() - Check the result of a state validation. - * @valid true if the code is of an appropriate type. - * @code The code which failed to be of the correct type. - * @what What the code failed to be, for logging. - * @waiter The completion to notify of the error; may be NULL. + * @valid: True if the code is of an appropriate type. + * @code: The code which failed to be of the correct type. + * @what: What the code failed to be, for logging. + * @waiter: The completion to notify of the error; may be NULL. * * If the result failed, log an invalid state error and, if there is a waiter, notify it. * @@ -301,7 +308,8 @@ static bool check_code(bool valid, const struct admin_state_code *code, const ch /** * assert_vdo_drain_operation() - Check that an operation is a drain. - * @waiter The completion to finish with an error if the operation is not a drain. + * @operation: The operation to check. + * @waiter: The completion to finish with an error if the operation is not a drain. * * Return: true if the specified operation is a drain. */ @@ -313,9 +321,10 @@ static bool __must_check assert_vdo_drain_operation(const struct admin_state_cod /** * vdo_start_draining() - Initiate a drain operation if the current state permits it. - * @operation The type of drain to initiate. - * @waiter The completion to notify when the drain is complete. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The type of drain to initiate. + * @waiter: The completion to notify when the drain is complete. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the drain was initiated, if not the waiter will be notified. */ @@ -345,6 +354,7 @@ bool vdo_start_draining(struct admin_state *state, /** * vdo_finish_draining() - Finish a drain operation if one was in progress. + * @state: The current admin state. 
* * Return: true if the state was draining; will notify the waiter if so. */ @@ -355,6 +365,8 @@ bool vdo_finish_draining(struct admin_state *state) /** * vdo_finish_draining_with_result() - Finish a drain operation with a status code. + * @state: The current admin state. + * @result: The result of the drain operation. * * Return: true if the state was draining; will notify the waiter if so. */ @@ -365,7 +377,8 @@ bool vdo_finish_draining_with_result(struct admin_state *state, int result) /** * vdo_assert_load_operation() - Check that an operation is a load. - * @waiter The completion to finish with an error if the operation is not a load. + * @operation: The operation to check. + * @waiter: The completion to finish with an error if the operation is not a load. * * Return: true if the specified operation is a load. */ @@ -377,9 +390,10 @@ bool vdo_assert_load_operation(const struct admin_state_code *operation, /** * vdo_start_loading() - Initiate a load operation if the current state permits it. - * @operation The type of load to initiate. - * @waiter The completion to notify when the load is complete (may be NULL). - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The type of load to initiate. + * @waiter: The completion to notify when the load is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the load was initiated, if not the waiter will be notified. */ @@ -393,6 +407,7 @@ bool vdo_start_loading(struct admin_state *state, /** * vdo_finish_loading() - Finish a load operation if one was in progress. + * @state: The current admin state. * * Return: true if the state was loading; will notify the waiter if so. */ @@ -403,7 +418,8 @@ bool vdo_finish_loading(struct admin_state *state) /** * vdo_finish_loading_with_result() - Finish a load operation with a status code. - * @result The result of the load operation. + * @state: The current admin state. + * @result: The result of the load operation. * * Return: true if the state was loading; will notify the waiter if so. */ @@ -414,7 +430,8 @@ bool vdo_finish_loading_with_result(struct admin_state *state, int result) /** * assert_vdo_resume_operation() - Check whether an admin_state_code is a resume operation. - * @waiter The completion to notify if the operation is not a resume operation; may be NULL. + * @operation: The operation to check. + * @waiter: The completion to notify if the operation is not a resume operation; may be NULL. * * Return: true if the code is a resume operation. */ @@ -427,9 +444,10 @@ static bool __must_check assert_vdo_resume_operation(const struct admin_state_co /** * vdo_start_resuming() - Initiate a resume operation if the current state permits it. - * @operation The type of resume to start. - * @waiter The completion to notify when the resume is complete (may be NULL). - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The type of resume to start. + * @waiter: The completion to notify when the resume is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the resume was initiated, if not the waiter will be notified. */ @@ -443,6 +461,7 @@ bool vdo_start_resuming(struct admin_state *state, /** * vdo_finish_resuming() - Finish a resume operation if one was in progress. 
+ * @state: The current admin state. * * Return: true if the state was resuming; will notify the waiter if so. */ @@ -453,7 +472,8 @@ bool vdo_finish_resuming(struct admin_state *state) /** * vdo_finish_resuming_with_result() - Finish a resume operation with a status code. - * @result The result of the resume operation. + * @state: The current admin state. + * @result: The result of the resume operation. * * Return: true if the state was resuming; will notify the waiter if so. */ @@ -465,6 +485,7 @@ bool vdo_finish_resuming_with_result(struct admin_state *state, int result) /** * vdo_resume_if_quiescent() - Change the state to normal operation if the current state is * quiescent. + * @state: The current admin state. * * Return: VDO_SUCCESS if the state resumed, VDO_INVALID_ADMIN_STATE otherwise. */ @@ -479,6 +500,8 @@ int vdo_resume_if_quiescent(struct admin_state *state) /** * vdo_start_operation() - Attempt to start an operation. + * @state: The current admin state. + * @operation: The operation to attempt to start. * * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not */ @@ -490,8 +513,10 @@ int vdo_start_operation(struct admin_state *state, /** * vdo_start_operation_with_waiter() - Attempt to start an operation. - * @waiter the completion to notify when the operation completes or fails to start; may be NULL. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The operation to attempt to start. + * @waiter: The completion to notify when the operation completes or fails to start; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not */ diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index baf683cabb1bad..a7db5b41155e9a 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -174,6 +174,7 @@ static inline struct vdo_page_completion *page_completion_from_waiter(struct vdo /** * initialize_info() - Initialize all page info structures and put them on the free list. + * @cache: The page cache. * * Return: VDO_SUCCESS or an error. */ @@ -209,6 +210,7 @@ static int initialize_info(struct vdo_page_cache *cache) /** * allocate_cache_components() - Allocate components of the cache which require their own * allocation. + * @cache: The page cache. * * The caller is responsible for all clean up on errors. * @@ -238,6 +240,8 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache) /** * assert_on_cache_thread() - Assert that a function has been called on the VDO page cache's * thread. + * @cache: The page cache. + * @function_name: The funtion name to report if the assertion fails. */ static inline void assert_on_cache_thread(struct vdo_page_cache *cache, const char *function_name) @@ -271,6 +275,7 @@ static void report_cache_pressure(struct vdo_page_cache *cache) /** * get_page_state_name() - Return the name of a page state. + * @state: The page state to describe. * * If the page state is invalid a static string is returned and the invalid state is logged. * @@ -342,6 +347,8 @@ static void update_lru(struct page_info *info) /** * set_info_state() - Set the state of a page_info and put it on the right list, adjusting * counters. + * @info: The page info to update. + * @new_state: The new state to set. 
*/ static void set_info_state(struct page_info *info, enum vdo_page_buffer_state new_state) { @@ -416,6 +423,7 @@ static int reset_page_info(struct page_info *info) /** * find_free_page() - Find a free page. + * @cache: The page cache. * * Return: A pointer to the page info structure (if found), NULL otherwise. */ @@ -433,6 +441,7 @@ static struct page_info * __must_check find_free_page(struct vdo_page_cache *cac /** * find_page() - Find the page info (if any) associated with a given pbn. + * @cache: The page cache. * @pbn: The absolute physical block number of the page. * * Return: The page info for the page if available, or NULL if not. @@ -449,6 +458,7 @@ static struct page_info * __must_check find_page(struct vdo_page_cache *cache, /** * select_lru_page() - Determine which page is least recently used. + * @cache: The page cache. * * Picks the least recently used from among the non-busy entries at the front of each of the lru * list. Since whenever we mark a page busy we also put it to the end of the list it is unlikely @@ -523,6 +533,8 @@ static void complete_waiter_with_page(struct vdo_waiter *waiter, void *page_info /** * distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result. + * @info: The loaded page info. + * @waitq: The list of waiting data_vios. * * Upon completion the waitq will be empty. * @@ -548,7 +560,9 @@ static unsigned int distribute_page_over_waitq(struct page_info *info, /** * set_persistent_error() - Set a persistent error which all requests will receive in the future. + * @cache: The page cache. * @context: A string describing what triggered the error. + * @result: The error result to set on the cache. * * Once triggered, all enqueued completions will get this error. Any future requests will result in * this error as well. @@ -581,6 +595,7 @@ static void set_persistent_error(struct vdo_page_cache *cache, const char *conte /** * validate_completed_page() - Check that a page completion which is being freed to the cache * referred to a valid page and is in a valid state. + * @completion: The page completion to check. * @writable: Whether a writable page is required. * * Return: VDO_SUCCESS if the page was valid, otherwise as error @@ -758,6 +773,8 @@ static void load_cache_page_endio(struct bio *bio) /** * launch_page_load() - Begin the process of loading a page. + * @info: The page info to launch. + * @pbn: The absolute physical block number of the page to load. * * Return: VDO_SUCCESS or an error code. */ @@ -836,6 +853,7 @@ static void save_pages(struct vdo_page_cache *cache) /** * schedule_page_save() - Add a page to the outgoing list of pages waiting to be saved. + * @info: The page info to save. * * Once in the list, a page may not be used until it has been written out. */ @@ -854,6 +872,7 @@ static void schedule_page_save(struct page_info *info) /** * launch_page_save() - Add a page to outgoing pages waiting to be saved, and then start saving * pages if another save is not in progress. + * @info: The page info to save. */ static void launch_page_save(struct page_info *info) { @@ -864,6 +883,7 @@ static void launch_page_save(struct page_info *info) /** * completion_needs_page() - Determine whether a given vdo_page_completion (as a waiter) is * requesting a given page number. + * @waiter: The page completion waiter to check. * @context: A pointer to the pbn of the desired page. * * Implements waiter_match_fn. 
@@ -880,6 +900,7 @@ static bool completion_needs_page(struct vdo_waiter *waiter, void *context) /** * allocate_free_page() - Allocate a free page to the first completion in the waiting queue, and * any other completions that match it in page number. + * @info: The page info to allocate a page for. */ static void allocate_free_page(struct page_info *info) { @@ -925,6 +946,7 @@ static void allocate_free_page(struct page_info *info) /** * discard_a_page() - Begin the process of discarding a page. + * @cache: The page cache. * * If no page is discardable, increments a count of deferred frees so that the next release of a * page which is no longer busy will kick off another discard cycle. This is an indication that the @@ -955,10 +977,6 @@ static void discard_a_page(struct vdo_page_cache *cache) launch_page_save(info); } -/** - * discard_page_for_completion() - Helper used to trigger a discard so that the completion can get - * a different page. - */ static void discard_page_for_completion(struct vdo_page_completion *vdo_page_comp) { struct vdo_page_cache *cache = vdo_page_comp->cache; @@ -1132,6 +1150,7 @@ static void write_pages(struct vdo_completion *flush_completion) /** * vdo_release_page_completion() - Release a VDO Page Completion. + * @completion: The page completion to release. * * The page referenced by this completion (if any) will no longer be held busy by this completion. * If a page becomes discardable and there are completions awaiting free pages then a new round of @@ -1172,10 +1191,6 @@ void vdo_release_page_completion(struct vdo_completion *completion) } } -/** - * load_page_for_completion() - Helper function to load a page as described by a VDO Page - * Completion. - */ static void load_page_for_completion(struct page_info *info, struct vdo_page_completion *vdo_page_comp) { @@ -1319,6 +1334,7 @@ int vdo_get_cached_page(struct vdo_completion *completion, /** * vdo_invalidate_page_cache() - Invalidate all entries in the VDO page cache. + * @cache: The page cache. * * There must not be any dirty pages in the cache. * @@ -1345,6 +1361,10 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache) /** * get_tree_page_by_index() - Get the tree page for a given height and page index. + * @forest: The block map forest. + * @root_index: The root index of the tree to search. + * @height: The height in the tree. + * @page_index: The page index. * * Return: The requested page. */ @@ -2211,6 +2231,7 @@ static void allocate_block_map_page(struct block_map_zone *zone, /** * vdo_find_block_map_slot() - Find the block map slot in which the block map entry for a data_vio * resides and cache that result in the data_vio. + * @data_vio: The data vio. * * All ancestors in the tree will be allocated or loaded, as needed. */ @@ -2435,6 +2456,7 @@ static void deforest(struct forest *forest, size_t first_page_segment) /** * make_forest() - Make a collection of trees for a block_map, expanding the existing forest if * there is one. + * @map: The block map. * @entries: The number of entries the block map will hold. * * Return: VDO_SUCCESS or an error. @@ -2476,6 +2498,7 @@ static int make_forest(struct block_map *map, block_count_t entries) /** * replace_forest() - Replace a block_map's forest with the already-prepared larger forest. + * @map: The block map. */ static void replace_forest(struct block_map *map) { @@ -2492,6 +2515,7 @@ static void replace_forest(struct block_map *map) /** * finish_cursor() - Finish the traversal of a single tree. If it was the last cursor, finish the * traversal. 
+ * @cursor: The cursor to complete. */ static void finish_cursor(struct cursor *cursor) { @@ -2549,6 +2573,7 @@ static void traversal_endio(struct bio *bio) /** * traverse() - Traverse a single block map tree. + * @cursor: A cursor tracking traversal progress. * * This is the recursive heart of the traversal process. */ @@ -2619,6 +2644,7 @@ static void traverse(struct cursor *cursor) /** * launch_cursor() - Start traversing a single block map tree now that the cursor has a VIO with * which to load pages. + * @waiter: The parent of the cursor to launch. * @context: The pooled_vio just acquired. * * Implements waiter_callback_fn. @@ -2636,6 +2662,8 @@ static void launch_cursor(struct vdo_waiter *waiter, void *context) /** * compute_boundary() - Compute the number of pages used at each level of the given root's tree. + * @map: The block map. + * @root_index: The tree root index. * * Return: The list of page counts as a boundary structure. */ @@ -2668,6 +2696,7 @@ static struct boundary compute_boundary(struct block_map *map, root_count_t root /** * vdo_traverse_forest() - Walk the entire forest of a block map. + * @map: The block map. * @callback: A function to call with the pbn of each allocated node in the forest. * @completion: The completion to notify on each traversed PBN, and when traversal completes. */ @@ -2707,6 +2736,9 @@ void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback, /** * initialize_block_map_zone() - Initialize the per-zone portions of the block map. + * @map: The block map. + * @zone_number: The zone to initialize. + * @cache_size: The total block map cache size. * @maximum_age: The number of journal blocks before a dirtied page is considered old and must be * written out. */ @@ -3091,6 +3123,7 @@ static void fetch_mapping_page(struct data_vio *data_vio, bool modifiable, /** * clear_mapped_location() - Clear a data_vio's mapped block location, setting it to be unmapped. + * @data_vio: The data vio. * * This indicates the block map entry for the logical block is either unmapped or corrupted. */ @@ -3104,6 +3137,8 @@ static void clear_mapped_location(struct data_vio *data_vio) /** * set_mapped_location() - Decode and validate a block map entry, and set the mapped location of a * data_vio. + * @data_vio: The data vio. + * @entry: The new mapped entry to set. * * Return: VDO_SUCCESS or VDO_BAD_MAPPING if the map entry is invalid or an error code for any * other failure diff --git a/drivers/md/dm-vdo/completion.c b/drivers/md/dm-vdo/completion.c index 5ad85334632d79..2f00acbb3b2b54 100644 --- a/drivers/md/dm-vdo/completion.c +++ b/drivers/md/dm-vdo/completion.c @@ -65,6 +65,8 @@ static inline void assert_incomplete(struct vdo_completion *completion) /** * vdo_set_completion_result() - Set the result of a completion. + * @completion: The completion to update. + * @result: The result to set. * * Older errors will not be masked. */ @@ -77,6 +79,7 @@ void vdo_set_completion_result(struct vdo_completion *completion, int result) /** * vdo_launch_completion_with_priority() - Run or enqueue a completion. + * @completion: The completion to launch. * @priority: The priority at which to enqueue the completion. * * If called on the correct thread (i.e. the one specified in the completion's callback_thread_id @@ -125,6 +128,8 @@ void vdo_enqueue_completion(struct vdo_completion *completion, /** * vdo_requeue_completion_if_needed() - Requeue a completion if not called on the specified thread. + * @completion: The completion to requeue. 
+ * @callback_thread_id: The thread on which to requeue the completion. * * Return: True if the completion was requeued; callers may not access the completion in this case. */ diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index 262e11581f2dc8..3333e1e5b02ee7 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -227,6 +227,7 @@ static inline u64 get_arrival_time(struct bio *bio) /** * check_for_drain_complete_locked() - Check whether a data_vio_pool has no outstanding data_vios * or waiters while holding the pool's lock. + * @pool: The data_vio pool. */ static bool check_for_drain_complete_locked(struct data_vio_pool *pool) { @@ -387,6 +388,7 @@ struct data_vio_compression_status advance_data_vio_compression_stage(struct dat /** * cancel_data_vio_compression() - Prevent this data_vio from being compressed or packed. + * @data_vio: The data_vio. * * Return: true if the data_vio is in the packer and the caller was the first caller to cancel it. */ @@ -483,6 +485,8 @@ static void attempt_logical_block_lock(struct vdo_completion *completion) /** * launch_data_vio() - (Re)initialize a data_vio to have a new logical block number, keeping the * same parent and other state and send it on its way. + * @data_vio: The data_vio to launch. + * @lbn: The logical block number. */ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lbn) { @@ -641,6 +645,7 @@ static void update_limiter(struct limiter *limiter) /** * schedule_releases() - Ensure that release processing is scheduled. + * @pool: The data_vio pool. * * If this call switches the state to processing, enqueue. Otherwise, some other thread has already * done so. @@ -768,6 +773,8 @@ static void initialize_limiter(struct limiter *limiter, struct data_vio_pool *po /** * initialize_data_vio() - Allocate the components of a data_vio. + * @data_vio: The data_vio to initialize. + * @vdo: The vdo containing the data_vio. * * The caller is responsible for cleaning up the data_vio on error. * @@ -880,6 +887,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, /** * free_data_vio_pool() - Free a data_vio_pool and the data_vios in it. + * @pool: The data_vio pool to free. * * All data_vios must be returned to the pool before calling this function. */ @@ -944,6 +952,8 @@ static void wait_permit(struct limiter *limiter, struct bio *bio) /** * vdo_launch_bio() - Acquire a data_vio from the pool, assign the bio to it, and launch it. + * @pool: The data_vio pool. + * @bio: The bio to launch. * * This will block if data_vios or discard permits are not available. */ @@ -994,6 +1004,7 @@ static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name) /** * drain_data_vio_pool() - Wait asynchronously for all data_vios to be returned to the pool. + * @pool: The data_vio pool. * @completion: The completion to notify when the pool has drained. */ void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion) @@ -1005,6 +1016,7 @@ void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *comp /** * resume_data_vio_pool() - Resume a data_vio pool. + * @pool: The data_vio pool. * @completion: The completion to notify when the pool has resumed. */ void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion) @@ -1024,6 +1036,7 @@ static void dump_limiter(const char *name, struct limiter *limiter) /** * dump_data_vio_pool() - Dump a data_vio pool to the log. + * @pool: The data_vio pool. 
* @dump_vios: Whether to dump the details of each busy data_vio as well. */ void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios) @@ -1114,6 +1127,7 @@ static void perform_cleanup_stage(struct data_vio *data_vio, /** * release_allocated_lock() - Release the PBN lock and/or the reference on the allocated block at * the end of processing a data_vio. + * @completion: The data_vio holding the lock. */ static void release_allocated_lock(struct vdo_completion *completion) { @@ -1194,6 +1208,7 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock) /** * release_logical_lock() - Release the logical block lock and flush generation lock at the end of * processing a data_vio. + * @completion: The data_vio holding the lock. */ static void release_logical_lock(struct vdo_completion *completion) { @@ -1228,6 +1243,7 @@ static void clean_hash_lock(struct vdo_completion *completion) /** * finish_cleanup() - Make some assertions about a data_vio which has finished cleaning up. + * @data_vio: The data_vio. * * If it is part of a multi-block discard, starts on the next block, otherwise, returns it to the * pool. @@ -1342,6 +1358,7 @@ void handle_data_vio_error(struct vdo_completion *completion) /** * get_data_vio_operation_name() - Get the name of the last asynchronous operation performed on a * data_vio. + * @data_vio: The data_vio. */ const char *get_data_vio_operation_name(struct data_vio *data_vio) { @@ -1355,7 +1372,7 @@ const char *get_data_vio_operation_name(struct data_vio *data_vio) /** * data_vio_allocate_data_block() - Allocate a data block. - * + * @data_vio: The data_vio. * @write_lock_type: The type of write lock to obtain on the block. * @callback: The callback which will attempt an allocation in the current zone and continue if it * succeeds. @@ -1379,6 +1396,7 @@ void data_vio_allocate_data_block(struct data_vio *data_vio, /** * release_data_vio_allocation_lock() - Release the PBN lock on a data_vio's allocated block. + * @data_vio: The data_vio. * @reset: If true, the allocation will be reset (i.e. any allocated pbn will be forgotten). * * If the reference to the locked block is still provisional, it will be released as well. @@ -1399,6 +1417,7 @@ void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset) /** * uncompress_data_vio() - Uncompress the data a data_vio has just read. + * @data_vio: The data_vio. * @mapping_state: The mapping state indicating which fragment to decompress. * @buffer: The buffer to receive the uncompressed data. */ @@ -1519,6 +1538,7 @@ static void complete_zero_read(struct vdo_completion *completion) /** * read_block() - Read a block asynchronously. + * @completion: The data_vio doing the read. * * This is the callback registered in read_block_mapping(). */ @@ -1675,6 +1695,7 @@ static void journal_remapping(struct vdo_completion *completion) /** * read_old_block_mapping() - Get the previous PBN/LBN mapping of an in-progress write. + * @completion: The data_vio doing the read. * * Gets the previous PBN mapped to this LBN from the block map, so as to make an appropriate * journal entry referencing the removal of this LBN->PBN mapping. @@ -1704,6 +1725,7 @@ void update_metadata_for_data_vio_write(struct data_vio *data_vio, struct pbn_lo /** * pack_compressed_data() - Attempt to pack the compressed data_vio into a block. + * @completion: The data_vio. * * This is the callback registered in launch_compress_data_vio(). 
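pack_compressed_data(), together with the @offset and @slot parameters added to pack_fragment() and vdo_get_compressed_block_fragment() elsewhere in this patch, all describe the same bookkeeping: compressed fragments are appended back to back inside a fixed-size block, and a small per-slot size table lets a later reader recover each fragment's offset and length. Below is a deliberately simplified, self-contained sketch of that idea (toy structures only, not VDO's on-disk compressed block format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE     4096
#define MAX_FRAGMENTS  14

/* Toy stand-in for a compressed block: a slot-size table plus packed data. */
struct toy_compressed_block {
        uint16_t sizes[MAX_FRAGMENTS];  /* size of the fragment in each slot */
        uint8_t data[BLOCK_SIZE];       /* fragments packed back to back */
};

struct toy_packing {
        struct toy_compressed_block block;
        unsigned int slot;      /* next free slot */
        unsigned int offset;    /* next free byte in data[] */
};

/* Pack one fragment; returns the slot used, or -1 if it does not fit. */
static int pack_fragment(struct toy_packing *p, const uint8_t *frag, uint16_t size)
{
        if (p->slot >= MAX_FRAGMENTS || p->offset + size > BLOCK_SIZE)
                return -1;

        memcpy(&p->block.data[p->offset], frag, size);
        p->block.sizes[p->slot] = size;
        p->offset += size;
        return p->slot++;
}

/* Recover a fragment's offset and size from the slot table, as a reader would. */
static int get_fragment(const struct toy_compressed_block *b, unsigned int slot,
                        unsigned int *offset, uint16_t *size)
{
        unsigned int i, off = 0;

        for (i = 0; i < slot; i++)
                off += b->sizes[i];
        if (b->sizes[slot] == 0)
                return -1;      /* empty slot */
        *offset = off;
        *size = b->sizes[slot];
        return 0;
}

int main(void)
{
        struct toy_packing p = { 0 };
        uint8_t a[100] = { 1 }, b[200] = { 2 };
        unsigned int off;
        uint16_t size;

        printf("a in slot %d\n", pack_fragment(&p, a, sizeof(a)));
        printf("b in slot %d\n", pack_fragment(&p, b, sizeof(b)));
        if (get_fragment(&p.block, 1, &off, &size) == 0)
                printf("slot 1: offset %u, size %u\n", off, size);      /* 100, 200 */
        return 0;
}

A reader only needs the slot number and the size table to locate a fragment, which mirrors why vdo_get_compressed_block_fragment() reports a fragment offset and size rather than a pointer.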
*/ @@ -1725,6 +1747,7 @@ static void pack_compressed_data(struct vdo_completion *completion) /** * compress_data_vio() - Do the actual work of compressing the data on a CPU queue. + * @completion: The data_vio. * * This callback is registered in launch_compress_data_vio(). */ @@ -1754,6 +1777,7 @@ static void compress_data_vio(struct vdo_completion *completion) /** * launch_compress_data_vio() - Continue a write by attempting to compress the data. + * @data_vio: The data_vio. * * This is a re-entry point to vio_write used by hash locks. */ @@ -1796,7 +1820,8 @@ void launch_compress_data_vio(struct data_vio *data_vio) /** * hash_data_vio() - Hash the data in a data_vio and set the hash zone (which also flags the record * name as set). - + * @completion: The data_vio. + * * This callback is registered in prepare_for_dedupe(). */ static void hash_data_vio(struct vdo_completion *completion) @@ -1832,6 +1857,7 @@ static void prepare_for_dedupe(struct data_vio *data_vio) /** * write_bio_finished() - This is the bio_end_io function registered in write_block() to be called * when a data_vio's write to the underlying storage has completed. + * @bio: The bio to update. */ static void write_bio_finished(struct bio *bio) { @@ -1884,6 +1910,7 @@ void write_data_vio(struct data_vio *data_vio) /** * acknowledge_write_callback() - Acknowledge a write to the requestor. + * @completion: The data_vio. * * This callback is registered in allocate_block() and continue_write_with_block_map_slot(). */ @@ -1909,6 +1936,7 @@ static void acknowledge_write_callback(struct vdo_completion *completion) /** * allocate_block() - Attempt to allocate a block in the current allocation zone. + * @completion: The data_vio. * * This callback is registered in continue_write_with_block_map_slot(). */ @@ -1941,6 +1969,7 @@ static void allocate_block(struct vdo_completion *completion) /** * handle_allocation_error() - Handle an error attempting to allocate a block. + * @completion: The data_vio. * * This error handler is registered in continue_write_with_block_map_slot(). */ @@ -1970,6 +1999,7 @@ static int assert_is_discard(struct data_vio *data_vio) /** * continue_data_vio_with_block_map_slot() - Read the data_vio's mapping from the block map. + * @completion: The data_vio to continue. * * This callback is registered in launch_read_data_vio(). */ diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 4d983092a15224..75a26f3f446100 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -917,6 +917,8 @@ static int __must_check acquire_lock(struct hash_zone *zone, /** * enter_forked_lock() - Bind the data_vio to a new hash lock. + * @waiter: The data_vio's waiter link. + * @context: The new hash lock. * * Implements waiter_callback_fn. Binds the data_vio that was waiting to a new hash lock and waits * on that lock. @@ -971,7 +973,7 @@ static void fork_hash_lock(struct hash_lock *old_lock, struct data_vio *new_agen * path. * @lock: The hash lock. * @data_vio: The data_vio to deduplicate using the hash lock. - * @has_claim: true if the data_vio already has claimed an increment from the duplicate lock. + * @has_claim: True if the data_vio already has claimed an increment from the duplicate lock. * * If no increments are available, this will roll over to a new hash lock and launch the data_vio * as the writing agent for that lock. @@ -996,7 +998,7 @@ static void launch_dedupe(struct hash_lock *lock, struct data_vio *data_vio, * true copy of their data on disk. * @lock: The hash lock. 
* @agent: The data_vio acting as the agent for the lock. - * @agent_is_done: true only if the agent has already written or deduplicated against its data. + * @agent_is_done: True only if the agent has already written or deduplicated against its data. * * If the agent itself needs to deduplicate, an increment for it must already have been claimed * from the duplicate lock, ensuring the hash lock will still have a data_vio holding it. @@ -2146,8 +2148,8 @@ static void start_expiration_timer(struct dedupe_context *context) /** * report_dedupe_timeouts() - Record and eventually report that some dedupe requests reached their * expiration time without getting answers, so we timed them out. - * @zones: the hash zones. - * @timeouts: the number of newly timed out requests. + * @zones: The hash zones. + * @timeouts: The number of newly timed out requests. */ static void report_dedupe_timeouts(struct hash_zones *zones, unsigned int timeouts) { @@ -2509,6 +2511,8 @@ static void initiate_suspend_index(struct admin_state *state) /** * suspend_index() - Suspend the UDS index prior to draining hash zones. + * @context: Not used. + * @completion: The completion for the suspend operation. * * Implements vdo_action_preamble_fn */ @@ -2521,21 +2525,13 @@ static void suspend_index(void *context, struct vdo_completion *completion) initiate_suspend_index); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct hash_zone, state)); } -/** - * drain_hash_zone() - Drain a hash zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void drain_hash_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -2572,6 +2568,8 @@ static void launch_dedupe_state_change(struct hash_zones *zones) /** * resume_index() - Resume the UDS index prior to resuming hash zones. + * @context: Not used. + * @parent: The completion for the resume operation. * * Implements vdo_action_preamble_fn */ @@ -2602,11 +2600,7 @@ static void resume_index(void *context, struct vdo_completion *parent) vdo_finish_completion(parent); } -/** - * resume_hash_zone() - Resume a hash zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void resume_hash_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -2634,7 +2628,7 @@ void vdo_resume_hash_zones(struct hash_zones *zones, struct vdo_completion *pare /** * get_hash_zone_statistics() - Add the statistics for this hash zone to the tally for all zones. * @zone: The hash zone to query. - * @tally: The tally + * @tally: The tally. */ static void get_hash_zone_statistics(const struct hash_zone *zone, struct hash_lock_statistics *tally) @@ -2680,8 +2674,8 @@ static void get_index_statistics(struct hash_zones *zones, /** * vdo_get_dedupe_statistics() - Tally the statistics from all the hash zones and the UDS index. - * @zones: The hash zones to query - * @stats: A structure to store the statistics + * @zones: The hash zones to query. + * @stats: A structure to store the statistics. * * Return: The sum of the hash lock statistics from all hash zones plus the statistics from the UDS * index @@ -2856,9 +2850,9 @@ void vdo_set_dedupe_index_min_timer_interval(unsigned int value) /** * acquire_context() - Acquire a dedupe context from a hash_zone if any are available. 
- * @zone: the hash zone + * @zone: The hash zone. * - * Return: A dedupe_context or NULL if none are available + * Return: A dedupe_context or NULL if none are available. */ static struct dedupe_context * __must_check acquire_context(struct hash_zone *zone) { diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 0e04c20216825f..6af40d40f2550a 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -1144,6 +1144,7 @@ static bool vdo_uses_device(struct vdo *vdo, const void *context) /** * get_thread_id_for_phase() - Get the thread id for the current phase of the admin operation in * progress. + * @vdo: The vdo. */ static thread_id_t __must_check get_thread_id_for_phase(struct vdo *vdo) { @@ -1188,9 +1189,9 @@ static struct vdo_completion *prepare_admin_completion(struct vdo *vdo, /** * advance_phase() - Increment the phase of the current admin operation and prepare the admin * completion to run on the thread for the next phase. - * @vdo: The on which an admin operation is being performed + * @vdo: The vdo on which an admin operation is being performed. * - * Return: The current phase + * Return: The current phase. */ static u32 advance_phase(struct vdo *vdo) { diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index b7cc0f41caca4b..dd59691be84099 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -432,7 +432,10 @@ static void encode_block_map_state_2_0(u8 *buffer, size_t *offset, /** * vdo_compute_new_forest_pages() - Compute the number of pages which must be allocated at each * level in order to grow the forest to a new number of entries. + * @root_count: The number of block map roots. + * @old_sizes: The sizes of the old tree segments. * @entries: The new number of entries the block map must address. + * @new_sizes: The sizes of the new tree segments. * * Return: The total number of non-leaf pages required. */ @@ -462,6 +465,9 @@ block_count_t vdo_compute_new_forest_pages(root_count_t root_count, /** * encode_recovery_journal_state_7_0() - Encode the state of a recovery journal. + * @buffer: A buffer to store the encoding. + * @offset: The offset in the buffer at which to encode. + * @state: The recovery journal state to encode. * * Return: VDO_SUCCESS or an error code. */ @@ -484,6 +490,7 @@ static void encode_recovery_journal_state_7_0(u8 *buffer, size_t *offset, /** * decode_recovery_journal_state_7_0() - Decode the state of a recovery journal saved in a buffer. * @buffer: The buffer containing the saved state. + * @offset: The offset to start decoding from. * @state: A pointer to a recovery journal state to hold the result of a successful decode. * * Return: VDO_SUCCESS or an error code. @@ -544,6 +551,9 @@ const char *vdo_get_journal_operation_name(enum journal_operation operation) /** * encode_slab_depot_state_2_0() - Encode the state of a slab depot into a buffer. + * @buffer: A buffer to store the encoding. + * @offset: The offset in the buffer at which to encode. + * @state: The slab depot state to encode. */ static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset, struct slab_depot_state_2_0 state) @@ -570,6 +580,9 @@ static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset, /** * decode_slab_depot_state_2_0() - Decode slab depot component state version 2.0 from a buffer. + * @buffer: The buffer being decoded. + * @offset: The offset to start decoding from. + * @state: A pointer to a slab depot state to hold the decoded result. 
* * Return: VDO_SUCCESS or an error code. */ @@ -1156,6 +1169,9 @@ static struct vdo_component unpack_vdo_component_41_0(struct packed_vdo_componen /** * decode_vdo_component() - Decode the component data for the vdo itself out of the super block. + * @buffer: The buffer being decoded. + * @offset: The offset to start decoding from. + * @component: The vdo component structure to decode into. * * Return: VDO_SUCCESS or an error. */ @@ -1290,7 +1306,7 @@ void vdo_destroy_component_states(struct vdo_component_states *states) * understand. * @buffer: The buffer being decoded. * @offset: The offset to start decoding from. - * @geometry: The vdo geometry + * @geometry: The vdo geometry. * @states: An object to hold the successfully decoded state. * * Return: VDO_SUCCESS or an error. @@ -1329,7 +1345,7 @@ static int __must_check decode_components(u8 *buffer, size_t *offset, /** * vdo_decode_component_states() - Decode the payload of a super block. * @buffer: The buffer containing the encoded super block contents. - * @geometry: The vdo geometry + * @geometry: The vdo geometry. * @states: A pointer to hold the decoded states. * * Return: VDO_SUCCESS or an error. @@ -1383,6 +1399,9 @@ int vdo_validate_component_states(struct vdo_component_states *states, /** * vdo_encode_component_states() - Encode the state of all vdo components in the super block. + * @buffer: A buffer to store the encoding. + * @offset: The offset into the buffer to start the encoding. + * @states: The component states to encode. */ static void vdo_encode_component_states(u8 *buffer, size_t *offset, const struct vdo_component_states *states) @@ -1402,6 +1421,8 @@ static void vdo_encode_component_states(u8 *buffer, size_t *offset, /** * vdo_encode_super_block() - Encode a super block into its on-disk representation. + * @buffer: A buffer to store the encoding. + * @states: The component states to encode. */ void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states) { @@ -1426,6 +1447,7 @@ void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states) /** * vdo_decode_super_block() - Decode a super block from its on-disk representation. + * @buffer: The buffer to decode from. */ int vdo_decode_super_block(u8 *buffer) { diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c index dd4fdee2ca0c5a..82a259ef16012b 100644 --- a/drivers/md/dm-vdo/flush.c +++ b/drivers/md/dm-vdo/flush.c @@ -522,11 +522,7 @@ static void vdo_complete_flush(struct vdo_flush *flush) vdo_enqueue_completion(completion, BIO_Q_FLUSH_PRIORITY); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct flusher, state)); diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c index 0613c82bbe8eab..8a79b33b8b0904 100644 --- a/drivers/md/dm-vdo/funnel-workqueue.c +++ b/drivers/md/dm-vdo/funnel-workqueue.c @@ -372,6 +372,13 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na /** * vdo_make_work_queue() - Create a work queue; if multiple threads are requested, completions will * be distributed to them in round-robin fashion. + * @thread_name_prefix: A prefix for the thread names to identify them as a vdo thread. + * @name: A base name to identify this queue. + * @owner: The vdo_thread structure to manage this queue. + * @type: The type of queue to create. 
+ * @thread_count: The number of actual threads handling this queue. + * @thread_privates: An array of private contexts, one for each thread; may be NULL. + * @queue_ptr: A pointer to return the new work queue. * * Each queue is associated with a struct vdo_thread which has a single vdo thread id. Regardless * of the actual number of queues and threads allocated here, code outside of the queue diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index 11d47770b54d21..e26d75f8366dde 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -118,6 +118,7 @@ static void send_bio_to_device(struct vio *vio, struct bio *bio) /** * vdo_submit_vio() - Submits a vio's bio to the underlying block device. May block if the device * is busy. This callback should be used by vios which did not attempt to merge. + * @completion: The vio to submit. */ void vdo_submit_vio(struct vdo_completion *completion) { @@ -133,7 +134,7 @@ void vdo_submit_vio(struct vdo_completion *completion) * The list will always contain at least one entry (the bio for the vio on which it is called), but * other bios may have been merged with it as well. * - * Return: bio The head of the bio list to submit. + * Return: The head of the bio list to submit. */ static struct bio *get_bio_list(struct vio *vio) { @@ -158,6 +159,7 @@ static struct bio *get_bio_list(struct vio *vio) /** * submit_data_vio() - Submit a data_vio's bio to the storage below along with * any bios that have been merged with it. + * @completion: The vio to submit. * * Context: This call may block and so should only be called from a bio thread. */ @@ -184,7 +186,7 @@ static void submit_data_vio(struct vdo_completion *completion) * There are two types of merging possible, forward and backward, which are distinguished by a flag * that uses kernel elevator terminology. * - * Return: the vio to merge to, NULL if no merging is possible. + * Return: The vio to merge to, NULL if no merging is possible. */ static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio, bool back_merge) @@ -262,7 +264,7 @@ static int merge_to_next_head(struct int_map *bio_map, struct vio *vio, * * Currently this is only used for data_vios, but is broken out for future use with metadata vios. * - * Return: whether or not the vio was merged. + * Return: Whether or not the vio was merged. */ static bool try_bio_map_merge(struct vio *vio) { @@ -306,7 +308,7 @@ static bool try_bio_map_merge(struct vio *vio) /** * vdo_submit_data_vio() - Submit I/O for a data_vio. - * @data_vio: the data_vio for which to issue I/O. + * @data_vio: The data_vio for which to issue I/O. * * If possible, this I/O will be merged other pending I/Os. Otherwise, the data_vio will be sent to * the appropriate bio zone directly. @@ -321,13 +323,13 @@ void vdo_submit_data_vio(struct data_vio *data_vio) /** * __submit_metadata_vio() - Submit I/O for a metadata vio. - * @vio: the vio for which to issue I/O - * @physical: the physical block number to read or write - * @callback: the bio endio function which will be called after the I/O completes - * @error_handler: the handler for submission or I/O errors (may be NULL) - * @operation: the type of I/O to perform - * @data: the buffer to read or write (may be NULL) - * @size: the I/O amount in bytes + * @vio: The vio for which to issue I/O. + * @physical: The physical block number to read or write. + * @callback: The bio endio function which will be called after the I/O completes. 
+ * @error_handler: The handler for submission or I/O errors; may be NULL. + * @operation: The type of I/O to perform. + * @data: The buffer to read or write; may be NULL. + * @size: The I/O amount in bytes. * * The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block * other vdo threads. @@ -441,7 +443,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter /** * vdo_cleanup_io_submitter() - Tear down the io_submitter fields as needed for a physical layer. - * @io_submitter: The I/O submitter data to tear down (may be NULL). + * @io_submitter: The I/O submitter data to tear down; may be NULL. */ void vdo_cleanup_io_submitter(struct io_submitter *io_submitter) { diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c index 026f031ffc9e76..0a27e60a9dfd70 100644 --- a/drivers/md/dm-vdo/logical-zone.c +++ b/drivers/md/dm-vdo/logical-zone.c @@ -159,21 +159,13 @@ static void check_for_drain_complete(struct logical_zone *zone) vdo_finish_draining(&zone->state); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct logical_zone, state)); } -/** - * drain_logical_zone() - Drain a logical zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void drain_logical_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -192,11 +184,7 @@ void vdo_drain_logical_zones(struct logical_zones *zones, parent); } -/** - * resume_logical_zone() - Resume a logical zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void resume_logical_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -356,7 +344,7 @@ struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone) /** * vdo_dump_logical_zone() - Dump information about a logical zone to the log for debugging. - * @zone: The zone to dump + * @zone: The zone to dump. * * Context: the information is dumped in a thread-unsafe fashion. * diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index f70f5edabc1000..666be6d557e100 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c @@ -35,10 +35,10 @@ static const struct version_number COMPRESSED_BLOCK_1_0 = { /** * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed * block. - * @mapping_state [in] The mapping state for the look up. - * @compressed_block [in] The compressed block that was read from disk. - * @fragment_offset [out] The offset of the fragment within a compressed block. - * @fragment_size [out] The size of the fragment. + * @mapping_state: The mapping state describing the fragment. + * @block: The compressed block that was read from disk. + * @fragment_offset: The offset of the fragment within the compressed block. + * @fragment_size: The size of the fragment. * * Return: If a valid compressed fragment is found, VDO_SUCCESS; otherwise, VDO_INVALID_FRAGMENT if * the fragment is invalid. @@ -382,6 +382,7 @@ static void initialize_compressed_block(struct compressed_block *block, u16 size * @compression: The agent's compression_state to pack in to. * @data_vio: The data_vio to pack. * @offset: The offset into the compressed block at which to pack the fragment. 
+ * @slot: The slot number in the compressed block. * @block: The compressed block which will be written out when batch is fully packed. * * Return: The new amount of space used. @@ -705,11 +706,7 @@ void vdo_increment_packer_flush_generation(struct packer *packer) vdo_flush_packer(packer); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { struct packer *packer = container_of(state, struct packer, state); diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index a43b5c45fab7c5..686eb7d714e60b 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -60,7 +60,7 @@ static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock. * @lock: The lock to check. * - * Return: true if the lock is a read lock. + * Return: True if the lock is a read lock. */ bool vdo_is_pbn_read_lock(const struct pbn_lock *lock) { @@ -75,6 +75,7 @@ static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type t /** * vdo_downgrade_pbn_write_lock() - Downgrade a PBN write lock to a PBN read lock. * @lock: The PBN write lock to downgrade. + * @compressed_write: True if the written block was a compressed block. * * The lock holder count is cleared and the caller is responsible for setting the new count. */ @@ -582,7 +583,7 @@ static bool continue_allocating(struct data_vio *data_vio) * that fails try the next if possible. * @data_vio: The data_vio needing an allocation. * - * Return: true if a block was allocated, if not the data_vio will have been dispatched so the + * Return: True if a block was allocated, if not the data_vio will have been dispatched so the * caller must not touch it. */ bool vdo_allocate_block_in_zone(struct data_vio *data_vio) diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index de58184f538f50..9cc0f0ff166457 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -109,7 +109,7 @@ static atomic_t *get_decrement_counter(struct recovery_journal *journal, * @journal: The recovery journal. * @lock_number: The lock to check. * - * Return: true if the journal zone is locked. + * Return: True if the journal zone is locked. */ static bool is_journal_zone_locked(struct recovery_journal *journal, block_count_t lock_number) @@ -217,7 +217,7 @@ static struct recovery_journal_block * __must_check pop_free_list(struct recover * Indicates it has any uncommitted entries, which includes both entries not written and entries * written but not yet acknowledged. * - * Return: true if the block has any uncommitted entries. + * Return: True if the block has any uncommitted entries. */ static inline bool __must_check is_block_dirty(const struct recovery_journal_block *block) { @@ -228,7 +228,7 @@ static inline bool __must_check is_block_dirty(const struct recovery_journal_blo * is_block_empty() - Check whether a journal block is empty. * @block: The block to check. * - * Return: true if the block has no entries. + * Return: True if the block has no entries. */ static inline bool __must_check is_block_empty(const struct recovery_journal_block *block) { @@ -239,7 +239,7 @@ static inline bool __must_check is_block_empty(const struct recovery_journal_blo * is_block_full() - Check whether a journal block is full. * @block: The block to check. 
* - * Return: true if the block is full. + * Return: True if the block is full. */ static inline bool __must_check is_block_full(const struct recovery_journal_block *block) { @@ -260,6 +260,8 @@ static void assert_on_journal_thread(struct recovery_journal *journal, /** * continue_waiter() - Release a data_vio from the journal. + * @waiter: The data_vio waiting on journal activity. + * @context: The result of the journal operation. * * Invoked whenever a data_vio is to be released from the journal, either because its entry was * committed to disk, or because there was an error. Implements waiter_callback_fn. @@ -273,7 +275,7 @@ static void continue_waiter(struct vdo_waiter *waiter, void *context) * has_block_waiters() - Check whether the journal has any waiters on any blocks. * @journal: The journal in question. * - * Return: true if any block has a waiter. + * Return: True if any block has a waiter. */ static inline bool has_block_waiters(struct recovery_journal *journal) { @@ -296,7 +298,7 @@ static void notify_commit_waiters(struct recovery_journal *journal); * suspend_lock_counter() - Prevent the lock counter from notifying. * @counter: The counter. * - * Return: true if the lock counter was not notifying and hence the suspend was efficacious. + * Return: True if the lock counter was not notifying and hence the suspend was efficacious. */ static bool suspend_lock_counter(struct lock_counter *counter) { @@ -416,7 +418,7 @@ sequence_number_t vdo_get_recovery_journal_current_sequence_number(struct recove * * The head is the lowest sequence number of the block map head and the slab journal head. * - * Return: the head of the journal. + * Return: The head of the journal. */ static inline sequence_number_t get_recovery_journal_head(const struct recovery_journal *journal) { @@ -535,7 +537,7 @@ static void initialize_journal_state(struct recovery_journal *journal) * vdo_get_recovery_journal_length() - Get the number of usable recovery journal blocks. * @journal_size: The size of the recovery journal in blocks. * - * Return: the number of recovery journal blocks usable for entries. + * Return: The number of recovery journal blocks usable for entries. */ block_count_t vdo_get_recovery_journal_length(block_count_t journal_size) { @@ -1078,6 +1080,8 @@ static void update_usages(struct recovery_journal *journal, struct data_vio *dat /** * assign_entry() - Assign an entry waiter to the active block. + * @waiter: The data_vio. + * @context: The recovery journal block. * * Implements waiter_callback_fn. */ @@ -1165,6 +1169,8 @@ static void recycle_journal_block(struct recovery_journal_block *block) /** * continue_committed_waiter() - invoked whenever a VIO is to be released from the journal because * its entry was committed to disk. + * @waiter: The data_vio waiting on a journal write. + * @context: A pointer to the recovery journal. * * Implements waiter_callback_fn. */ @@ -1362,6 +1368,8 @@ static void add_queued_recovery_entries(struct recovery_journal_block *block) /** * write_block() - Issue a block for writing. + * @waiter: The recovery journal block to write. + * @context: Not used. * * Implements waiter_callback_fn. */ @@ -1611,11 +1619,7 @@ void vdo_release_journal_entry_lock(struct recovery_journal *journal, smp_mb__after_atomic(); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. 
*/ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct recovery_journal, state)); diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index f3d80ff7bef558..034ecaa51f4817 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -40,7 +40,7 @@ static const bool NORMAL_OPERATION = true; /** * get_lock() - Get the lock object for a slab journal block by sequence number. - * @journal: vdo_slab journal to retrieve from. + * @journal: The vdo_slab journal to retrieve from. * @sequence_number: Sequence number of the block. * * Return: The lock object for the given sequence number. @@ -110,7 +110,7 @@ static void initialize_journal_state(struct slab_journal *journal) * block_is_full() - Check whether a journal block is full. * @journal: The slab journal for the block. * - * Return: true if the tail block is full. + * Return: True if the tail block is full. */ static bool __must_check block_is_full(struct slab_journal *journal) { @@ -127,10 +127,11 @@ static void release_journal_locks(struct vdo_waiter *waiter, void *context); /** * is_slab_journal_blank() - Check whether a slab's journal is blank. + * @slab: The slab to check. * * A slab journal is blank if it has never had any entries recorded in it. * - * Return: true if the slab's journal has never been modified. + * Return: True if the slab's journal has never been modified. */ static bool is_slab_journal_blank(const struct vdo_slab *slab) { @@ -227,6 +228,7 @@ static u8 __must_check compute_fullness_hint(struct slab_depot *depot, /** * check_summary_drain_complete() - Check whether an allocators summary has finished draining. + * @allocator: The allocator to check. */ static void check_summary_drain_complete(struct block_allocator *allocator) { @@ -349,7 +351,7 @@ static void launch_write(struct slab_summary_block *block) /** * update_slab_summary_entry() - Update the entry for a slab. - * @slab: The slab whose entry is to be updated + * @slab: The slab whose entry is to be updated. * @waiter: The waiter that is updating the summary. * @tail_block_offset: The offset of the slab journal's tail block. * @load_ref_counts: Whether the reference counts must be loaded from disk on the vdo load. @@ -654,6 +656,7 @@ static void update_tail_block_location(struct slab_journal *journal) /** * reopen_slab_journal() - Reopen a slab's journal by emptying it and then adding pending entries. + * @slab: The slab to reopen. */ static void reopen_slab_journal(struct vdo_slab *slab) { @@ -839,8 +842,6 @@ static void commit_tail(struct slab_journal *journal) * @sbn: The slab block number of the entry to encode. * @operation: The type of the entry. * @increment: True if this is an increment. - * - * Exposed for unit tests. */ static void encode_slab_journal_entry(struct slab_journal_block_header *tail_header, slab_journal_payload *payload, @@ -951,7 +952,7 @@ static inline block_count_t journal_length(const struct slab_journal *journal) * @parent: The completion to notify when there is space to add the entry if the entry could not be * added immediately. * - * Return: true if the entry was added immediately. + * Return: True if the entry was added immediately. 
*/ bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn, enum journal_operation operation, bool increment, @@ -1003,7 +1004,7 @@ bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t * requires_reaping() - Check whether the journal must be reaped before adding new entries. * @journal: The journal to check. * - * Return: true if the journal must be reaped. + * Return: True if the journal must be reaped. */ static bool requires_reaping(const struct slab_journal *journal) { @@ -1275,6 +1276,8 @@ static void dirty_block(struct reference_block *block) /** * get_reference_block() - Get the reference block that covers the given block index. + * @slab: The slab containing the references. + * @index: The index of the physical block. */ static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab, slab_block_number index) @@ -1379,7 +1382,8 @@ static void prioritize_slab(struct vdo_slab *slab) /** * adjust_free_block_count() - Adjust the free block count and (if needed) reprioritize the slab. - * @incremented: true if the free block count went up. + * @slab: The slab. + * @incremented: True if the free block count went up. */ static void adjust_free_block_count(struct vdo_slab *slab, bool incremented) { @@ -1885,6 +1889,7 @@ static void add_entries(struct slab_journal *journal) /** * reset_search_cursor() - Reset the free block search back to the first reference counter in the * first reference block of a slab. + * @slab: The slab. */ static void reset_search_cursor(struct vdo_slab *slab) { @@ -1892,17 +1897,17 @@ static void reset_search_cursor(struct vdo_slab *slab) cursor->block = cursor->first_block; cursor->index = 0; - /* Unit tests have slabs with only one reference block (and it's a runt). */ cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count); } /** * advance_search_cursor() - Advance the search cursor to the start of the next reference block in - * a slab, + * a slab. + * @slab: The slab. * * Wraps around to the first reference block if the current block is the last reference block. * - * Return: true unless the cursor was at the last reference block. + * Return: True unless the cursor was at the last reference block. */ static bool advance_search_cursor(struct vdo_slab *slab) { @@ -1933,6 +1938,9 @@ static bool advance_search_cursor(struct vdo_slab *slab) /** * vdo_adjust_reference_count_for_rebuild() - Adjust the reference count of a block during rebuild. + * @depot: The slab depot. + * @pbn: The physical block number to adjust. + * @operation: The type opf operation. * * Return: VDO_SUCCESS or an error. */ @@ -2038,9 +2046,7 @@ static inline slab_block_number find_zero_byte_in_word(const u8 *word_ptr, * @slab: The slab counters to scan. * @index_ptr: A pointer to hold the array index of the free block. * - * Exposed for unit testing. - * - * Return: true if a free block was found in the specified range. + * Return: True if a free block was found in the specified range. */ static bool find_free_block(const struct vdo_slab *slab, slab_block_number *index_ptr) { @@ -2097,7 +2103,7 @@ static bool find_free_block(const struct vdo_slab *slab, slab_block_number *inde * @slab: The slab to search. * @free_index_ptr: A pointer to receive the array index of the zero reference count. * - * Return: true if an unreferenced counter was found. + * Return: True if an unreferenced counter was found. 
*/ static bool search_current_reference_block(const struct vdo_slab *slab, slab_block_number *free_index_ptr) @@ -2116,7 +2122,7 @@ static bool search_current_reference_block(const struct vdo_slab *slab, * counter index saved in the search cursor and searching up to the end of the last reference * block. The search does not wrap. * - * Return: true if an unreferenced counter was found. + * Return: True if an unreferenced counter was found. */ static bool search_reference_blocks(struct vdo_slab *slab, slab_block_number *free_index_ptr) @@ -2136,6 +2142,8 @@ static bool search_reference_blocks(struct vdo_slab *slab, /** * make_provisional_reference() - Do the bookkeeping for making a provisional reference. + * @slab: The slab. + * @block_number: The index for the physical block to reference. */ static void make_provisional_reference(struct vdo_slab *slab, slab_block_number block_number) @@ -2155,6 +2163,7 @@ static void make_provisional_reference(struct vdo_slab *slab, /** * dirty_all_reference_blocks() - Mark all reference count blocks in a slab as dirty. + * @slab: The slab. */ static void dirty_all_reference_blocks(struct vdo_slab *slab) { @@ -2173,10 +2182,10 @@ static inline bool journal_points_equal(struct journal_point first, /** * match_bytes() - Check an 8-byte word for bytes matching the value specified - * @input: A word to examine the bytes of - * @match: The byte value sought + * @input: A word to examine the bytes of. + * @match: The byte value sought. * - * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise + * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise. */ static inline u64 match_bytes(u64 input, u8 match) { @@ -2191,12 +2200,12 @@ static inline u64 match_bytes(u64 input, u8 match) /** * count_valid_references() - Process a newly loaded refcount array - * @counters: the array of counters from a metadata block + * @counters: The array of counters from a metadata block. * - * Scan a 8-byte-aligned array of counters, fixing up any "provisional" values that weren't - * cleaned up at shutdown, changing them internally to "empty". + * Scan an 8-byte-aligned array of counters, fixing up any provisional values that + * weren't cleaned up at shutdown, changing them internally to zero. * - * Return: the number of blocks that are referenced (counters not "empty") + * Return: The number of blocks with a non-zero reference count. */ static unsigned int count_valid_references(vdo_refcount_t *counters) { @@ -2351,6 +2360,7 @@ static void load_reference_block_group(struct vdo_waiter *waiter, void *context) /** * load_reference_blocks() - Load a slab's reference blocks from the underlying storage into a * pre-allocated reference counter. + * @slab: The slab. */ static void load_reference_blocks(struct vdo_slab *slab) { @@ -2375,6 +2385,7 @@ static void load_reference_blocks(struct vdo_slab *slab) /** * drain_slab() - Drain all reference count I/O. + * @slab: The slab. * * Depending upon the type of drain being performed (as recorded in the ref_count's vdo_slab), the * reference blocks may be loaded from disk or dirty reference blocks may be written out. @@ -2564,6 +2575,7 @@ static void read_slab_journal_tail(struct vdo_waiter *waiter, void *context) /** * load_slab_journal() - Load a slab's journal by reading the journal's tail. + * @slab: The slab. 
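match_bytes(), documented above to return 1 in each byte position where the input byte equals the sought value, is the classic word-at-a-time (SWAR) byte comparison used when scanning a packed array, such as a block of reference counters, eight entries at a time. The sketch below shows one common way to implement it along with a small usage example; it is an illustration, not a copy of the driver's code:

#include <stdint.h>
#include <stdio.h>

/*
 * Word-at-a-time byte match: XOR broadcasts the sought byte so matching bytes
 * become zero, then the classic "has-zero-byte" trick turns each zero byte
 * into a 1 in that byte position.
 */
static inline uint64_t match_bytes(uint64_t input, uint8_t match)
{
        uint64_t spread = 0x0101010101010101ULL * match;
        uint64_t x = input ^ spread;            /* matching bytes are now 0x00 */
        uint64_t top = ~x & 0x8080808080808080ULL;
        uint64_t low = 0x8080808080808080ULL - (x & 0x7f7f7f7f7f7f7f7fULL);

        return (top & low) >> 7;                /* 0x01 per matching byte */
}

int main(void)
{
        uint64_t counters = 0x00ff02ff000001ffULL;      /* eight packed counters */
        uint64_t hits = match_bytes(counters, 0xff);    /* which ones equal 0xff? */
        int count = 0;

        while (hits) {
                count += hits & 1;
                hits >>= 8;
        }
        printf("matched %d bytes\n", count);    /* prints 3 */
        return 0;
}

count_valid_references() above can apply the same kind of word-wide scan to classify provisional and free counters without touching each byte individually.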
*/ static void load_slab_journal(struct vdo_slab *slab) { @@ -2663,11 +2675,7 @@ static void queue_slab(struct vdo_slab *slab) prioritize_slab(slab); } -/** - * initiate_slab_action() - Initiate a slab action. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_slab_action(struct admin_state *state) { struct vdo_slab *slab = container_of(state, struct vdo_slab, state); @@ -2720,7 +2728,7 @@ static struct vdo_slab *get_next_slab(struct slab_scrubber *scrubber) * has_slabs_to_scrub() - Check whether a scrubber has slabs to scrub. * @scrubber: The scrubber to check. * - * Return: true if the scrubber has slabs to scrub. + * Return: True if the scrubber has slabs to scrub. */ static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber) { @@ -2741,6 +2749,7 @@ static void uninitialize_scrubber_vio(struct slab_scrubber *scrubber) * finish_scrubbing() - Stop scrubbing, either because there are no more slabs to scrub or because * there's been an error. * @scrubber: The scrubber. + * @result: The result of the scrubbing operation. */ static void finish_scrubbing(struct slab_scrubber *scrubber, int result) { @@ -3132,11 +3141,13 @@ static struct vdo_slab *next_slab(struct slab_iterator *iterator) /** * abort_waiter() - Abort vios waiting to make journal entries when read-only. + * @waiter: A waiting data_vio. + * @context: Not used. * * This callback is invoked on all vios waiting to make slab journal entries after the VDO has gone * into read-only mode. Implements waiter_callback_fn. */ -static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused) +static void abort_waiter(struct vdo_waiter *waiter, void __always_unused *context) { struct reference_updater *updater = container_of(waiter, struct reference_updater, waiter); @@ -3536,7 +3547,7 @@ static void initiate_load(struct admin_state *state) /** * vdo_notify_slab_journals_are_recovered() - Inform a block allocator that its slab journals have * been recovered from the recovery journal. - * @completion The allocator completion + * @completion: The allocator completion. */ void vdo_notify_slab_journals_are_recovered(struct vdo_completion *completion) { @@ -3775,7 +3786,7 @@ static int initialize_slab_journal(struct vdo_slab *slab) * in the slab. * @allocator: The block allocator to which the slab belongs. * @slab_number: The slab number of the slab. - * @is_new: true if this slab is being allocated as part of a resize. + * @is_new: True if this slab is being allocated as part of a resize. * @slab_ptr: A pointer to receive the new slab. * * Return: VDO_SUCCESS or an error code. @@ -3894,11 +3905,7 @@ void vdo_abandon_new_slabs(struct slab_depot *depot) vdo_free(vdo_forget(depot->new_slabs)); } -/** - * get_allocator_thread_id() - Get the ID of the thread on which a given allocator operates. - * - * Implements vdo_zone_thread_getter_fn. - */ +/** Implements vdo_zone_thread_getter_fn. */ static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_number) { return ((struct slab_depot *) context)->allocators[zone_number].thread_id; @@ -3911,7 +3918,7 @@ static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_numb * @recovery_lock: The sequence number of the recovery journal block whose locks should be * released. * - * Return: true if the journal does hold a lock on the specified block (which it will release). + * Return: True if the journal released a lock on the specified block. 
*/ static bool __must_check release_recovery_journal_lock(struct slab_journal *journal, sequence_number_t recovery_lock) @@ -3955,6 +3962,8 @@ static void release_tail_block_locks(void *context, zone_count_t zone_number, /** * prepare_for_tail_block_commit() - Prepare to commit oldest tail blocks. + * @context: The slab depot. + * @parent: The parent operation. * * Implements vdo_action_preamble_fn. */ @@ -3968,6 +3977,7 @@ static void prepare_for_tail_block_commit(void *context, struct vdo_completion * /** * schedule_tail_block_commit() - Schedule a tail block commit if necessary. + * @context: The slab depot. * * This method should not be called directly. Rather, call vdo_schedule_default_action() on the * depot's action manager. @@ -4361,6 +4371,7 @@ struct slab_depot_state_2_0 vdo_record_slab_depot(const struct slab_depot *depot /** * vdo_allocate_reference_counters() - Allocate the reference counters for all slabs in the depot. + * @depot: The slab depot. * * Context: This method may be called only before entering normal operation from the load thread. * @@ -4615,7 +4626,9 @@ static void load_summary_endio(struct bio *bio) } /** - * load_slab_summary() - The preamble of a load operation. + * load_slab_summary() - Load the slab summary before the slab data. + * @context: The slab depot. + * @parent: The load operation. * * Implements vdo_action_preamble_fn. */ @@ -4731,7 +4744,7 @@ void vdo_update_slab_depot_size(struct slab_depot *depot) * vdo_prepare_to_grow_slab_depot() - Allocate new memory needed for a resize of a slab depot to * the given size. * @depot: The depot to prepare to resize. - * @partition: The new depot partition + * @partition: The new depot partition. * * Return: VDO_SUCCESS or an error. */ @@ -4781,6 +4794,7 @@ int vdo_prepare_to_grow_slab_depot(struct slab_depot *depot, /** * finish_registration() - Finish registering new slabs now that all of the allocators have * received their new slabs. + * @context: The slab depot. * * Implements vdo_action_conclusion_fn. */ diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index 80b60867402255..09fd0628d18c7c 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -181,6 +181,8 @@ static void assign_thread_ids(struct thread_config *config, /** * initialize_thread_config() - Initialize the thread mapping + * @counts: The number and types of threads to create. + * @config: The thread_config to initialize. * * If the logical, physical, and hash zone counts are all 0, a single thread will be shared by all * three plus the packer and recovery journal. Otherwise, there must be at least one of each type, @@ -884,6 +886,7 @@ const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo) /** * record_vdo() - Record the state of the VDO for encoding in the super block. + * @vdo: The vdo. */ static void record_vdo(struct vdo *vdo) { @@ -1277,7 +1280,7 @@ void vdo_enter_read_only_mode(struct vdo *vdo, int error_code) * vdo_is_read_only() - Check whether the VDO is read-only. * @vdo: The vdo. * - * Return: true if the vdo is read-only. + * Return: True if the vdo is read-only. * * This method may be called from any thread, as opposed to examining the VDO's state field which * is only safe to check from the admin thread. @@ -1291,7 +1294,7 @@ bool vdo_is_read_only(struct vdo *vdo) * vdo_in_read_only_mode() - Check whether a vdo is in read-only mode. * @vdo: The vdo to query. * - * Return: true if the vdo is in read-only mode. + * Return: True if the vdo is in read-only mode. 
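The vdo_is_read_only() note above draws a thread-safety distinction: the read-only indicator may be polled from any thread, while the vdo's state field may only be examined from the admin thread. A toy C11 illustration of that split, with names invented for the example (the driver's actual mechanism may differ):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented names for illustration; not the driver's structures. */
struct toy_vdo {
        atomic_bool read_only;  /* may be polled from any thread */
        int admin_state;        /* by contrast, meant for the admin thread only */
};

static void toy_enter_read_only(struct toy_vdo *vdo)
{
        atomic_store(&vdo->read_only, true);
}

static bool toy_is_read_only(struct toy_vdo *vdo)
{
        return atomic_load(&vdo->read_only);
}

int main(void)
{
        struct toy_vdo vdo = { .read_only = false, .admin_state = 0 };

        printf("read-only? %d\n", toy_is_read_only(&vdo));      /* 0 */
        toy_enter_read_only(&vdo);
        printf("read-only? %d\n", toy_is_read_only(&vdo));      /* 1 */
        return 0;
}

Publishing a single once-set atomic flag is one simple way to let arbitrary threads check the condition without the locking that reading the richer admin state would require.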
*/ bool vdo_in_read_only_mode(const struct vdo *vdo) { @@ -1302,7 +1305,7 @@ bool vdo_in_read_only_mode(const struct vdo *vdo) * vdo_in_recovery_mode() - Check whether the vdo is in recovery mode. * @vdo: The vdo to query. * - * Return: true if the vdo is in recovery mode. + * Return: True if the vdo is in recovery mode. */ bool vdo_in_recovery_mode(const struct vdo *vdo) { diff --git a/drivers/md/dm-vdo/vdo.h b/drivers/md/dm-vdo/vdo.h index 483ae873e002ff..1aaba73997b74b 100644 --- a/drivers/md/dm-vdo/vdo.h +++ b/drivers/md/dm-vdo/vdo.h @@ -279,8 +279,10 @@ static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo) /** * typedef vdo_filter_fn - Method type for vdo matching methods. + * @vdo: The vdo to match. + * @context: A parameter for the filter to use. * - * A filter function returns false if the vdo doesn't match. + * Return: True if the vdo matches the filter criteria, false if it doesn't. */ typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context); diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c index 8fc22fb141965d..5ffc867d9c5e55 100644 --- a/drivers/md/dm-vdo/vio.c +++ b/drivers/md/dm-vdo/vio.c @@ -398,8 +398,9 @@ void free_vio_pool(struct vio_pool *pool) /** * is_vio_pool_busy() - Check whether an vio pool has outstanding entries. + * @pool: The vio pool. * - * Return: true if the pool is busy. + * Return: True if the pool is busy. */ bool is_vio_pool_busy(struct vio_pool *pool) { diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h index 4bfcb21901f149..7a8a6819aec4af 100644 --- a/drivers/md/dm-vdo/vio.h +++ b/drivers/md/dm-vdo/vio.h @@ -156,8 +156,7 @@ static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio /** * continue_vio() - Enqueue a vio to run its next callback. * @vio: The vio to continue. - * - * Return: The result of the current operation. + * @result: The result of the current operation. */ static inline void continue_vio(struct vio *vio, int result) { @@ -172,6 +171,9 @@ void vdo_count_completed_bios(struct bio *bio); /** * continue_vio_after_io() - Continue a vio now that its I/O has returned. + * @vio: The vio to continue. + * @callback: The next operation for this vio. + * @thread: Which thread to run the next operation on. */ static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback, thread_id_t thread) diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 72047b47a7a0a3..c79de517afee77 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -177,9 +177,11 @@ error: if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); - else if (r > 0) + else if (r > 0) { DMWARN_LIMIT("%s: FEC %llu: corrected %d errors", v->data_dev->name, (unsigned long long)rsb, r); + atomic64_inc(&v->fec->corrected); + } return r; } @@ -188,14 +190,13 @@ error: * Locate data block erasures using verity hashes. 
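The comment above summarizes the FEC strategy: hash each candidate data block and compare the result with the digest the verity tree expects; every mismatch marks that block's position as an erasure, which is the hint a Reed-Solomon decoder needs to correct more damage than it otherwise could. A self-contained sketch of that idea, using a toy checksum in place of the real verity hash (all names here are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKS   8
#define BLK_SIZE 16

/* Stand-in for the real verity hash: a toy checksum, for illustration only. */
static uint8_t toy_digest(const uint8_t *data)
{
        uint8_t sum = 0;
        int i;

        for (i = 0; i < BLK_SIZE; i++)
                sum += data[i];
        return sum;
}

/*
 * Hash each block and compare against the expected digest; every mismatch is
 * recorded as an erasure position for the Reed-Solomon decoder.
 */
static int find_erasures(const uint8_t blocks[BLOCKS][BLK_SIZE],
                         const uint8_t expected[BLOCKS],
                         int erasures[BLOCKS])
{
        int i, neras = 0;

        for (i = 0; i < BLOCKS; i++)
                if (toy_digest(blocks[i]) != expected[i])
                        erasures[neras++] = i;
        return neras;
}

int main(void)
{
        uint8_t blocks[BLOCKS][BLK_SIZE];
        uint8_t expected[BLOCKS];
        int erasures[BLOCKS];
        int i, neras;

        memset(blocks, 0xab, sizeof(blocks));
        for (i = 0; i < BLOCKS; i++)
                expected[i] = toy_digest(blocks[i]);

        blocks[3][0] ^= 0xff;   /* corrupt one block */
        neras = find_erasures(blocks, expected, erasures);
        printf("%d erasure(s), first at block %d\n", neras, erasures[0]);
        return 0;
}

This is why fec_decode_rsb() is attempted a second time with use_erasures set: telling the decoder where the damage sits can roughly double the number of blocks it is able to repair.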
*/ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io, - u8 *want_digest, u8 *data) + const u8 *want_digest, const u8 *data) { if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)))) + io->tmp_digest))) return 0; - return memcmp(verity_io_real_digest(v, io), want_digest, - v->digest_size) != 0; + return memcmp(io->tmp_digest, want_digest, v->digest_size) != 0; } /* @@ -328,7 +329,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) if (fio->bufs[n]) continue; - fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT); + fio->bufs[n] = kmem_cache_alloc(v->fec->cache, GFP_NOWAIT); /* we can manage with even one buffer if necessary */ if (unlikely(!fio->bufs[n])) break; @@ -362,7 +363,7 @@ static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) */ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, struct dm_verity_fec_io *fio, u64 rsb, u64 offset, - bool use_erasures) + const u8 *want_digest, bool use_erasures) { int r, neras = 0; unsigned int pos; @@ -388,12 +389,11 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, /* Always re-validate the corrected block against the expected hash */ r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)); + io->tmp_digest); if (unlikely(r < 0)) return r; - if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io), - v->digest_size)) { + if (memcmp(io->tmp_digest, want_digest, v->digest_size)) { DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)", v->data_dev->name, (unsigned long long)rsb, neras); return -EILSEQ; @@ -404,7 +404,8 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, /* Correct errors in a block. Copies corrected block to dest. */ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, - enum verity_block_type type, sector_t block, u8 *dest) + enum verity_block_type type, const u8 *want_digest, + sector_t block, u8 *dest) { int r; struct dm_verity_fec_io *fio = fec_io(io); @@ -413,10 +414,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if (!verity_fec_is_enabled(v)) return -EOPNOTSUPP; - if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) { - DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name); + if (fio->level) return -EIO; - } fio->level++; @@ -447,9 +446,9 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, * them first. Do a second attempt with erasures if the corruption is * bad enough. 
*/ - r = fec_decode_rsb(v, io, fio, rsb, offset, false); + r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, false); if (r < 0) { - r = fec_decode_rsb(v, io, fio, rsb, offset, true); + r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, true); if (r < 0) goto done; } @@ -479,7 +478,8 @@ void verity_fec_finish_io(struct dm_verity_io *io) mempool_free(fio->bufs[n], &f->prealloc_pool); fec_for_each_extra_buffer(fio, n) - mempool_free(fio->bufs[n], &f->extra_pool); + if (fio->bufs[n]) + kmem_cache_free(f->cache, fio->bufs[n]); mempool_free(fio->output, &f->output_pool); } @@ -531,7 +531,6 @@ void verity_fec_dtr(struct dm_verity *v) mempool_exit(&f->rs_pool); mempool_exit(&f->prealloc_pool); - mempool_exit(&f->extra_pool); mempool_exit(&f->output_pool); kmem_cache_destroy(f->cache); @@ -784,12 +783,6 @@ int verity_fec_ctr(struct dm_verity *v) return ret; } - ret = mempool_init_slab_pool(&f->extra_pool, 0, f->cache); - if (ret) { - ti->error = "Cannot allocate FEC buffer extra pool"; - return ret; - } - /* Preallocate an output buffer for each thread */ ret = mempool_init_kmalloc_pool(&f->output_pool, num_online_cpus(), 1 << v->data_dev_block_bits); diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 09123a61295387..5fd26787381271 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -23,9 +23,6 @@ #define DM_VERITY_FEC_BUF_MAX \ (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) -/* maximum recursion level for verity_fec_decode */ -#define DM_VERITY_FEC_MAX_RECURSION 4 - #define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" #define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" #define DM_VERITY_OPT_FEC_START "fec_start" @@ -45,9 +42,9 @@ struct dm_verity_fec { unsigned char rsn; /* N of RS(M, N) */ mempool_t rs_pool; /* mempool for fio->rs */ mempool_t prealloc_pool; /* mempool for preallocated buffers */ - mempool_t extra_pool; /* mempool for extra buffers */ mempool_t output_pool; /* mempool for output */ struct kmem_cache *cache; /* cache for buffers */ + atomic64_t corrected; /* corrected errors */ }; /* per-bio data */ @@ -68,8 +65,8 @@ struct dm_verity_fec_io { extern bool verity_fec_is_enabled(struct dm_verity *v); extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, - enum verity_block_type type, sector_t block, - u8 *dest); + enum verity_block_type type, const u8 *want_digest, + sector_t block, u8 *dest); extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz, char *result, unsigned int maxlen); @@ -99,6 +96,7 @@ static inline bool verity_fec_is_enabled(struct dm_verity *v) static inline int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, enum verity_block_type type, + const u8 *want_digest, sector_t block, u8 *dest) { return -EOPNOTSUPP; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 66a00a8ccb398b..5c17472d789666 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -117,11 +117,25 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, int verity_hash(struct dm_verity *v, struct dm_verity_io *io, const u8 *data, size_t len, u8 *digest) { - struct shash_desc *desc = &io->hash_desc; + struct shash_desc *desc; int r; + if (likely(v->use_sha256_lib)) { + struct sha256_ctx *ctx = &io->hash_ctx.sha256; + + /* + * Fast path using SHA-256 library. This is enabled only for + * verity version 1, where the salt is at the beginning. 
+ */ + *ctx = *v->initial_hashstate.sha256; + sha256_update(ctx, data, len); + sha256_final(ctx, digest); + return 0; + } + + desc = &io->hash_ctx.shash; desc->tfm = v->shash_tfm; - if (unlikely(v->initial_hashstate == NULL)) { + if (unlikely(v->initial_hashstate.shash == NULL)) { /* Version 0: salt at end */ r = crypto_shash_init(desc) ?: crypto_shash_update(desc, data, len) ?: @@ -129,7 +143,7 @@ int verity_hash(struct dm_verity *v, struct dm_verity_io *io, crypto_shash_final(desc, digest); } else { /* Version 1: salt at beginning */ - r = crypto_shash_import(desc, v->initial_hashstate) ?: + r = crypto_shash_import(desc, v->initial_hashstate.shash) ?: crypto_shash_finup(desc, data, len, digest); } if (unlikely(r)) @@ -215,12 +229,12 @@ out: * Verify hash of a metadata block pertaining to the specified data block * ("block" argument) at a specified level ("level" argument). * - * On successful return, verity_io_want_digest(v, io) contains the hash value - * for a lower tree level or for the data block (if we're at the lowest level). + * On successful return, want_digest contains the hash value for a lower tree + * level or for the data block (if we're at the lowest level). * * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned. * If "skip_unverified" is false, unverified buffer is hashed and verified - * against current value of verity_io_want_digest(v, io). + * against current value of want_digest. */ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, sector_t block, int level, bool skip_unverified, @@ -259,7 +273,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, if (IS_ERR(data)) return r; if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA, - hash_block, data) == 0) { + want_digest, hash_block, data) == 0) { aux = dm_bufio_get_aux_data(buf); aux->hash_verified = 1; goto release_ok; @@ -279,11 +293,11 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, } r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits, - verity_io_real_digest(v, io)); + io->tmp_digest); if (unlikely(r < 0)) goto release_ret_r; - if (likely(memcmp(verity_io_real_digest(v, io), want_digest, + if (likely(memcmp(io->tmp_digest, want_digest, v->digest_size) == 0)) aux->hash_verified = 1; else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) { @@ -294,7 +308,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, r = -EAGAIN; goto release_ret_r; } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA, - hash_block, data) == 0) + want_digest, hash_block, data) == 0) aux->hash_verified = 1; else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA, @@ -358,7 +372,8 @@ out: } static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io, - sector_t cur_block, u8 *dest) + const u8 *want_digest, sector_t cur_block, + u8 *dest) { struct page *page; void *buffer; @@ -382,12 +397,11 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io, goto free_ret; r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)); + io->tmp_digest); if (unlikely(r)) goto free_ret; - if (memcmp(verity_io_real_digest(v, io), - verity_io_want_digest(v, io), v->digest_size)) { + if (memcmp(io->tmp_digest, want_digest, v->digest_size)) { r = -EIO; goto free_ret; } @@ -402,9 +416,13 @@ free_ret: static int verity_handle_data_hash_mismatch(struct dm_verity *v, struct dm_verity_io *io, - struct bio *bio, 
sector_t blkno, - u8 *data) + struct bio *bio, + struct pending_block *block) { + const u8 *want_digest = block->want_digest; + sector_t blkno = block->blkno; + u8 *data = block->data; + if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) { /* * Error handling code (FEC included) cannot be run in the @@ -412,14 +430,14 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v, */ return -EAGAIN; } - if (verity_recheck(v, io, blkno, data) == 0) { + if (verity_recheck(v, io, want_digest, blkno, data) == 0) { if (v->validated_blocks) set_bit(blkno, v->validated_blocks); return 0; } #if defined(CONFIG_DM_VERITY_FEC) - if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, blkno, - data) == 0) + if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, want_digest, + blkno, data) == 0) return 0; #endif if (bio->bi_status) @@ -433,6 +451,58 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v, return 0; } +static void verity_clear_pending_blocks(struct dm_verity_io *io) +{ + int i; + + for (i = io->num_pending - 1; i >= 0; i--) { + kunmap_local(io->pending_blocks[i].data); + io->pending_blocks[i].data = NULL; + } + io->num_pending = 0; +} + +static int verity_verify_pending_blocks(struct dm_verity *v, + struct dm_verity_io *io, + struct bio *bio) +{ + const unsigned int block_size = 1 << v->data_dev_block_bits; + int i, r; + + if (io->num_pending == 2) { + /* num_pending == 2 implies that the algorithm is SHA-256 */ + sha256_finup_2x(v->initial_hashstate.sha256, + io->pending_blocks[0].data, + io->pending_blocks[1].data, block_size, + io->pending_blocks[0].real_digest, + io->pending_blocks[1].real_digest); + } else { + for (i = 0; i < io->num_pending; i++) { + r = verity_hash(v, io, io->pending_blocks[i].data, + block_size, + io->pending_blocks[i].real_digest); + if (unlikely(r)) + return r; + } + } + + for (i = 0; i < io->num_pending; i++) { + struct pending_block *block = &io->pending_blocks[i]; + + if (likely(memcmp(block->real_digest, block->want_digest, + v->digest_size) == 0)) { + if (v->validated_blocks) + set_bit(block->blkno, v->validated_blocks); + } else { + r = verity_handle_data_hash_mismatch(v, io, bio, block); + if (unlikely(r)) + return r; + } + } + verity_clear_pending_blocks(io); + return 0; +} + /* * Verify one "dm_verity_io" structure. */ @@ -440,10 +510,14 @@ static int verity_verify_io(struct dm_verity_io *io) { struct dm_verity *v = io->v; const unsigned int block_size = 1 << v->data_dev_block_bits; + const int max_pending = v->use_sha256_finup_2x ? 
2 : 1; struct bvec_iter iter_copy; struct bvec_iter *iter; struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); unsigned int b; + int r; + + io->num_pending = 0; if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) { /* @@ -457,21 +531,22 @@ static int verity_verify_io(struct dm_verity_io *io) for (b = 0; b < io->n_blocks; b++, bio_advance_iter(bio, iter, block_size)) { - int r; - sector_t cur_block = io->block + b; + sector_t blkno = io->block + b; + struct pending_block *block; bool is_zero; struct bio_vec bv; void *data; if (v->validated_blocks && bio->bi_status == BLK_STS_OK && - likely(test_bit(cur_block, v->validated_blocks))) + likely(test_bit(blkno, v->validated_blocks))) continue; - r = verity_hash_for_block(v, io, cur_block, - verity_io_want_digest(v, io), + block = &io->pending_blocks[io->num_pending]; + + r = verity_hash_for_block(v, io, blkno, block->want_digest, &is_zero); if (unlikely(r < 0)) - return r; + goto error; bv = bio_iter_iovec(bio, *iter); if (unlikely(bv.bv_len < block_size)) { @@ -482,7 +557,8 @@ static int verity_verify_io(struct dm_verity_io *io) * data block size to be greater than PAGE_SIZE. */ DMERR_LIMIT("unaligned io (data block spans pages)"); - return -EIO; + r = -EIO; + goto error; } data = bvec_kmap_local(&bv); @@ -496,29 +572,26 @@ static int verity_verify_io(struct dm_verity_io *io) kunmap_local(data); continue; } - - r = verity_hash(v, io, data, block_size, - verity_io_real_digest(v, io)); - if (unlikely(r < 0)) { - kunmap_local(data); - return r; + block->data = data; + block->blkno = blkno; + if (++io->num_pending == max_pending) { + r = verity_verify_pending_blocks(v, io, bio); + if (unlikely(r)) + goto error; } + } - if (likely(memcmp(verity_io_real_digest(v, io), - verity_io_want_digest(v, io), v->digest_size) == 0)) { - if (v->validated_blocks) - set_bit(cur_block, v->validated_blocks); - kunmap_local(data); - continue; - } - r = verity_handle_data_hash_mismatch(v, io, bio, cur_block, - data); - kunmap_local(data); + if (io->num_pending) { + r = verity_verify_pending_blocks(v, io, bio); if (unlikely(r)) - return r; + goto error; } return 0; + +error: + verity_clear_pending_blocks(io); + return r; } /* @@ -775,6 +848,10 @@ static void verity_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: DMEMIT("%c", v->hash_failed ? 
'C' : 'V'); + if (verity_fec_is_enabled(v)) + DMEMIT(" %lld", atomic64_read(&v->fec->corrected)); + else + DMEMIT(" -"); break; case STATUSTYPE_TABLE: DMEMIT("%u %s %s %u %u %llu %llu %s ", @@ -1004,7 +1081,7 @@ static void verity_dtr(struct dm_target *ti) kvfree(v->validated_blocks); kfree(v->salt); - kfree(v->initial_hashstate); + kfree(v->initial_hashstate.shash); kfree(v->root_digest); kfree(v->zero_digest); verity_free_sig(v); @@ -1069,8 +1146,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v) if (!v->zero_digest) return r; - io = kmalloc(sizeof(*io) + crypto_shash_descsize(v->shash_tfm), - GFP_KERNEL); + io = kmalloc(v->ti->per_io_data_size, GFP_KERNEL); if (!io) return r; /* verity_dtr will free zero_digest */ @@ -1252,11 +1328,26 @@ static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name) } v->shash_tfm = shash; v->digest_size = crypto_shash_digestsize(shash); - DMINFO("%s using \"%s\"", alg_name, crypto_shash_driver_name(shash)); if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) { ti->error = "Digest size too big"; return -EINVAL; } + if (likely(v->version && strcmp(alg_name, "sha256") == 0)) { + /* + * Fast path: use the library API for reduced overhead and + * interleaved hashing support. + */ + v->use_sha256_lib = true; + if (sha256_finup_2x_is_optimized()) + v->use_sha256_finup_2x = true; + ti->per_io_data_size = + offsetofend(struct dm_verity_io, hash_ctx.sha256); + } else { + /* Fallback case: use the generic crypto API. */ + ti->per_io_data_size = + offsetofend(struct dm_verity_io, hash_ctx.shash) + + crypto_shash_descsize(shash); + } return 0; } @@ -1277,7 +1368,18 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg) return -EINVAL; } } - if (v->version) { /* Version 1: salt at beginning */ + if (likely(v->use_sha256_lib)) { + /* Implies version 1: salt at beginning */ + v->initial_hashstate.sha256 = + kmalloc(sizeof(struct sha256_ctx), GFP_KERNEL); + if (!v->initial_hashstate.sha256) { + ti->error = "Cannot allocate initial hash state"; + return -ENOMEM; + } + sha256_init(v->initial_hashstate.sha256); + sha256_update(v->initial_hashstate.sha256, + v->salt, v->salt_size); + } else if (v->version) { /* Version 1: salt at beginning */ SHASH_DESC_ON_STACK(desc, v->shash_tfm); int r; @@ -1285,16 +1387,16 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg) * Compute the pre-salted hash state that can be passed to * crypto_shash_import() for each block later. 
*/ - v->initial_hashstate = kmalloc( + v->initial_hashstate.shash = kmalloc( crypto_shash_statesize(v->shash_tfm), GFP_KERNEL); - if (!v->initial_hashstate) { + if (!v->initial_hashstate.shash) { ti->error = "Cannot allocate initial hash state"; return -ENOMEM; } desc->tfm = v->shash_tfm; r = crypto_shash_init(desc) ?: crypto_shash_update(desc, v->salt, v->salt_size) ?: - crypto_shash_export(desc, v->initial_hashstate); + crypto_shash_export(desc, v->initial_hashstate.shash); if (r) { ti->error = "Cannot set up initial hash state"; return r; @@ -1556,9 +1658,6 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - ti->per_io_data_size = sizeof(struct dm_verity_io) + - crypto_shash_descsize(v->shash_tfm); - r = verity_fec_ctr(v); if (r) goto bad; @@ -1690,7 +1789,7 @@ static struct target_type verity_target = { .name = "verity", /* Note: the LSMs depend on the singleton and immutable features */ .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, - .version = {1, 12, 0}, + .version = {1, 13, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 6d141abd965c77..f975a9e5c5d6ba 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -16,6 +16,7 @@ #include <linux/device-mapper.h> #include <linux/interrupt.h> #include <crypto/hash.h> +#include <crypto/sha2.h> #define DM_VERITY_MAX_LEVELS 63 @@ -42,7 +43,10 @@ struct dm_verity { struct crypto_shash *shash_tfm; u8 *root_digest; /* digest of the root block */ u8 *salt; /* salt: its size is salt_size */ - u8 *initial_hashstate; /* salted initial state, if version >= 1 */ + union { + struct sha256_ctx *sha256; /* for use_sha256_lib=1 */ + u8 *shash; /* for use_sha256_lib=0 */ + } initial_hashstate; /* salted initial state, if version >= 1 */ u8 *zero_digest; /* digest for a zero block */ #ifdef CONFIG_SECURITY u8 *root_digest_sig; /* signature of the root digest */ @@ -59,6 +63,8 @@ struct dm_verity { unsigned char version; bool hash_failed:1; /* set if hash of any block failed */ bool use_bh_wq:1; /* try to verify in BH wq before normal work-queue */ + bool use_sha256_lib:1; /* use SHA-256 library instead of generic crypto API */ + bool use_sha256_finup_2x:1; /* use interleaved hashing optimization */ unsigned int digest_size; /* digest size for the current hash algorithm */ enum verity_mode mode; /* mode for handling verification errors */ enum verity_mode error_mode;/* mode for handling I/O errors */ @@ -78,6 +84,13 @@ struct dm_verity { mempool_t recheck_pool; }; +struct pending_block { + void *data; + sector_t blkno; + u8 want_digest[HASH_MAX_DIGESTSIZE]; + u8 real_digest[HASH_MAX_DIGESTSIZE]; +}; + struct dm_verity_io { struct dm_verity *v; @@ -94,28 +107,29 @@ struct dm_verity_io { struct work_struct work; struct work_struct bh_work; - u8 real_digest[HASH_MAX_DIGESTSIZE]; - u8 want_digest[HASH_MAX_DIGESTSIZE]; + u8 tmp_digest[HASH_MAX_DIGESTSIZE]; /* - * Temporary space for hashing. This is variable-length and must be at - * the end of the struct. struct shash_desc is just the fixed part; - * it's followed by a context of size crypto_shash_descsize(shash_tfm). + * This is the queue of data blocks that are pending verification. When + * the crypto layer supports interleaved hashing, we allow multiple + * blocks to be queued up in order to utilize it. This can improve + * performance significantly vs. sequential hashing of each block. 
*/ - struct shash_desc hash_desc; -}; + int num_pending; + struct pending_block pending_blocks[2]; -static inline u8 *verity_io_real_digest(struct dm_verity *v, - struct dm_verity_io *io) -{ - return io->real_digest; -} - -static inline u8 *verity_io_want_digest(struct dm_verity *v, - struct dm_verity_io *io) -{ - return io->want_digest; -} + /* + * Temporary space for hashing. Either sha256 or shash is used, + * depending on the value of use_sha256_lib. If shash is used, + * then this field is variable-length, with total size + * sizeof(struct shash_desc) + crypto_shash_descsize(shash_tfm). + * For this reason, this field must be the end of the struct. + */ + union { + struct sha256_ctx sha256; + struct shash_desc shash; + } hash_ctx; +}; extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io, const u8 *data, size_t len, u8 *digest); diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 5a840c4ae31669..c95e417194b33c 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -203,8 +203,6 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) return ret; } - md->nr_zones = disk->nr_zones; - return 0; } @@ -452,7 +450,6 @@ void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim) set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); } else { clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); - md->nr_zones = 0; md->disk->nr_zones = 0; } } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6c83ab940af752..b6327920226012 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -272,7 +272,7 @@ static int __init dm_init(void) int r, i; #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) - DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled." + DMINFO("CONFIG_IMA_DISABLE_HTABLE is disabled." 
" Duplicate IMA measurements will not be recorded in the IMA log."); #endif @@ -1321,6 +1321,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); BUG_ON(bio_sectors > *tio->len_ptr); BUG_ON(n_sectors > bio_sectors); + BUG_ON(bio->bi_opf & REQ_ATOMIC); if (static_branch_unlikely(&zoned_enabled) && unlikely(bdev_is_zoned(bio->bi_bdev))) { @@ -1735,8 +1736,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); - if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count) - return BLK_STS_IOERR; + if (ci->bio->bi_opf & REQ_ATOMIC) { + if (unlikely(!dm_target_supports_atomic_writes(ti->type))) + return BLK_STS_IOERR; + if (unlikely(len != ci->sector_count)) + return BLK_STS_IOERR; + } setup_split_accounting(ci, len); @@ -2439,7 +2444,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, { struct dm_table *old_map; sector_t size, old_size; - int ret; lockdep_assert_held(&md->suspend_lock); @@ -2454,11 +2458,13 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, set_capacity(md->disk, size); - ret = dm_table_set_restrictions(t, md->queue, limits); - if (ret) { - set_capacity(md->disk, old_size); - old_map = ERR_PTR(ret); - goto out; + if (limits) { + int ret = dm_table_set_restrictions(t, md->queue, limits); + if (ret) { + set_capacity(md->disk, old_size); + old_map = ERR_PTR(ret); + goto out; + } } /* @@ -2836,6 +2842,7 @@ static void dm_wq_work(struct work_struct *work) static void dm_queue_flush(struct mapped_device *md) { + clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); smp_mb__after_atomic(); queue_work(md->wq, &md->work); @@ -2848,6 +2855,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); struct queue_limits limits; + bool update_limits = true; int r; mutex_lock(&md->suspend_lock); @@ -2857,19 +2865,30 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) goto out; /* + * To avoid a potential deadlock locking the queue limits, disallow + * updating the queue limits during a table swap, when updating an + * immutable request-based dm device (dm-multipath) during a noflush + * suspend. It is userspace's responsibility to make sure that the new + * table uses the same limits as the existing table, if it asks for a + * noflush suspend. + */ + if (dm_request_based(md) && md->immutable_target && + __noflush_suspending(md)) + update_limits = false; + /* * If the new table has no data devices, retain the existing limits. * This helps multipath with queue_if_no_path if all paths disappear, * then new I/O is queued based on these limits, and then some paths * reappear. */ - if (dm_table_has_no_data_devices(table)) { + else if (dm_table_has_no_data_devices(table)) { live_map = dm_get_live_table_fast(md); if (live_map) limits = md->queue->limits; dm_put_live_table_fast(md); } - if (!live_map) { + if (update_limits && !live_map) { r = dm_calculate_queue_limits(table, &limits); if (r) { map = ERR_PTR(r); @@ -2877,7 +2896,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) } } - map = __bind(md, table, &limits); + map = __bind(md, table, update_limits ? 
&limits : NULL); dm_issue_global_event(); out: @@ -2930,7 +2949,6 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, /* * DMF_NOFLUSH_SUSPENDING must be set before presuspend. - * This flag is cleared before dm_suspend returns. */ if (noflush) set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); @@ -2993,8 +3011,6 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, if (!r) set_bit(dmf_suspended_flag, &md->flags); - if (noflush) - clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); if (map) synchronize_srcu(&md->io_barrier); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 9d1de68dee278c..d7d41b054b9824 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -106,7 +106,6 @@ config PHANTOM config RPMB tristate "RPMB partition interface" - depends on MMC || SCSI_UFSHCD help Unified RPMB unit interface for RPMB capable devices such as eMMC and UFS. Provides interface for in-kernel security controllers to access diff --git a/drivers/of/property.c b/drivers/of/property.c index c1feb631e3831d..4e3524227720a5 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -148,6 +148,39 @@ static void *of_find_property_value_of_size(const struct device_node *np, } /** + * of_property_read_u8_index - Find and read a u8 from a multi-value property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @index: index of the u8 in the list of values + * @out_value: pointer to return value, modified only if no error. + * + * Search for a property in a device node and read nth 8-bit value from + * it. + * + * Return: 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_value is modified only if a valid u8 value can be decoded. + */ +int of_property_read_u8_index(const struct device_node *np, + const char *propname, + u32 index, u8 *out_value) +{ + const u8 *val = of_find_property_value_of_size(np, propname, + ((index + 1) * sizeof(*out_value)), + 0, NULL); + + if (IS_ERR(val)) + return PTR_ERR(val); + + *out_value = val[index]; + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u8_index); + +/** * of_property_read_u16_index - Find and read a u16 from a multi-value property. * * @np: device node from which the property value is to be read. 
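The of_property_read_u8_index() helper added above mirrors the existing u16/u32 index readers: it validates the property size for (index + 1) elements and copies out a single byte. A minimal usage sketch follows; the "vendor,config-bytes" property name and the caller are hypothetical illustrations, only the helper itself comes from this patch.

	#include <linux/of.h>
	#include <linux/printk.h>

	/* Read the second byte (index 1) of a hypothetical multi-u8 property. */
	static int example_read_config_byte(const struct device_node *np)
	{
		u8 mode;
		int ret;

		/* Returns -EINVAL, -ENODATA or -EOVERFLOW on failure. */
		ret = of_property_read_u8_index(np, "vendor,config-bytes", 1, &mode);
		if (ret)
			return ret;

		pr_info("config byte 1 = %u\n", mode);
		return 0;
	}

As with the other indexed readers, out_value is only written when the property exists and is large enough, so callers can rely on the previous contents of the variable on error.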
diff --git a/drivers/pci/controller/pcie-rzg3s-host.c b/drivers/pci/controller/pcie-rzg3s-host.c index 667e6d629474cf..83ec66a7082361 100644 --- a/drivers/pci/controller/pcie-rzg3s-host.c +++ b/drivers/pci/controller/pcie-rzg3s-host.c @@ -479,7 +479,7 @@ static void rzg3s_pcie_intx_irq_handler(struct irq_desc *desc) static irqreturn_t rzg3s_pcie_msi_irq(int irq, void *data) { u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG; - DECLARE_BITMAP(bitmap, RZG3S_PCI_MSI_INT_NR); + DECLARE_BITMAP(bitmap, RZG3S_PCI_MSI_INT_NR) = {0}; struct rzg3s_pcie_host *host = data; struct rzg3s_pcie_msi *msi = &host->msi; unsigned long bit; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index bf2d101f67a1e0..6f3147518376a0 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -760,7 +760,9 @@ config PWM_TEGRA config PWM_TH1520 tristate "TH1520 PWM support" + depends on ARCH_THEAD || COMPILE_TEST depends on RUST + depends on HAS_IOMEM && COMMON_CLK select RUST_PWM_ABSTRACTIONS help This option enables the driver for the PWM controller found on the diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 2933c41c77c88e..50dc779f7f9830 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -409,13 +409,22 @@ config RTC_DRV_MAX77686 config RTC_DRV_SPACEMIT_P1 tristate "SpacemiT P1 RTC" depends on ARCH_SPACEMIT || COMPILE_TEST - select MFD_SPACEMIT_P1 - default ARCH_SPACEMIT + depends on MFD_SPACEMIT_P1 + default MFD_SPACEMIT_P1 help Enable support for the RTC function in the SpacemiT P1 PMIC. This driver can also be built as a module, which will be called "spacemit-p1-rtc". +config RTC_DRV_NVIDIA_VRS10 + tristate "NVIDIA VRS10 RTC device" + help + If you say yes here you will get support for the battery backed RTC device + of NVIDIA VRS (Voltage Regulator Specification). The RTC is connected via + I2C interface and supports alarm functionality. + This driver can also be built as a module. If so, the module will be called + rtc-nvidia-vrs10. + config RTC_DRV_NCT3018Y tristate "Nuvoton NCT3018Y" depends on OF @@ -1063,6 +1072,21 @@ config RTC_DRV_ALPHA Direct support for the real-time clock found on every Alpha system, specifically MC146818 compatibles. If in doubt, say Y. +config RTC_DRV_ATCRTC100 + tristate "Andes ATCRTC100" + depends on ARCH_ANDES || COMPILE_TEST + select REGMAP_MMIO + help + If you say yes here you will get support for the Andes ATCRTC100 + RTC driver. + + This driver provides support for the Andes ATCRTC100 real-time clock + device. It allows setting and retrieving the time and date, as well + as setting alarms. + + To compile this driver as a module, choose M here: the module will + be called rtc-atcrtc100. + config RTC_DRV_DS1216 tristate "Dallas DS1216" depends on SNI_RM @@ -1749,7 +1773,7 @@ config RTC_DRV_MC13XXX tristate "Freescale MC13xxx RTC" help This enables support for the RTCs found on Freescale's PMICs - MC13783 and MC13892. + MC13783, MC13892 and MC34708. config RTC_DRV_MPC5121 tristate "Freescale MPC5121 built-in RTC" @@ -2074,6 +2098,17 @@ config RTC_DRV_WILCO_EC This can also be built as a module. If so, the module will be named "rtc_wilco_ec". +config RTC_DRV_MACSMC + tristate "Apple Mac System Management Controller RTC" + depends on MFD_MACSMC + help + If you say yes here you get support for RTC functions + inside Apple SPMI PMUs accessed through the SoC's + System Management Controller + + To compile this driver as a module, choose M here: the + module will be called rtc-macsmc. 
+ config RTC_DRV_MSC313 tristate "MStar MSC313 RTC" depends on ARCH_MSTARV7 || COMPILE_TEST diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 8221bda6e6dcab..6cf7e066314e13 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_RTC_DRV_ASM9260) += rtc-asm9260.o obj-$(CONFIG_RTC_DRV_ASPEED) += rtc-aspeed.o obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o +obj-$(CONFIG_RTC_DRV_ATCRTC100) += rtc-atcrtc100.o obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o obj-$(CONFIG_RTC_DRV_BBNSM) += rtc-nxp-bbnsm.o obj-$(CONFIG_RTC_DRV_BD70528) += rtc-bd70528.o @@ -93,6 +94,7 @@ obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o obj-$(CONFIG_RTC_DRV_MA35D1) += rtc-ma35d1.o +obj-$(CONFIG_RTC_DRV_MACSMC) += rtc-macsmc.o obj-$(CONFIG_RTC_DRV_MAX31335) += rtc-max31335.o obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o @@ -121,6 +123,7 @@ obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o obj-$(CONFIG_RTC_DRV_NCT3018Y) += rtc-nct3018y.o obj-$(CONFIG_RTC_DRV_NCT6694) += rtc-nct6694.o obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o +obj-$(CONFIG_RTC_DRV_NVIDIA_VRS10)+= rtc-nvidia-vrs10.o obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o obj-$(CONFIG_RTC_DRV_OPTEE) += rtc-optee.o diff --git a/drivers/rtc/rtc-amlogic-a4.c b/drivers/rtc/rtc-amlogic-a4.c index 1928b29c10454d..123fb372fc9fe0 100644 --- a/drivers/rtc/rtc-amlogic-a4.c +++ b/drivers/rtc/rtc-amlogic-a4.c @@ -361,39 +361,26 @@ static int aml_rtc_probe(struct platform_device *pdev) "failed to get_enable rtc sys clk\n"); aml_rtc_init(rtc); - device_init_wakeup(dev, true); + devm_device_init_wakeup(dev); platform_set_drvdata(pdev, rtc); rtc->rtc_dev = devm_rtc_allocate_device(dev); - if (IS_ERR(rtc->rtc_dev)) { - ret = PTR_ERR(rtc->rtc_dev); - goto err_clk; - } + if (IS_ERR(rtc->rtc_dev)) + return PTR_ERR(rtc->rtc_dev); ret = devm_request_irq(dev, rtc->irq, aml_rtc_handler, IRQF_ONESHOT, "aml-rtc alarm", rtc); if (ret) { dev_err_probe(dev, ret, "IRQ%d request failed, ret = %d\n", rtc->irq, ret); - goto err_clk; + return ret; } rtc->rtc_dev->ops = &aml_rtc_ops; rtc->rtc_dev->range_min = 0; rtc->rtc_dev->range_max = U32_MAX; - ret = devm_rtc_register_device(rtc->rtc_dev); - if (ret) { - dev_err_probe(&pdev->dev, ret, "Failed to register RTC device: %d\n", ret); - goto err_clk; - } - - return 0; -err_clk: - clk_disable_unprepare(rtc->sys_clk); - device_init_wakeup(dev, false); - - return ret; + return devm_rtc_register_device(rtc->rtc_dev); } #ifdef CONFIG_PM_SLEEP @@ -421,14 +408,6 @@ static int aml_rtc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(aml_rtc_pm_ops, aml_rtc_suspend, aml_rtc_resume); -static void aml_rtc_remove(struct platform_device *pdev) -{ - struct aml_rtc_data *rtc = dev_get_drvdata(&pdev->dev); - - clk_disable_unprepare(rtc->sys_clk); - device_init_wakeup(&pdev->dev, false); -} - static const struct aml_rtc_config a5_rtc_config = { }; @@ -451,7 +430,6 @@ MODULE_DEVICE_TABLE(of, aml_rtc_device_id); static struct platform_driver aml_rtc_driver = { .probe = aml_rtc_probe, - .remove = aml_rtc_remove, .driver = { .name = "aml-rtc", .pm = &aml_rtc_pm_ops, diff --git a/drivers/rtc/rtc-atcrtc100.c b/drivers/rtc/rtc-atcrtc100.c new file mode 100644 index 00000000000000..9808fc2c5a492a --- /dev/null +++ b/drivers/rtc/rtc-atcrtc100.c @@ -0,0 +1,381 @@ +// SPDX-License-Identifier: GPL-2.0-or-later 
+/* + * Driver for Andes ATCRTC100 real time clock. + * + * Copyright (C) 2025 Andes Technology Corporation + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/math64.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_wakeirq.h> +#include <linux/regmap.h> +#include <linux/rtc.h> +#include <linux/workqueue.h> + +/* Register Offsets */ +#define RTC_ID 0x00 /* ID and Revision Register */ +#define RTC_RSV 0x04 /* Reserved Register */ +#define RTC_CNT 0x10 /* Counter Register */ +#define RTC_ALM 0x14 /* Alarm Register */ +#define RTC_CR 0x18 /* Control Register */ +#define RTC_STA 0x1C /* Status Register */ +#define RTC_TRIM 0x20 /* Digital Trimming Register */ + +/* RTC_ID Register */ +#define ID_MSK GENMASK(31, 8) +#define ID_ATCRTC100 0x030110 + +/* RTC_CNT and RTC_ALM Register Fields */ +#define SEC_MSK GENMASK(5, 0) +#define MIN_MSK GENMASK(11, 6) +#define HOUR_MSK GENMASK(16, 12) +#define DAY_MSK GENMASK(31, 17) +#define RTC_SEC_GET(x) FIELD_GET(SEC_MSK, x) +#define RTC_MIN_GET(x) FIELD_GET(MIN_MSK, x) +#define RTC_HOUR_GET(x) FIELD_GET(HOUR_MSK, x) +#define RTC_DAY_GET(x) FIELD_GET(DAY_MSK, x) +#define RTC_SEC_SET(x) FIELD_PREP(SEC_MSK, x) +#define RTC_MIN_SET(x) FIELD_PREP(MIN_MSK, x) +#define RTC_HOUR_SET(x) FIELD_PREP(HOUR_MSK, x) +#define RTC_DAY_SET(x) FIELD_PREP(DAY_MSK, x) + +/* RTC_CR Register Bits */ +#define RTC_EN BIT(0) /* RTC Enable */ +#define ALARM_WAKEUP BIT(1) /* Alarm Wakeup Enable */ +#define ALARM_INT BIT(2) /* Alarm Interrupt Enable */ +#define DAY_INT BIT(3) /* Day Interrupt Enable */ +#define HOUR_INT BIT(4) /* Hour Interrupt Enable */ +#define MIN_INT BIT(5) /* Minute Interrupt Enable */ +#define SEC_INT BIT(6) /* Second Periodic Interrupt Enable */ +#define HSEC_INT BIT(7) /* Half-Second Periodic Interrupt Enable */ + +/* RTC_STA Register Bits */ +#define WRITE_DONE BIT(16) /* Register write completion status */ + +/* Time conversion macro */ +#define ATCRTC_TIME_TO_SEC(D, H, M, S) \ + ((time64_t)(D) * 86400 + (H) * 3600 + (M) * 60 + (S)) + +/* Timeout for waiting for the write_done bit */ +#define ATCRTC_TIMEOUT_US 1000000 +#define ATCRTC_TIMEOUT_USLEEP_MIN 20 +#define ATCRTC_TIMEOUT_USLEEP_MAX 30 + +struct atcrtc_dev { + struct rtc_device *rtc_dev; + struct regmap *regmap; + struct work_struct rtc_work; + unsigned int alarm_irq; + bool alarm_en; +}; + +static const struct regmap_config atcrtc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = RTC_TRIM, + .cache_type = REGCACHE_NONE, +}; + +/** + * atcrtc_check_write_done - Wait for RTC registers to be synchronized. + * @rtc: Pointer to the atcrtc_dev structure. + * + * The WriteDone bit in the status register indicates the synchronization + * progress of RTC register updates. This bit is cleared to zero whenever + * any RTC control register (Counter, Alarm, Control, etc.) is written. + * It returns to one only after all previous updates have been fully + * synchronized to the RTC clock domain. This function polls the WriteDone + * bit with a timeout to ensure the device is ready for the next operation. + * + * Return: 0 on success, or -EBUSY on timeout. + */ +static int atcrtc_check_write_done(struct atcrtc_dev *rtc) +{ + unsigned int val; + + /* + * Using read_poll_timeout is more efficient than a manual loop + * with usleep_range. 
+ */ + return regmap_read_poll_timeout(rtc->regmap, RTC_STA, val, + val & WRITE_DONE, + ATCRTC_TIMEOUT_USLEEP_MIN, + ATCRTC_TIMEOUT_US); +} + +static irqreturn_t atcrtc_alarm_isr(int irq, void *dev) +{ + struct atcrtc_dev *rtc = dev; + unsigned int status; + + regmap_read(rtc->regmap, RTC_STA, &status); + if (status & ALARM_INT) { + regmap_write(rtc->regmap, RTC_STA, ALARM_INT); + rtc->alarm_en = false; + schedule_work(&rtc->rtc_work); + rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static int atcrtc_alarm_irq_enable(struct device *dev, unsigned int enable) +{ + struct atcrtc_dev *rtc = dev_get_drvdata(dev); + unsigned int mask; + int ret; + + ret = atcrtc_check_write_done(rtc); + if (ret) + return ret; + + mask = ALARM_WAKEUP | ALARM_INT; + regmap_update_bits(rtc->regmap, RTC_CR, mask, enable ? mask : 0); + + return 0; +} + +static void atcrtc_alarm_clear(struct work_struct *work) +{ + struct atcrtc_dev *rtc = + container_of(work, struct atcrtc_dev, rtc_work); + int ret; + + rtc_lock(rtc->rtc_dev); + + if (!rtc->alarm_en) { + ret = atcrtc_check_write_done(rtc); + if (ret) + dev_info(&rtc->rtc_dev->dev, + "failed to sync before clearing alarm: %d\n", + ret); + else + regmap_update_bits(rtc->regmap, RTC_CR, + ALARM_WAKEUP | ALARM_INT, 0); + } + rtc_unlock(rtc->rtc_dev); +} + +static int atcrtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct atcrtc_dev *rtc = dev_get_drvdata(dev); + time64_t time; + unsigned int rtc_cnt; + + if (!regmap_test_bits(rtc->regmap, RTC_CR, RTC_EN)) + return -EIO; + + regmap_read(rtc->regmap, RTC_CNT, &rtc_cnt); + time = ATCRTC_TIME_TO_SEC(RTC_DAY_GET(rtc_cnt), + RTC_HOUR_GET(rtc_cnt), + RTC_MIN_GET(rtc_cnt), + RTC_SEC_GET(rtc_cnt)); + rtc_time64_to_tm(time, tm); + + return 0; +} + +static int atcrtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct atcrtc_dev *rtc = dev_get_drvdata(dev); + time64_t time; + unsigned int counter; + unsigned int day; + int ret; + + time = rtc_tm_to_time64(tm); + day = div_s64(time, 86400); + counter = RTC_DAY_SET(day) | + RTC_HOUR_SET(tm->tm_hour) | + RTC_MIN_SET(tm->tm_min) | + RTC_SEC_SET(tm->tm_sec); + ret = atcrtc_check_write_done(rtc); + if (ret) + return ret; + regmap_write(rtc->regmap, RTC_CNT, counter); + + ret = atcrtc_check_write_done(rtc); + if (ret) + return ret; + regmap_update_bits(rtc->regmap, RTC_CR, RTC_EN, RTC_EN); + + return 0; +} + +static int atcrtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) +{ + struct atcrtc_dev *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &wkalrm->time; + unsigned int rtc_alarm; + + wkalrm->enabled = regmap_test_bits(rtc->regmap, RTC_CR, ALARM_INT); + regmap_read(rtc->regmap, RTC_ALM, &rtc_alarm); + tm->tm_hour = RTC_HOUR_GET(rtc_alarm); + tm->tm_min = RTC_MIN_GET(rtc_alarm); + tm->tm_sec = RTC_SEC_GET(rtc_alarm); + + /* The RTC alarm does not support day/month/year fields */ + tm->tm_mday = -1; + tm->tm_mon = -1; + tm->tm_year = -1; + + return 0; +} + +static int atcrtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) +{ + struct atcrtc_dev *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &wkalrm->time; + unsigned int rtc_alarm; + int ret; + + /* Disable alarm first before setting a new one */ + ret = atcrtc_alarm_irq_enable(dev, 0); + if (ret) + return ret; + + rtc->alarm_en = false; + + rtc_alarm = RTC_SEC_SET(tm->tm_sec) | + RTC_MIN_SET(tm->tm_min) | + RTC_HOUR_SET(tm->tm_hour); + + ret = atcrtc_check_write_done(rtc); + if (ret) + return ret; + + regmap_write(rtc->regmap, 
RTC_ALM, rtc_alarm); + + rtc->alarm_en = wkalrm->enabled; + ret = atcrtc_alarm_irq_enable(dev, wkalrm->enabled); + + return ret; +} + +static const struct rtc_class_ops rtc_ops = { + .read_time = atcrtc_read_time, + .set_time = atcrtc_set_time, + .read_alarm = atcrtc_read_alarm, + .set_alarm = atcrtc_set_alarm, + .alarm_irq_enable = atcrtc_alarm_irq_enable, +}; + +static int atcrtc_probe(struct platform_device *pdev) +{ + struct atcrtc_dev *atcrtc_dev; + void __iomem *reg_base; + unsigned int rtc_id; + int ret; + + atcrtc_dev = devm_kzalloc(&pdev->dev, sizeof(*atcrtc_dev), GFP_KERNEL); + if (!atcrtc_dev) + return -ENOMEM; + platform_set_drvdata(pdev, atcrtc_dev); + + reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(reg_base)) + return dev_err_probe(&pdev->dev, PTR_ERR(reg_base), + "Failed to map I/O space\n"); + + atcrtc_dev->regmap = devm_regmap_init_mmio(&pdev->dev, + reg_base, + &atcrtc_regmap_config); + if (IS_ERR(atcrtc_dev->regmap)) + return dev_err_probe(&pdev->dev, PTR_ERR(atcrtc_dev->regmap), + "Failed to initialize regmap\n"); + + regmap_read(atcrtc_dev->regmap, RTC_ID, &rtc_id); + if (FIELD_GET(ID_MSK, rtc_id) != ID_ATCRTC100) + return dev_err_probe(&pdev->dev, -ENODEV, + "Failed to initialize RTC: unsupported hardware ID 0x%x\n", + rtc_id); + + ret = platform_get_irq(pdev, 1); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "Failed to get IRQ for alarm\n"); + atcrtc_dev->alarm_irq = ret; + + ret = devm_request_irq(&pdev->dev, + atcrtc_dev->alarm_irq, + atcrtc_alarm_isr, + 0, + "atcrtc_alarm", + atcrtc_dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to request IRQ %d for alarm\n", + atcrtc_dev->alarm_irq); + + atcrtc_dev->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(atcrtc_dev->rtc_dev)) + return dev_err_probe(&pdev->dev, PTR_ERR(atcrtc_dev->rtc_dev), + "Failed to allocate RTC device\n"); + + set_bit(RTC_FEATURE_ALARM, atcrtc_dev->rtc_dev->features); + ret = device_init_wakeup(&pdev->dev, true); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to initialize wake capability\n"); + + ret = dev_pm_set_wake_irq(&pdev->dev, atcrtc_dev->alarm_irq); + if (ret) { + device_init_wakeup(&pdev->dev, false); + return dev_err_probe(&pdev->dev, ret, + "Failed to set wake IRQ\n"); + } + + atcrtc_dev->rtc_dev->ops = &rtc_ops; + + INIT_WORK(&atcrtc_dev->rtc_work, atcrtc_alarm_clear); + return devm_rtc_register_device(atcrtc_dev->rtc_dev); +} + +static int atcrtc_resume(struct device *dev) +{ + struct atcrtc_dev *rtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(rtc->alarm_irq); + + return 0; +} + +static int atcrtc_suspend(struct device *dev) +{ + struct atcrtc_dev *rtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(rtc->alarm_irq); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(atcrtc_pm_ops, atcrtc_suspend, atcrtc_resume); + +static const struct of_device_id atcrtc_dt_match[] = { + { .compatible = "andestech,atcrtc100" }, + { }, +}; +MODULE_DEVICE_TABLE(of, atcrtc_dt_match); + +static struct platform_driver atcrtc_platform_driver = { + .driver = { + .name = "atcrtc100", + .of_match_table = atcrtc_dt_match, + .pm = pm_sleep_ptr(&atcrtc_pm_ops), + }, + .probe = atcrtc_probe, +}; + +module_platform_driver(atcrtc_platform_driver); + +MODULE_AUTHOR("CL Wang <cl634@andestech.com>"); +MODULE_DESCRIPTION("Andes ATCRTC100 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index 97423f1d0361f9..5fc8e36b1abf81 100644 --- 
a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c @@ -1268,9 +1268,6 @@ ds1685_rtc_probe(struct platform_device *pdev) rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000; rtc_dev->range_max = RTC_TIMESTAMP_END_2099; - /* Maximum periodic rate is 8192Hz (0.122070ms). */ - rtc_dev->max_user_freq = RTC_MAX_USER_FREQ; - /* See if the platform doesn't support UIE. */ if (pdata->uie_unsupported) clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc_dev->features); diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c index c828bc8e05b9cc..045d5d45ab4b07 100644 --- a/drivers/rtc/rtc-gamecube.c +++ b/drivers/rtc/rtc-gamecube.c @@ -242,6 +242,10 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d) } hw_srnprot = ioremap(res.start, resource_size(&res)); + if (!hw_srnprot) { + pr_err("failed to ioremap hw_srnprot\n"); + return -ENOMEM; + } old = ioread32be(hw_srnprot); /* TODO: figure out why we use this magic constant. I obtained it by diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c index 2aabb9151d4c6c..45a2c9f676c53f 100644 --- a/drivers/rtc/rtc-isl12026.c +++ b/drivers/rtc/rtc-isl12026.c @@ -484,6 +484,12 @@ static const struct of_device_id isl12026_dt_match[] = { }; MODULE_DEVICE_TABLE(of, isl12026_dt_match); +static const struct i2c_device_id isl12026_id[] = { + { "isl12026" }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, isl12026_id); + static struct i2c_driver isl12026_driver = { .driver = { .name = "rtc-isl12026", @@ -491,6 +497,7 @@ static struct i2c_driver isl12026_driver = { }, .probe = isl12026_probe, .remove = isl12026_remove, + .id_table = isl12026_id, }; module_i2c_driver(isl12026_driver); diff --git a/drivers/rtc/rtc-macsmc.c b/drivers/rtc/rtc-macsmc.c new file mode 100644 index 00000000000000..8fe88306695691 --- /dev/null +++ b/drivers/rtc/rtc-macsmc.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SMC RTC driver + * Copyright The Asahi Linux Contributors + */ + +#include <linux/bitops.h> +#include <linux/mfd/macsmc.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/slab.h> + +/* 48-bit RTC */ +#define RTC_BYTES 6 +#define RTC_BITS (8 * RTC_BYTES) + +/* 32768 Hz clock */ +#define RTC_SEC_SHIFT 15 + +struct macsmc_rtc { + struct device *dev; + struct apple_smc *smc; + struct rtc_device *rtc_dev; + struct nvmem_cell *rtc_offset; +}; + +static int macsmc_rtc_get_time(struct device *dev, struct rtc_time *tm) +{ + struct macsmc_rtc *rtc = dev_get_drvdata(dev); + u64 ctr = 0, off = 0; + time64_t now; + void *p_off; + size_t len; + int ret; + + ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES); + if (ret < 0) + return ret; + if (ret != RTC_BYTES) + return -EIO; + + p_off = nvmem_cell_read(rtc->rtc_offset, &len); + if (IS_ERR(p_off)) + return PTR_ERR(p_off); + if (len < RTC_BYTES) { + kfree(p_off); + return -EIO; + } + + memcpy(&off, p_off, RTC_BYTES); + kfree(p_off); + + /* Sign extend from 48 to 64 bits, then arithmetic shift right 15 bits to get seconds */ + now = sign_extend64(ctr + off, RTC_BITS - 1) >> RTC_SEC_SHIFT; + rtc_time64_to_tm(now, tm); + + return ret; +} + +static int macsmc_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct macsmc_rtc *rtc = dev_get_drvdata(dev); + u64 ctr = 0, off = 0; + int ret; + + ret = apple_smc_read(rtc->smc, SMC_KEY(CLKM), &ctr, RTC_BYTES); + if (ret < 0) + return ret; + if (ret != RTC_BYTES) + return -EIO; + + /* This sets the offset such that 
the set second begins now */ + off = (rtc_tm_to_time64(tm) << RTC_SEC_SHIFT) - ctr; + return nvmem_cell_write(rtc->rtc_offset, &off, RTC_BYTES); +} + +static const struct rtc_class_ops macsmc_rtc_ops = { + .read_time = macsmc_rtc_get_time, + .set_time = macsmc_rtc_set_time, +}; + +static int macsmc_rtc_probe(struct platform_device *pdev) +{ + struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent); + struct macsmc_rtc *rtc; + + /* + * MFD will probe this device even without a node in the device tree, + * thus bail out early if the SMC on the current machines does not + * support RTC and has no node in the device tree. + */ + if (!pdev->dev.of_node) + return -ENODEV; + + rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + rtc->dev = &pdev->dev; + rtc->smc = smc; + + rtc->rtc_offset = devm_nvmem_cell_get(&pdev->dev, "rtc_offset"); + if (IS_ERR(rtc->rtc_offset)) + return dev_err_probe(&pdev->dev, PTR_ERR(rtc->rtc_offset), + "Failed to get rtc_offset NVMEM cell\n"); + + rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc->rtc_dev)) + return PTR_ERR(rtc->rtc_dev); + + rtc->rtc_dev->ops = &macsmc_rtc_ops; + rtc->rtc_dev->range_min = S64_MIN >> (RTC_SEC_SHIFT + (64 - RTC_BITS)); + rtc->rtc_dev->range_max = S64_MAX >> (RTC_SEC_SHIFT + (64 - RTC_BITS)); + + platform_set_drvdata(pdev, rtc); + + return devm_rtc_register_device(rtc->rtc_dev); +} + +static const struct of_device_id macsmc_rtc_of_table[] = { + { .compatible = "apple,smc-rtc", }, + {} +}; +MODULE_DEVICE_TABLE(of, macsmc_rtc_of_table); + +static struct platform_driver macsmc_rtc_driver = { + .driver = { + .name = "macsmc-rtc", + .of_match_table = macsmc_rtc_of_table, + }, + .probe = macsmc_rtc_probe, +}; +module_platform_driver(macsmc_rtc_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_DESCRIPTION("Apple SMC RTC driver"); +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c index dfb5bad3a3691d..23b7bf16b4cd5d 100644 --- a/drivers/rtc/rtc-max31335.c +++ b/drivers/rtc/rtc-max31335.c @@ -391,10 +391,8 @@ static int max31335_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) if (ret) return ret; - ret = regmap_update_bits(max31335->regmap, max31335->chip->int_status_reg, - MAX31335_STATUS1_A1F, 0); - - return 0; + return regmap_update_bits(max31335->regmap, max31335->chip->int_status_reg, + MAX31335_STATUS1_A1F, 0); } static int max31335_alarm_irq_enable(struct device *dev, unsigned int enabled) diff --git a/drivers/rtc/rtc-nvidia-vrs10.c b/drivers/rtc/rtc-nvidia-vrs10.c new file mode 100644 index 00000000000000..b157966985581b --- /dev/null +++ b/drivers/rtc/rtc-nvidia-vrs10.c @@ -0,0 +1,542 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * NVIDIA Voltage Regulator Specification RTC + * + * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. + * All rights reserved. 
+ */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/rtc.h> + +#define NVVRS_REG_VENDOR_ID 0x00 +#define NVVRS_REG_MODEL_REV 0x01 + +/* Interrupts registers */ +#define NVVRS_REG_INT_SRC1 0x10 +#define NVVRS_REG_INT_SRC2 0x11 +#define NVVRS_REG_INT_VENDOR 0x12 + +/* Control Registers */ +#define NVVRS_REG_CTL_1 0x28 +#define NVVRS_REG_CTL_2 0x29 + +/* RTC Registers */ +#define NVVRS_REG_RTC_T3 0x70 +#define NVVRS_REG_RTC_T2 0x71 +#define NVVRS_REG_RTC_T1 0x72 +#define NVVRS_REG_RTC_T0 0x73 +#define NVVRS_REG_RTC_A3 0x74 +#define NVVRS_REG_RTC_A2 0x75 +#define NVVRS_REG_RTC_A1 0x76 +#define NVVRS_REG_RTC_A0 0x77 + +/* Interrupt Mask */ +#define NVVRS_INT_SRC1_RSTIRQ_MASK BIT(0) +#define NVVRS_INT_SRC1_OSC_MASK BIT(1) +#define NVVRS_INT_SRC1_EN_MASK BIT(2) +#define NVVRS_INT_SRC1_RTC_MASK BIT(3) +#define NVVRS_INT_SRC1_PEC_MASK BIT(4) +#define NVVRS_INT_SRC1_WDT_MASK BIT(5) +#define NVVRS_INT_SRC1_EM_PD_MASK BIT(6) +#define NVVRS_INT_SRC1_INTERNAL_MASK BIT(7) +#define NVVRS_INT_SRC2_PBSP_MASK BIT(0) +#define NVVRS_INT_SRC2_ECC_DED_MASK BIT(1) +#define NVVRS_INT_SRC2_TSD_MASK BIT(2) +#define NVVRS_INT_SRC2_LDO_MASK BIT(3) +#define NVVRS_INT_SRC2_BIST_MASK BIT(4) +#define NVVRS_INT_SRC2_RT_CRC_MASK BIT(5) +#define NVVRS_INT_SRC2_VENDOR_MASK BIT(7) +#define NVVRS_INT_VENDOR0_MASK BIT(0) +#define NVVRS_INT_VENDOR1_MASK BIT(1) +#define NVVRS_INT_VENDOR2_MASK BIT(2) +#define NVVRS_INT_VENDOR3_MASK BIT(3) +#define NVVRS_INT_VENDOR4_MASK BIT(4) +#define NVVRS_INT_VENDOR5_MASK BIT(5) +#define NVVRS_INT_VENDOR6_MASK BIT(6) +#define NVVRS_INT_VENDOR7_MASK BIT(7) + +/* Controller Register Mask */ +#define NVVRS_REG_CTL_1_FORCE_SHDN (BIT(0) | BIT(1)) +#define NVVRS_REG_CTL_1_FORCE_ACT BIT(2) +#define NVVRS_REG_CTL_1_FORCE_INT BIT(3) +#define NVVRS_REG_CTL_2_EN_PEC BIT(0) +#define NVVRS_REG_CTL_2_REQ_PEC BIT(1) +#define NVVRS_REG_CTL_2_RTC_PU BIT(2) +#define NVVRS_REG_CTL_2_RTC_WAKE BIT(3) +#define NVVRS_REG_CTL_2_RST_DLY 0xF0 + +#define ALARM_RESET_VAL 0xffffffff +#define NVVRS_MIN_MODEL_REV 0x40 + +enum nvvrs_irq_regs { + NVVRS_IRQ_REG_INT_SRC1 = 0, + NVVRS_IRQ_REG_INT_SRC2 = 1, + NVVRS_IRQ_REG_INT_VENDOR = 2, + NVVRS_IRQ_REG_COUNT = 3, +}; + +struct nvvrs_rtc_info { + struct device *dev; + struct i2c_client *client; + struct rtc_device *rtc; + unsigned int irq; +}; + +static int nvvrs_update_bits(struct nvvrs_rtc_info *info, u8 reg, + u8 mask, u8 value) +{ + int ret; + u8 val; + + ret = i2c_smbus_read_byte_data(info->client, reg); + if (ret < 0) + return ret; + + val = (u8)ret; + val &= ~mask; + val |= (value & mask); + + return i2c_smbus_write_byte_data(info->client, reg, val); +} + +static int nvvrs_rtc_write_alarm(struct i2c_client *client, u8 *time) +{ + int ret; + + ret = i2c_smbus_write_byte_data(client, NVVRS_REG_RTC_A3, time[3]); + if (ret < 0) + return ret; + + ret = i2c_smbus_write_byte_data(client, NVVRS_REG_RTC_A2, time[2]); + if (ret < 0) + return ret; + + ret = i2c_smbus_write_byte_data(client, NVVRS_REG_RTC_A1, time[1]); + if (ret < 0) + return ret; + + return i2c_smbus_write_byte_data(client, NVVRS_REG_RTC_A0, time[0]); +} + +static int nvvrs_rtc_enable_alarm(struct nvvrs_rtc_info *info) +{ + int ret; + + /* Set RTC_WAKE bit for autonomous wake from sleep */ + ret = nvvrs_update_bits(info, NVVRS_REG_CTL_2, NVVRS_REG_CTL_2_RTC_WAKE, + NVVRS_REG_CTL_2_RTC_WAKE); + if (ret < 0) + return ret; + + /* Set RTC_PU bit for autonomous wake from shutdown */ + ret = nvvrs_update_bits(info, 
NVVRS_REG_CTL_2, NVVRS_REG_CTL_2_RTC_PU, + NVVRS_REG_CTL_2_RTC_PU); + if (ret < 0) + return ret; + + return 0; +} + +static int nvvrs_rtc_disable_alarm(struct nvvrs_rtc_info *info) +{ + struct i2c_client *client = info->client; + u8 val[4]; + int ret; + + /* Clear RTC_WAKE bit */ + ret = nvvrs_update_bits(info, NVVRS_REG_CTL_2, NVVRS_REG_CTL_2_RTC_WAKE, + 0); + if (ret < 0) + return ret; + + /* Clear RTC_PU bit */ + ret = nvvrs_update_bits(info, NVVRS_REG_CTL_2, NVVRS_REG_CTL_2_RTC_PU, + 0); + if (ret < 0) + return ret; + + /* Write ALARM_RESET_VAL in RTC Alarm register to disable alarm */ + val[0] = 0xff; + val[1] = 0xff; + val[2] = 0xff; + val[3] = 0xff; + + ret = nvvrs_rtc_write_alarm(client, val); + if (ret < 0) + return ret; + + return 0; +} + +static int nvvrs_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct nvvrs_rtc_info *info = dev_get_drvdata(dev); + time64_t secs = 0; + int ret; + u8 val; + + /* + * Multi-byte transfers are not supported with PEC enabled + * Read MSB first to avoid coherency issues + */ + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_T3); + if (ret < 0) + return ret; + + val = (u8)ret; + secs |= (time64_t)val << 24; + + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_T2); + if (ret < 0) + return ret; + + val = (u8)ret; + secs |= (time64_t)val << 16; + + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_T1); + if (ret < 0) + return ret; + + val = (u8)ret; + secs |= (time64_t)val << 8; + + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_T0); + if (ret < 0) + return ret; + + val = (u8)ret; + secs |= val; + + rtc_time64_to_tm(secs, tm); + + return 0; +} + +static int nvvrs_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct nvvrs_rtc_info *info = dev_get_drvdata(dev); + time64_t secs; + u8 time[4]; + int ret; + + secs = rtc_tm_to_time64(tm); + time[0] = secs & 0xff; + time[1] = (secs >> 8) & 0xff; + time[2] = (secs >> 16) & 0xff; + time[3] = (secs >> 24) & 0xff; + + ret = i2c_smbus_write_byte_data(info->client, NVVRS_REG_RTC_T3, time[3]); + if (ret < 0) + return ret; + + ret = i2c_smbus_write_byte_data(info->client, NVVRS_REG_RTC_T2, time[2]); + if (ret < 0) + return ret; + + ret = i2c_smbus_write_byte_data(info->client, NVVRS_REG_RTC_T1, time[1]); + if (ret < 0) + return ret; + + ret = i2c_smbus_write_byte_data(info->client, NVVRS_REG_RTC_T0, time[0]); + + return ret; +} + +static int nvvrs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct nvvrs_rtc_info *info = dev_get_drvdata(dev); + time64_t alarm_val = 0; + int ret; + u8 val; + + /* Multi-byte transfers are not supported with PEC enabled */ + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_A3); + if (ret < 0) + return ret; + + val = (u8)ret; + alarm_val |= (time64_t)val << 24; + + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_A2); + if (ret < 0) + return ret; + + val = (u8)ret; + alarm_val |= (time64_t)val << 16; + + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_A1); + if (ret < 0) + return ret; + + val = (u8)ret; + alarm_val |= (time64_t)val << 8; + + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_RTC_A0); + if (ret < 0) + return ret; + + val = (u8)ret; + alarm_val |= val; + + if (alarm_val == ALARM_RESET_VAL) + alrm->enabled = 0; + else + alrm->enabled = 1; + + rtc_time64_to_tm(alarm_val, &alrm->time); + + return 0; +} + +static int nvvrs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct nvvrs_rtc_info *info = dev_get_drvdata(dev); + time64_t secs; 
+ u8 time[4]; + int ret; + + if (!alrm->enabled) { + ret = nvvrs_rtc_disable_alarm(info); + if (ret < 0) + return ret; + } + + ret = nvvrs_rtc_enable_alarm(info); + if (ret < 0) + return ret; + + secs = rtc_tm_to_time64(&alrm->time); + time[0] = secs & 0xff; + time[1] = (secs >> 8) & 0xff; + time[2] = (secs >> 16) & 0xff; + time[3] = (secs >> 24) & 0xff; + + ret = nvvrs_rtc_write_alarm(info->client, time); + + return ret; +} + +static int nvvrs_pseq_irq_clear(struct nvvrs_rtc_info *info) +{ + unsigned int i; + int ret; + + for (i = 0; i < NVVRS_IRQ_REG_COUNT; i++) { + ret = i2c_smbus_read_byte_data(info->client, + NVVRS_REG_INT_SRC1 + i); + if (ret < 0) { + dev_err(info->dev, "Failed to read INT_SRC%d : %d\n", + i + 1, ret); + return ret; + } + + ret = i2c_smbus_write_byte_data(info->client, + NVVRS_REG_INT_SRC1 + i, + (u8)ret); + if (ret < 0) { + dev_err(info->dev, "Failed to clear INT_SRC%d : %d\n", + i + 1, ret); + return ret; + } + } + + return 0; +} + +static irqreturn_t nvvrs_rtc_irq_handler(int irq, void *data) +{ + struct nvvrs_rtc_info *info = data; + int ret; + + /* Check for RTC alarm interrupt */ + ret = i2c_smbus_read_byte_data(info->client, NVVRS_REG_INT_SRC1); + if (ret < 0) + return IRQ_NONE; + + if (ret & NVVRS_INT_SRC1_RTC_MASK) { + rtc_lock(info->rtc); + rtc_update_irq(info->rtc, 1, RTC_IRQF | RTC_AF); + rtc_unlock(info->rtc); + } + + /* Clear all interrupts */ + if (nvvrs_pseq_irq_clear(info) < 0) + return IRQ_NONE; + + return IRQ_HANDLED; +} + +static int nvvrs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + /* + * This hardware does not support enabling/disabling the alarm IRQ + * independently. The alarm is disabled by clearing the alarm time + * via set_alarm(). + */ + return 0; +} + +static const struct rtc_class_ops nvvrs_rtc_ops = { + .read_time = nvvrs_rtc_read_time, + .set_time = nvvrs_rtc_set_time, + .read_alarm = nvvrs_rtc_read_alarm, + .set_alarm = nvvrs_rtc_set_alarm, + .alarm_irq_enable = nvvrs_rtc_alarm_irq_enable, +}; + +static int nvvrs_pseq_vendor_info(struct nvvrs_rtc_info *info) +{ + struct i2c_client *client = info->client; + u8 vendor_id, model_rev; + int ret; + + ret = i2c_smbus_read_byte_data(client, NVVRS_REG_VENDOR_ID); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "Failed to read Vendor ID\n"); + + vendor_id = (u8)ret; + + ret = i2c_smbus_read_byte_data(client, NVVRS_REG_MODEL_REV); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "Failed to read Model Revision\n"); + + model_rev = (u8)ret; + + if (model_rev < NVVRS_MIN_MODEL_REV) { + return dev_err_probe(&client->dev, -ENODEV, + "Chip revision 0x%02x is not supported!\n", + model_rev); + } + + dev_dbg(&client->dev, "NVVRS Vendor ID: 0x%02x, Model Rev: 0x%02x\n", + vendor_id, model_rev); + + return 0; +} + +static int nvvrs_rtc_probe(struct i2c_client *client) +{ + struct nvvrs_rtc_info *info; + int ret; + + info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + if (client->irq <= 0) + return dev_err_probe(&client->dev, -EINVAL, "No IRQ specified\n"); + + info->irq = client->irq; + info->dev = &client->dev; + client->flags |= I2C_CLIENT_PEC; + i2c_set_clientdata(client, info); + info->client = client; + + /* Check vendor info */ + if (nvvrs_pseq_vendor_info(info) < 0) + return dev_err_probe(&client->dev, -EINVAL, + "Failed to get vendor info\n"); + + /* Clear any pending IRQs before requesting IRQ handler */ + if (nvvrs_pseq_irq_clear(info) < 0) + return dev_err_probe(&client->dev, -EINVAL, + "Failed to clear 
interrupts\n"); + + /* Allocate RTC device */ + info->rtc = devm_rtc_allocate_device(info->dev); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); + + info->rtc->ops = &nvvrs_rtc_ops; + info->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + info->rtc->range_max = RTC_TIMESTAMP_END_2099; + + /* Request RTC IRQ */ + ret = devm_request_threaded_irq(info->dev, info->irq, NULL, + nvvrs_rtc_irq_handler, IRQF_ONESHOT, + "nvvrs-rtc", info); + if (ret < 0) { + dev_err_probe(info->dev, ret, "Failed to request RTC IRQ\n"); + return ret; + } + + /* RTC as a wakeup source */ + devm_device_init_wakeup(info->dev); + + return devm_rtc_register_device(info->rtc); +} + +#ifdef CONFIG_PM_SLEEP +static int nvvrs_rtc_suspend(struct device *dev) +{ + struct nvvrs_rtc_info *info = dev_get_drvdata(dev); + int ret; + + if (device_may_wakeup(dev)) { + /* Set RTC_WAKE bit for auto wake system from suspend state */ + ret = nvvrs_update_bits(info, NVVRS_REG_CTL_2, + NVVRS_REG_CTL_2_RTC_WAKE, + NVVRS_REG_CTL_2_RTC_WAKE); + if (ret < 0) { + dev_err(info->dev, "Failed to set RTC_WAKE bit (%d)\n", + ret); + return ret; + } + + return enable_irq_wake(info->irq); + } + + return 0; +} + +static int nvvrs_rtc_resume(struct device *dev) +{ + struct nvvrs_rtc_info *info = dev_get_drvdata(dev); + int ret; + + if (device_may_wakeup(dev)) { + /* Clear FORCE_ACT bit */ + ret = nvvrs_update_bits(info, NVVRS_REG_CTL_1, + NVVRS_REG_CTL_1_FORCE_ACT, 0); + if (ret < 0) { + dev_err(info->dev, "Failed to clear FORCE_ACT bit (%d)\n", + ret); + return ret; + } + + return disable_irq_wake(info->irq); + } + + return 0; +} + +#endif +static SIMPLE_DEV_PM_OPS(nvvrs_rtc_pm_ops, nvvrs_rtc_suspend, nvvrs_rtc_resume); + +static const struct of_device_id nvvrs_rtc_of_match[] = { + { .compatible = "nvidia,vrs-10" }, + { }, +}; +MODULE_DEVICE_TABLE(of, nvvrs_rtc_of_match); + +static struct i2c_driver nvvrs_rtc_driver = { + .driver = { + .name = "rtc-nvidia-vrs10", + .pm = &nvvrs_rtc_pm_ops, + .of_match_table = nvvrs_rtc_of_match, + }, + .probe = nvvrs_rtc_probe, +}; + +module_i2c_driver(nvvrs_rtc_driver); + +MODULE_AUTHOR("Shubhi Garg <shgarg@nvidia.com>"); +MODULE_DESCRIPTION("NVIDIA Voltage Regulator Specification RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c index 2812da2c50c514..52c11532bc3a36 100644 --- a/drivers/rtc/rtc-pic32.c +++ b/drivers/rtc/rtc-pic32.c @@ -340,8 +340,6 @@ static int pic32_rtc_probe(struct platform_device *pdev) if (ret) goto err_nortc; - pdata->rtc->max_user_freq = 128; - pic32_rtc_setfreq(&pdev->dev, 1); ret = devm_request_irq(&pdev->dev, pdata->alarm_irq, pic32_rtc_alarmirq, 0, diff --git a/drivers/rtc/rtc-renesas-rtca3.c b/drivers/rtc/rtc-renesas-rtca3.c index ab816bdf0d7761..cbabaa4dc96a5a 100644 --- a/drivers/rtc/rtc-renesas-rtca3.c +++ b/drivers/rtc/rtc-renesas-rtca3.c @@ -726,7 +726,7 @@ static int rtca3_probe(struct platform_device *pdev) if (ret) return ret; - priv->rstc = devm_reset_control_get_shared(dev, NULL); + priv->rstc = devm_reset_control_array_get_shared(dev); if (IS_ERR(priv->rstc)) return PTR_ERR(priv->rstc); @@ -772,7 +772,6 @@ static int rtca3_probe(struct platform_device *pdev) return PTR_ERR(priv->rtc_dev); priv->rtc_dev->ops = &rtca3_ops; - priv->rtc_dev->max_user_freq = 256; priv->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_2000; priv->rtc_dev->range_max = RTC_TIMESTAMP_END_2099; diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c index c2a531f0e125be..d96f6bb68850da 100644 --- a/drivers/rtc/rtc-rv3028.c +++ 
b/drivers/rtc/rtc-rv3028.c @@ -1023,8 +1023,6 @@ static int rv3028_probe(struct i2c_client *client) eeprom_cfg.priv = rv3028; devm_rtc_nvmem_register(rv3028->rtc, &eeprom_cfg); - rv3028->rtc->max_user_freq = 1; - #ifdef CONFIG_COMMON_CLK rv3028_clkout_register_clk(rv3028, client); #endif diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c index b8376bd1d905be..6c09da7738e1a0 100644 --- a/drivers/rtc/rtc-rv3032.c +++ b/drivers/rtc/rtc-rv3032.c @@ -968,8 +968,6 @@ static int rv3032_probe(struct i2c_client *client) eeprom_cfg.priv = rv3032; devm_rtc_nvmem_register(rv3032->rtc, &eeprom_cfg); - rv3032->rtc->max_user_freq = 1; - #ifdef CONFIG_COMMON_CLK rv3032_clkout_register_clk(rv3032, client); #endif diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 1327251e527c21..4e9e04cbec89a0 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -738,8 +738,6 @@ static int rv8803_probe(struct i2c_client *client) devm_rtc_nvmem_register(rv8803->rtc, &nvmem_cfg); - rv8803->rtc->max_user_freq = 1; - return 0; } diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c index 7c423d672adb34..07bf35ac8d79bf 100644 --- a/drivers/rtc/rtc-rx6110.c +++ b/drivers/rtc/rtc-rx6110.c @@ -324,8 +324,6 @@ static int rx6110_probe(struct rx6110_data *rx6110, struct device *dev) if (err) return err; - rx6110->rtc->max_user_freq = 1; - return 0; } diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c index 2b6198d1cf81ea..171240e50f489e 100644 --- a/drivers/rtc/rtc-rx8010.c +++ b/drivers/rtc/rtc-rx8010.c @@ -412,7 +412,6 @@ static int rx8010_probe(struct i2c_client *client) } rx8010->rtc->ops = &rx8010_rtc_ops; - rx8010->rtc->max_user_freq = 1; rx8010->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; rx8010->rtc->range_max = RTC_TIMESTAMP_END_2099; diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index 7e9f7cb90c2887..ced6e7adfe8d0e 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c @@ -565,8 +565,6 @@ static int rx8025_probe(struct i2c_client *client) clear_bit(RTC_FEATURE_ALARM, rx8025->rtc->features); } - rx8025->rtc->max_user_freq = 1; - set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rx8025->rtc->features); clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rx8025->rtc->features); diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 3408d2ab274194..07bd983b569296 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -66,7 +66,7 @@ struct s35390a { int twentyfourhour; }; -static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) +static int s35390a_set_reg(struct s35390a *s35390a, int reg, u8 *buf, int len) { struct i2c_client *client = s35390a->client[reg]; struct i2c_msg msg[] = { @@ -83,7 +83,7 @@ static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) return 0; } -static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) +static int s35390a_get_reg(struct s35390a *s35390a, int reg, u8 *buf, int len) { struct i2c_client *client = s35390a->client[reg]; struct i2c_msg msg[] = { @@ -168,7 +168,7 @@ static int s35390a_read_status(struct s35390a *s35390a, char *status1) static int s35390a_disable_test_mode(struct s35390a *s35390a) { - char buf[1]; + u8 buf[1]; if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) return -EIO; @@ -210,7 +210,7 @@ static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) struct i2c_client *client = to_i2c_client(dev); struct s35390a *s35390a = i2c_get_clientdata(client); int 
i; - char buf[7], status; + u8 buf[7], status; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, @@ -239,7 +239,7 @@ static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); struct s35390a *s35390a = i2c_get_clientdata(client); - char buf[7], status; + u8 buf[7], status; int i, err; if (s35390a_read_status(s35390a, &status) == 1) @@ -273,7 +273,7 @@ static int s35390a_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) { struct i2c_client *client = to_i2c_client(dev); struct s35390a *s35390a = i2c_get_clientdata(client); - char buf[3], sts = 0; + u8 buf[3], sts = 0; int err, i; dev_dbg(&client->dev, "%s: alm is secs=%d, mins=%d, hours=%d mday=%d, "\ @@ -326,7 +326,7 @@ static int s35390a_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) { struct i2c_client *client = to_i2c_client(dev); struct s35390a *s35390a = i2c_get_clientdata(client); - char buf[3], sts; + u8 buf[3], sts; int i, err; err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts)); @@ -383,7 +383,7 @@ static int s35390a_rtc_ioctl(struct device *dev, unsigned int cmd, { struct i2c_client *client = to_i2c_client(dev); struct s35390a *s35390a = i2c_get_clientdata(client); - char sts; + u8 sts; int err; switch (cmd) { @@ -422,7 +422,7 @@ static int s35390a_probe(struct i2c_client *client) unsigned int i; struct s35390a *s35390a; struct rtc_device *rtc; - char buf, status1; + u8 buf, status1; struct device *dev = &client->dev; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 1ad93648d69c0b..26b2f4184eccf7 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -40,8 +40,6 @@ #define RTC_DEF_DIVIDER (32768 - 1) #define RTC_DEF_TRIM 0 -#define RTC_FREQ 1024 - static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id) { @@ -202,7 +200,6 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) } info->rtc->ops = &sa1100_rtc_ops; - info->rtc->max_user_freq = RTC_FREQ; info->rtc->range_max = U32_MAX; ret = devm_rtc_register_device(info->rtc); diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 619800a004799c..0510dc64c3e252 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -423,7 +423,6 @@ static int __init sh_rtc_probe(struct platform_device *pdev) writeb(tmp, rtc->regbase + RCR1); rtc->rtc_dev->ops = &sh_rtc_ops; - rtc->rtc_dev->max_user_freq = 256; if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) { rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900; diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c index 46788db899533d..528e32b7d101c2 100644 --- a/drivers/rtc/rtc-tegra.c +++ b/drivers/rtc/rtc-tegra.c @@ -274,6 +274,12 @@ static const struct of_device_id tegra_rtc_dt_match[] = { }; MODULE_DEVICE_TABLE(of, tegra_rtc_dt_match); +static const struct acpi_device_id tegra_rtc_acpi_match[] = { + { "NVDA0280" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, tegra_rtc_acpi_match); + static int tegra_rtc_probe(struct platform_device *pdev) { struct tegra_rtc_info *info; @@ -300,13 +306,11 @@ static int tegra_rtc_probe(struct platform_device *pdev) info->rtc->ops = &tegra_rtc_ops; info->rtc->range_max = U32_MAX; - info->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(info->clk)) - return PTR_ERR(info->clk); - - ret = clk_prepare_enable(info->clk); - if (ret < 0) - return ret; + if (dev_of_node(&pdev->dev)) { + info->clk = 
devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(info->clk)) + return PTR_ERR(info->clk); + } /* set context info */ info->pdev = pdev; @@ -324,32 +328,18 @@ static int tegra_rtc_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, info->irq, tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), &pdev->dev); - if (ret) { - dev_err(&pdev->dev, "failed to request interrupt: %d\n", ret); - goto disable_clk; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "failed to request interrupt\n"); ret = devm_rtc_register_device(info->rtc); if (ret) - goto disable_clk; + return ret; dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n"); return 0; - -disable_clk: - clk_disable_unprepare(info->clk); - return ret; -} - -static void tegra_rtc_remove(struct platform_device *pdev) -{ - struct tegra_rtc_info *info = platform_get_drvdata(pdev); - - clk_disable_unprepare(info->clk); } -#ifdef CONFIG_PM_SLEEP static int tegra_rtc_suspend(struct device *dev) { struct tegra_rtc_info *info = dev_get_drvdata(dev); @@ -387,9 +377,8 @@ static int tegra_rtc_resume(struct device *dev) return 0; } -#endif -static SIMPLE_DEV_PM_OPS(tegra_rtc_pm_ops, tegra_rtc_suspend, tegra_rtc_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(tegra_rtc_pm_ops, tegra_rtc_suspend, tegra_rtc_resume); static void tegra_rtc_shutdown(struct platform_device *pdev) { @@ -399,12 +388,12 @@ static void tegra_rtc_shutdown(struct platform_device *pdev) static struct platform_driver tegra_rtc_driver = { .probe = tegra_rtc_probe, - .remove = tegra_rtc_remove, .shutdown = tegra_rtc_shutdown, .driver = { .name = "tegra_rtc", .of_match_table = tegra_rtc_dt_match, - .pm = &tegra_rtc_pm_ops, + .acpi_match_table = tegra_rtc_acpi_match, + .pm = pm_sleep_ptr(&tegra_rtc_pm_ops), }, }; module_platform_driver(tegra_rtc_driver); diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 5c602c0577989f..45b0e33293a591 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -1260,6 +1260,7 @@ static void imm_detach(struct parport *pb) imm_struct *dev; list_for_each_entry(dev, &imm_hosts, list) { if (dev->dev->port == pb) { + disable_delayed_work_sync(&dev->imm_tq); list_del_init(&dev->list); scsi_remove_host(dev->host); scsi_host_put(dev->host); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 95123689e9d1fe..dbd58a7e7bc1f3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -61,8 +61,8 @@ #include <linux/hdreg.h> #include <linux/reboot.h> #include <linux/stringify.h> +#include <linux/irq.h> #include <asm/io.h> -#include <asm/irq.h> #include <asm/processor.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -7844,6 +7844,30 @@ static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd) } /** + * ipr_set_affinity_nobalance + * @ioa_cfg: ipr_ioa_cfg struct for an ipr device + * @flag: bool + * true: enable "IRQ_NO_BALANCING" bit for msix interrupt + * false: disable "IRQ_NO_BALANCING" bit for msix interrupt + * Description: This function will be called to disable/enable + * "IRQ_NO_BALANCING" to avoid irqbalance daemon + * kicking in during adapter reset. + **/ +static void ipr_set_affinity_nobalance(struct ipr_ioa_cfg *ioa_cfg, bool flag) +{ + int irq, i; + + for (i = 0; i < ioa_cfg->nvectors; i++) { + irq = pci_irq_vector(ioa_cfg->pdev, i); + + if (flag) + irq_set_status_flags(irq, IRQ_NO_BALANCING); + else + irq_clear_status_flags(irq, IRQ_NO_BALANCING); + } +} + +/** + * ipr_reset_restore_cfg_space - Restore PCI config space.
* @ipr_cmd: ipr command struct * @@ -7866,6 +7890,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) return IPR_RC_JOB_CONTINUE; } + ipr_set_affinity_nobalance(ioa_cfg, false); ipr_fail_all_ops(ioa_cfg); if (ioa_cfg->sis64) { @@ -7945,6 +7970,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); if (rc == PCIBIOS_SUCCESSFUL) { + ipr_set_affinity_nobalance(ioa_cfg, true); ipr_cmd->job_step = ipr_reset_bist_done; ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); rc = IPR_RC_JOB_RETURN; diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 8566bb1208a057..6b15ad1bcadabe 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -141,6 +141,7 @@ Undo_event_q: Undo_ports: sas_unregister_ports(sas_ha); Undo_phys: + sas_unregister_phys(sas_ha); return error; } diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 6706f2be8d274b..d104c87f04f523 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -54,6 +54,7 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev); void sas_scsi_recover_host(struct Scsi_Host *shost); int sas_register_phys(struct sas_ha_struct *sas_ha); +void sas_unregister_phys(struct sas_ha_struct *sas_ha); struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags); void sas_free_event(struct asd_sas_event *event); @@ -145,20 +146,6 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i func, dev->parent ? "exp-attached" : "direct-attached", SAS_ADDR(dev->sas_addr), err); - - /* - * If the device probe failed, the expander phy attached address - * needs to be reset so that the phy will not be treated as flutter - * in the next revalidation - */ - if (dev->parent && !dev_is_expander(dev->dev_type)) { - struct sas_phy *phy = dev->phy; - struct domain_device *parent = dev->parent; - struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number]; - - memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); - } - sas_unregister_dev(dev->port, dev); } diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index 635835c28ecd43..58f08dc2c1872c 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c @@ -116,6 +116,7 @@ static void sas_phye_shutdown(struct work_struct *work) int sas_register_phys(struct sas_ha_struct *sas_ha) { int i; + int err; /* Now register the phys. 
*/ for (i = 0; i < sas_ha->num_phys; i++) { @@ -132,8 +133,10 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) phy->frame_rcvd_size = 0; phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i); - if (!phy->phy) - return -ENOMEM; + if (!phy->phy) { + err = -ENOMEM; + goto rollback; + } phy->phy->identify.initiator_port_protocols = phy->iproto; @@ -146,10 +149,34 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; - sas_phy_add(phy->phy); + err = sas_phy_add(phy->phy); + if (err) { + sas_phy_free(phy->phy); + goto rollback; + } } return 0; +rollback: + for (i-- ; i >= 0 ; i--) { + struct asd_sas_phy *phy = sas_ha->sas_phy[i]; + + sas_phy_delete(phy->phy); + sas_phy_free(phy->phy); + } + return err; +} + +void sas_unregister_phys(struct sas_ha_struct *sas_ha) +{ + int i; + + for (i = 0 ; i < sas_ha->num_phys ; i++) { + struct asd_sas_phy *phy = sas_ha->sas_phy[i]; + + sas_phy_delete(phy->phy); + sas_phy_free(phy->phy); + } } const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 6742684e2990a7..31d68c151b2076 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -56,8 +56,8 @@ extern struct list_head mrioc_list; extern int prot_mask; extern atomic64_t event_counter; -#define MPI3MR_DRIVER_VERSION "8.15.0.5.50" -#define MPI3MR_DRIVER_RELDATE "12-August-2025" +#define MPI3MR_DRIVER_VERSION "8.15.0.5.51" +#define MPI3MR_DRIVER_RELDATE "18-November-2025" #define MPI3MR_DRIVER_NAME "mpi3mr" #define MPI3MR_DRIVER_LICENSE "GPL" diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index b88633e1efe275..d4ca878d088697 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -1184,6 +1184,8 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, if (is_added == true) tgtdev->io_throttle_enabled = (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0; + if (!mrioc->sas_transport_enabled) + tgtdev->non_stl = 1; switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) { case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB: @@ -4844,7 +4846,7 @@ static int mpi3mr_target_alloc(struct scsi_target *starget) spin_lock_irqsave(&mrioc->tgtdev_lock, flags); if (starget->channel == mrioc->scsi_device_channel) { tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); - if (tgt_dev && !tgt_dev->is_hidden) { + if (tgt_dev && !tgt_dev->is_hidden && tgt_dev->non_stl) { scsi_tgt_priv_data->starget = starget; scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3a57f07d73f587..16a44c0917e18b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -17,6 +17,7 @@ #include <linux/crash_dump.h> #include <linux/trace_events.h> #include <linux/trace.h> +#include <linux/irq.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> @@ -7776,6 +7777,31 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) } +/** + * qla2xxx_set_affinity_nobalance + * @pdev: pci_dev struct for a qla2xxx device + * @flag: bool + * true: enable "IRQ_NO_BALANCING" bit for msix interrupt + * false: disable "IRQ_NO_BALANCING" bit for msix interrupt + * Description: This function will be called to disable/enable + * "IRQ_NO_BALANCING" to avoid irqbalance daemon + * kicking in during adapter reset. 
+ **/ + +static void qla2xxx_set_affinity_nobalance(struct pci_dev *pdev, bool flag) +{ + int irq, i; + + for (i = 0; i < QLA_BASE_VECTORS; i++) { + irq = pci_irq_vector(pdev, i); + + if (flag) + irq_set_status_flags(irq, IRQ_NO_BALANCING); + else + irq_clear_status_flags(irq, IRQ_NO_BALANCING); + } +} + static pci_ers_result_t qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -7794,6 +7820,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) goto out; } + qla2xxx_set_affinity_nobalance(pdev, false); + switch (state) { case pci_channel_io_normal: qla_pci_set_eeh_busy(vha); @@ -7930,6 +7958,8 @@ exit_slot_reset: ql_dbg(ql_dbg_aer, base_vha, 0x900e, "Slot Reset returning %x.\n", ret); + qla2xxx_set_affinity_nobalance(pdev, true); + return ret; } diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index da2fc66ffedd5d..b0a62aaa1cca90 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -1552,7 +1552,7 @@ static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val) (val == PHAN_INITIALIZE_ACK)) return 0; set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(500); + schedule_timeout(msecs_to_jiffies(500)); } while (--retries); diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 7b56e00c7df6b0..b9d80531781494 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -353,7 +353,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach); * that may have a device handler attached * @gfp - the GFP mask used in the kmalloc() call when allocating memory * - * Returns name of attached handler, NULL if no handler is attached. + * Returns name of attached handler, NULL if no handler is attached, or + * an error pointer if an error occurred. * Caller must take care to free the returned string. */ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) @@ -363,10 +364,11 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) sdev = scsi_device_from_queue(q); if (!sdev) - return NULL; + return ERR_PTR(-ENODEV); if (sdev->handler) - handler_name = kstrdup(sdev->handler->name, gfp); + handler_name = kstrdup(sdev->handler->name, gfp) ? : + ERR_PTR(-ENOMEM); put_device(&sdev->sdev_gendev); return handler_name; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 51ad2ad07e4390..93031326ac3ee9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2801,7 +2801,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple); * * Must be called with user context, may sleep. * - * Returns zero if unsuccessful or an error if not. + * Returns zero if successful or an error if not.
*/ int scsi_device_quiesce(struct scsi_device *sdev) diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 55c1db81653400..fb68738dfb9b84 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -2052,8 +2052,14 @@ EXPORT_SYMBOL(sdw_clear_slave_status); int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg) { - if (msg->len > SDW_BPT_MSG_MAX_BYTES) { - dev_err(bus->dev, "Invalid BPT message length %d\n", msg->len); + int len = 0; + int i; + + for (i = 0; i < msg->sections; i++) + len += msg->sec[i].len; + + if (len > SDW_BPT_MSG_MAX_BYTES) { + dev_err(bus->dev, "Invalid BPT message length %d\n", len); return -EINVAL; } diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h index 02651fbb683aca..8115c64dd48e2f 100644 --- a/drivers/soundwire/bus.h +++ b/drivers/soundwire/bus.h @@ -73,21 +73,31 @@ struct sdw_msg { }; /** - * struct sdw_btp_msg - Message structure + * struct sdw_btp_section - Message section structure * @addr: Start Register address accessed in the Slave * @len: number of bytes to transfer. More than 64Kb can be transferred * but a practical limit of SDW_BPT_MSG_MAX_BYTES is enforced. - * @dev_num: Slave device number - * @flags: transfer flags, indicate if xfer is read or write - * @buf: message data buffer (filled by host for write, filled + * @buf: section data buffer (filled by host for write, filled * by Peripheral hardware for reads) */ -struct sdw_bpt_msg { +struct sdw_bpt_section { u32 addr; u32 len; + u8 *buf; +}; + +/** + * struct sdw_btp_msg - Message structure + * @sec: Pointer to array of sections + * @sections: Number of sections in the array + * @dev_num: Slave device number + * @flags: transfer flags, indicate if xfer is read or write + */ +struct sdw_bpt_msg { + struct sdw_bpt_section *sec; + int sections; u8 dev_num; u8 flags; - u8 *buf; }; #define SDW_DOUBLE_RATE_FACTOR 2 diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 21bb491d026b44..a106e5e482c8e3 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -2094,6 +2094,36 @@ static unsigned int sdw_cdns_read_pdi1_buffer_size(unsigned int actual_data_size return total * 2; } +int sdw_cdns_bpt_find_bandwidth(int command, /* 0: write, 1: read */ + int row, int col, int frame_rate, + unsigned int *tx_dma_bandwidth, + unsigned int *rx_dma_bandwidth) +{ + unsigned int bpt_bits = row * (col - 1); + unsigned int bpt_bytes = bpt_bits >> 3; + unsigned int pdi0_buffer_size; + unsigned int pdi1_buffer_size; + unsigned int data_per_frame; + + data_per_frame = sdw_cdns_bra_actual_data_size(bpt_bytes); + if (!data_per_frame) + return -EINVAL; + + if (command == 0) { + pdi0_buffer_size = sdw_cdns_write_pdi0_buffer_size(data_per_frame); + pdi1_buffer_size = SDW_CDNS_WRITE_PDI1_BUFFER_SIZE; + } else { + pdi0_buffer_size = SDW_CDNS_READ_PDI0_BUFFER_SIZE; + pdi1_buffer_size = sdw_cdns_read_pdi1_buffer_size(data_per_frame); + } + + *tx_dma_bandwidth = pdi0_buffer_size * 8 * frame_rate; + *rx_dma_bandwidth = pdi1_buffer_size * 8 * frame_rate; + + return 0; +} +EXPORT_SYMBOL(sdw_cdns_bpt_find_bandwidth); + int sdw_cdns_bpt_find_buffer_sizes(int command, /* 0: write, 1: read */ int row, int col, unsigned int data_bytes, unsigned int requested_bytes_per_frame, @@ -2114,9 +2144,6 @@ int sdw_cdns_bpt_find_buffer_sizes(int command, /* 0: write, 1: read */ if (!actual_bpt_bytes) return -EINVAL; - if (data_bytes < actual_bpt_bytes) - actual_bpt_bytes = data_bytes; - /* * the caller may 
want to set the number of bytes per frame, * allow when possible @@ -2126,6 +2153,9 @@ int sdw_cdns_bpt_find_buffer_sizes(int command, /* 0: write, 1: read */ *data_per_frame = actual_bpt_bytes; + if (data_bytes < actual_bpt_bytes) + actual_bpt_bytes = data_bytes; + if (command == 0) { /* * for writes we need to send all the data_bytes per frame, @@ -2294,17 +2324,20 @@ static int sdw_cdns_prepare_read_pd0_buffer(u8 *header, unsigned int header_size #define CDNS_BPT_ROLLING_COUNTER_START 1 -int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, u32 start_register, u8 *data, int data_size, - int data_per_frame, u8 *dma_buffer, int dma_buffer_size, - int *dma_buffer_total_bytes) +int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, struct sdw_bpt_section *sec, int num_sec, + int data_per_frame, u8 *dma_buffer, + int dma_buffer_size, int *dma_buffer_total_bytes) { int total_dma_data_written = 0; u8 *p_dma_buffer = dma_buffer; u8 header[SDW_CDNS_BRA_HDR]; + unsigned int start_register; + unsigned int section_size; int dma_data_written; - u8 *p_data = data; + u8 *p_data; u8 counter; int ret; + int i; counter = CDNS_BPT_ROLLING_COUNTER_START; @@ -2312,47 +2345,57 @@ int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, u32 start_register, u8 *data, header[0] |= GENMASK(7, 6); /* header is active */ header[0] |= (dev_num << 2); - while (data_size >= data_per_frame) { - header[1] = data_per_frame; - header[2] = start_register >> 24 & 0xFF; - header[3] = start_register >> 16 & 0xFF; - header[4] = start_register >> 8 & 0xFF; - header[5] = start_register >> 0 & 0xFF; - - ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR, - p_data, data_per_frame, - p_dma_buffer, dma_buffer_size, - &dma_data_written, counter); - if (ret < 0) - return ret; + for (i = 0; i < num_sec; i++) { + start_register = sec[i].addr; + section_size = sec[i].len; + p_data = sec[i].buf; - counter++; + while (section_size >= data_per_frame) { + header[1] = data_per_frame; + header[2] = start_register >> 24 & 0xFF; + header[3] = start_register >> 16 & 0xFF; + header[4] = start_register >> 8 & 0xFF; + header[5] = start_register >> 0 & 0xFF; - p_data += data_per_frame; - data_size -= data_per_frame; + ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR, + p_data, data_per_frame, + p_dma_buffer, dma_buffer_size, + &dma_data_written, counter); + if (ret < 0) + return ret; - p_dma_buffer += dma_data_written; - dma_buffer_size -= dma_data_written; - total_dma_data_written += dma_data_written; + counter++; - start_register += data_per_frame; - } + p_data += data_per_frame; + section_size -= data_per_frame; - if (data_size) { - header[1] = data_size; - header[2] = start_register >> 24 & 0xFF; - header[3] = start_register >> 16 & 0xFF; - header[4] = start_register >> 8 & 0xFF; - header[5] = start_register >> 0 & 0xFF; + p_dma_buffer += dma_data_written; + dma_buffer_size -= dma_data_written; + total_dma_data_written += dma_data_written; - ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR, - p_data, data_size, - p_dma_buffer, dma_buffer_size, - &dma_data_written, counter); - if (ret < 0) - return ret; + start_register += data_per_frame; + } - total_dma_data_written += dma_data_written; + if (section_size) { + header[1] = section_size; + header[2] = start_register >> 24 & 0xFF; + header[3] = start_register >> 16 & 0xFF; + header[4] = start_register >> 8 & 0xFF; + header[5] = start_register >> 0 & 0xFF; + + ret = sdw_cdns_prepare_write_pd0_buffer(header, SDW_CDNS_BRA_HDR, + p_data, section_size, + p_dma_buffer, 
dma_buffer_size, + &dma_data_written, counter); + if (ret < 0) + return ret; + + counter++; + + p_dma_buffer += dma_data_written; + dma_buffer_size -= dma_data_written; + total_dma_data_written += dma_data_written; + } } *dma_buffer_total_bytes = total_dma_data_written; @@ -2361,16 +2404,19 @@ int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, u32 start_register, u8 *data, } EXPORT_SYMBOL(sdw_cdns_prepare_write_dma_buffer); -int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, u32 start_register, int data_size, +int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, struct sdw_bpt_section *sec, int num_sec, int data_per_frame, u8 *dma_buffer, int dma_buffer_size, - int *dma_buffer_total_bytes) + int *dma_buffer_total_bytes, unsigned int fake_size) { int total_dma_data_written = 0; u8 *p_dma_buffer = dma_buffer; u8 header[SDW_CDNS_BRA_HDR]; + unsigned int start_register; + unsigned int data_size; int dma_data_written; u8 counter; int ret; + int i; counter = CDNS_BPT_ROLLING_COUNTER_START; @@ -2378,13 +2424,58 @@ int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, u32 start_register, int data_si header[0] |= GENMASK(7, 6); /* header is active */ header[0] |= (dev_num << 2); - while (data_size >= data_per_frame) { - header[1] = data_per_frame; - header[2] = start_register >> 24 & 0xFF; - header[3] = start_register >> 16 & 0xFF; - header[4] = start_register >> 8 & 0xFF; - header[5] = start_register >> 0 & 0xFF; + for (i = 0; i < num_sec; i++) { + start_register = sec[i].addr; + data_size = sec[i].len; + while (data_size >= data_per_frame) { + header[1] = data_per_frame; + header[2] = start_register >> 24 & 0xFF; + header[3] = start_register >> 16 & 0xFF; + header[4] = start_register >> 8 & 0xFF; + header[5] = start_register >> 0 & 0xFF; + + ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, + p_dma_buffer, dma_buffer_size, + &dma_data_written, counter); + if (ret < 0) + return ret; + + counter++; + + data_size -= data_per_frame; + + p_dma_buffer += dma_data_written; + dma_buffer_size -= dma_data_written; + total_dma_data_written += dma_data_written; + + start_register += data_per_frame; + } + + if (data_size) { + header[1] = data_size; + header[2] = start_register >> 24 & 0xFF; + header[3] = start_register >> 16 & 0xFF; + header[4] = start_register >> 8 & 0xFF; + header[5] = start_register >> 0 & 0xFF; + + ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, + p_dma_buffer, dma_buffer_size, + &dma_data_written, counter); + if (ret < 0) + return ret; + + counter++; + p_dma_buffer += dma_data_written; + dma_buffer_size -= dma_data_written; + total_dma_data_written += dma_data_written; + } + } + + /* Add fake frame */ + header[0] &= ~GENMASK(7, 6); /* Set inactive flag in BPT/BRA frame header */ + while (fake_size >= data_per_frame) { + header[1] = data_per_frame; ret = sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, p_dma_buffer, dma_buffer_size, &dma_data_written, counter); @@ -2393,28 +2484,24 @@ int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, u32 start_register, int data_si counter++; - data_size -= data_per_frame; - + fake_size -= data_per_frame; p_dma_buffer += dma_data_written; dma_buffer_size -= dma_data_written; total_dma_data_written += dma_data_written; - - start_register += data_per_frame; } - if (data_size) { - header[1] = data_size; - header[2] = start_register >> 24 & 0xFF; - header[3] = start_register >> 16 & 0xFF; - header[4] = start_register >> 8 & 0xFF; - header[5] = start_register >> 0 & 0xFF; - + if (fake_size) { + header[1] = fake_size; + ret =
sdw_cdns_prepare_read_pd0_buffer(header, SDW_CDNS_BRA_HDR, p_dma_buffer, dma_buffer_size, &dma_data_written, counter); if (ret < 0) return ret; + counter++; + + p_dma_buffer += dma_data_written; + dma_buffer_size -= dma_data_written; total_dma_data_written += dma_data_written; } @@ -2495,14 +2582,14 @@ int sdw_cdns_check_write_response(struct device *dev, u8 *dma_buffer, ret = check_frame_start(header, counter); if (ret < 0) { dev_err(dev, "%s: bad frame %d/%d start header %x\n", - __func__, i, num_frames, header); + __func__, i + 1, num_frames, header); return ret; } ret = check_frame_end(footer); if (ret < 0) { dev_err(dev, "%s: bad frame %d/%d end footer %x\n", - __func__, i, num_frames, footer); + __func__, i + 1, num_frames, footer); return ret; } @@ -2549,9 +2636,12 @@ static u8 extract_read_data(u32 *data, int num_bytes, u8 *buffer) } int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buffer_size, - u8 *buffer, int buffer_size, int num_frames, int data_per_frame) + struct sdw_bpt_section *sec, int num_sec, int num_frames, + int data_per_frame) { int total_num_bytes = 0; + int buffer_size = 0; + int sec_index; u32 *p_data; u8 *p_buf; int counter; @@ -2565,7 +2655,10 @@ int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buf counter = CDNS_BPT_ROLLING_COUNTER_START; p_data = (u32 *)dma_buffer; - p_buf = buffer; + + sec_index = 0; + p_buf = sec[sec_index].buf; + buffer_size = sec[sec_index].len; for (i = 0; i < num_frames; i++) { header = *p_data++; @@ -2573,7 +2666,7 @@ int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buf ret = check_frame_start(header, counter); if (ret < 0) { dev_err(dev, "%s: bad frame %d/%d start header %x\n", - __func__, i, num_frames, header); + __func__, i + 1, num_frames, header); return ret; } @@ -2588,7 +2681,7 @@ int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buf if (crc != expected_crc) { dev_err(dev, "%s: bad frame %d/%d crc %#x expected %#x\n", - __func__, i, num_frames, crc, expected_crc); + __func__, i + 1, num_frames, crc, expected_crc); return -EIO; } @@ -2599,12 +2692,24 @@ int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buf ret = check_frame_end(footer); if (ret < 0) { dev_err(dev, "%s: bad frame %d/%d end footer %x\n", - __func__, i, num_frames, footer); + __func__, i + 1, num_frames, footer); return ret; } counter++; counter &= GENMASK(3, 0); + + if (buffer_size == total_num_bytes && (i + 1) < num_frames) { + sec_index++; + if (sec_index >= num_sec) { + dev_err(dev, "%s: incorrect section index %d i %d\n", + __func__, sec_index, i); + return -EINVAL; + } + p_buf = sec[sec_index].buf; + buffer_size = sec[sec_index].len; + total_num_bytes = 0; + } } return 0; } diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index 9373426c7f630b..668f807cff4b23 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* Copyright(c) 2015-17 Intel Corporation. 
*/ #include <sound/soc.h> +#include "bus.h" #ifndef __SDW_CADENCE_H #define __SDW_CADENCE_H @@ -209,23 +210,29 @@ void sdw_cdns_config_update(struct sdw_cdns *cdns); int sdw_cdns_config_update_set_wait(struct sdw_cdns *cdns); /* SoundWire BPT/BRA helpers to format data */ +int sdw_cdns_bpt_find_bandwidth(int command, /* 0: write, 1: read */ + int row, int col, int frame_rate, + unsigned int *tx_dma_bandwidth, + unsigned int *rx_dma_bandwidth); + int sdw_cdns_bpt_find_buffer_sizes(int command, /* 0: write, 1: read */ int row, int col, unsigned int data_bytes, unsigned int requested_bytes_per_frame, unsigned int *data_per_frame, unsigned int *pdi0_buffer_size, unsigned int *pdi1_buffer_size, unsigned int *num_frames); -int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, u32 start_register, u8 *data, int data_size, - int data_per_frame, u8 *dma_buffer, int dma_buffer_size, - int *dma_buffer_total_bytes); +int sdw_cdns_prepare_write_dma_buffer(u8 dev_num, struct sdw_bpt_section *sec, int num_sec, + int data_per_frame, u8 *dma_buffer, + int dma_buffer_size, int *dma_buffer_total_bytes); -int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, u32 start_register, int data_size, +int sdw_cdns_prepare_read_dma_buffer(u8 dev_num, struct sdw_bpt_section *sec, int num_sec, int data_per_frame, u8 *dma_buffer, int dma_buffer_size, - int *dma_buffer_total_bytes); + int *dma_buffer_total_bytes, unsigned int fake_size); int sdw_cdns_check_write_response(struct device *dev, u8 *dma_buffer, int dma_buffer_size, int num_frames); int sdw_cdns_check_read_response(struct device *dev, u8 *dma_buffer, int dma_buffer_size, - u8 *buffer, int buffer_size, int num_frames, int data_per_frame); + struct sdw_bpt_section *sec, int num_sec, int num_frames, + int data_per_frame); #endif /* __SDW_CADENCE_H */ diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c index 1e0f9318b61656..6068011dd0d989 100644 --- a/drivers/soundwire/debugfs.c +++ b/drivers/soundwire/debugfs.c @@ -222,15 +222,23 @@ DEFINE_DEBUGFS_ATTRIBUTE(set_num_bytes_fops, NULL, static int do_bpt_sequence(struct sdw_slave *slave, bool write, u8 *buffer) { struct sdw_bpt_msg msg = {0}; + struct sdw_bpt_section *sec; - msg.addr = start_addr; - msg.len = num_bytes; + sec = kcalloc(1, sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + msg.sections = 1; + + sec[0].addr = start_addr; + sec[0].len = num_bytes; + + msg.sec = sec; msg.dev_num = slave->dev_num; if (write) msg.flags = SDW_MSG_FLAG_WRITE; else msg.flags = SDW_MSG_FLAG_READ; - msg.buf = buffer; + sec[0].buf = buffer; return sdw_bpt_send_sync(slave->bus, slave, &msg); } diff --git a/drivers/soundwire/generic_bandwidth_allocation.c b/drivers/soundwire/generic_bandwidth_allocation.c index c18f0c16f92973..530ac66ac6fad2 100644 --- a/drivers/soundwire/generic_bandwidth_allocation.c +++ b/drivers/soundwire/generic_bandwidth_allocation.c @@ -124,6 +124,9 @@ static void sdw_compute_dp0_port_params(struct sdw_bus *bus) struct sdw_master_runtime *m_rt; list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) { + /* DP0 is for BPT only */ + if (m_rt->stream->type != SDW_STREAM_BPT) + continue; sdw_compute_dp0_master_ports(m_rt); sdw_compute_dp0_slave_ports(m_rt); } diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c index 5d08364ad6d141..1ed0251d259236 100644 --- a/drivers/soundwire/intel_ace2x.c +++ b/drivers/soundwire/intel_ace2x.c @@ -44,6 +44,8 @@ static int sdw_slave_bpt_stream_add(struct sdw_slave *slave, struct sdw_stream_r return ret; } +#define READ_PDI1_MIN_SIZE 12 + 
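For reference, the rx_pad computation introduced in intel_ace2x_bpt_open_stream() below rounds pdi1_buffer_size up to the DMA buffer-size alignment reported by hda_sdw_bpt_get_buf_size_alignment(), while keeping the padding strictly larger than READ_PDI1_MIN_SIZE. A minimal standalone sketch of that arithmetic, with the helper name chosen here purely for illustration and not part of the patch:

static unsigned int bpt_pad_to_alignment(unsigned int size, unsigned int align,
					 unsigned int min_pad)
{
	/* pad up to the next alignment boundary */
	unsigned int pad = align - (size % align);

	/* grow the pad in whole alignment steps until it exceeds min_pad */
	while (pad <= min_pad)
		pad += align;

	return size + pad;
}

The tx buffer applies the same rounding via tx_pad, but without the minimum-pad loop.
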
static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave *slave, struct sdw_bpt_msg *msg) { @@ -53,19 +55,31 @@ static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave * struct sdw_stream_runtime *stream; struct sdw_stream_config sconfig; struct sdw_port_config *pconfig; + unsigned int pdi0_buf_size_pre_frame; + unsigned int pdi1_buf_size_pre_frame; + unsigned int pdi0_buffer_size_; + unsigned int pdi1_buffer_size_; unsigned int pdi0_buffer_size; unsigned int tx_dma_bandwidth; unsigned int pdi1_buffer_size; unsigned int rx_dma_bandwidth; + unsigned int fake_num_frames; unsigned int data_per_frame; unsigned int tx_total_bytes; struct sdw_cdns_pdi *pdi0; struct sdw_cdns_pdi *pdi1; + unsigned int rx_alignment; + unsigned int tx_alignment; + unsigned int num_frames_; unsigned int num_frames; + unsigned int fake_size; + unsigned int tx_pad; + unsigned int rx_pad; int command; int ret1; int ret; int dir; + int len; int i; stream = sdw_alloc_stream("BPT", SDW_STREAM_BPT); @@ -138,23 +152,77 @@ static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave * command = (msg->flags & SDW_MSG_FLAG_WRITE) ? 0 : 1; - ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row, cdns->bus.params.col, - msg->len, SDW_BPT_MSG_MAX_BYTES, &data_per_frame, - &pdi0_buffer_size, &pdi1_buffer_size, &num_frames); + ret = sdw_cdns_bpt_find_bandwidth(command, cdns->bus.params.row, + cdns->bus.params.col, + prop->default_frame_rate, + &tx_dma_bandwidth, &rx_dma_bandwidth); if (ret < 0) goto deprepare_stream; + len = 0; + pdi0_buffer_size = 0; + pdi1_buffer_size = 0; + num_frames = 0; + /* Add up pdi buffer size and frame numbers of each BPT sections */ + for (i = 0; i < msg->sections; i++) { + ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row, + cdns->bus.params.col, + msg->sec[i].len, SDW_BPT_MSG_MAX_BYTES, + &data_per_frame, &pdi0_buffer_size_, + &pdi1_buffer_size_, &num_frames_); + if (ret < 0) + goto deprepare_stream; + + len += msg->sec[i].len; + pdi0_buffer_size += pdi0_buffer_size_; + pdi1_buffer_size += pdi1_buffer_size_; + num_frames += num_frames_; + } + sdw->bpt_ctx.pdi0_buffer_size = pdi0_buffer_size; sdw->bpt_ctx.pdi1_buffer_size = pdi1_buffer_size; sdw->bpt_ctx.num_frames = num_frames; sdw->bpt_ctx.data_per_frame = data_per_frame; - tx_dma_bandwidth = div_u64((u64)pdi0_buffer_size * 8 * (u64)prop->default_frame_rate, - num_frames); - rx_dma_bandwidth = div_u64((u64)pdi1_buffer_size * 8 * (u64)prop->default_frame_rate, - num_frames); + + rx_alignment = hda_sdw_bpt_get_buf_size_alignment(rx_dma_bandwidth); + tx_alignment = hda_sdw_bpt_get_buf_size_alignment(tx_dma_bandwidth); + + if (command) { /* read */ + /* Get buffer size of a full frame */ + ret = sdw_cdns_bpt_find_buffer_sizes(command, cdns->bus.params.row, + cdns->bus.params.col, + data_per_frame, SDW_BPT_MSG_MAX_BYTES, + &data_per_frame, &pdi0_buf_size_pre_frame, + &pdi1_buf_size_pre_frame, &fake_num_frames); + if (ret < 0) + goto deprepare_stream; + + /* find fake pdi1 buffer size */ + rx_pad = rx_alignment - (pdi1_buffer_size % rx_alignment); + while (rx_pad <= READ_PDI1_MIN_SIZE) + rx_pad += rx_alignment; + + pdi1_buffer_size += rx_pad; + /* It is fine if we request more than enough byte to read */ + fake_num_frames = DIV_ROUND_UP(rx_pad, pdi1_buf_size_pre_frame); + fake_size = fake_num_frames * data_per_frame; + + /* find fake pdi0 buffer size */ + pdi0_buffer_size += (fake_num_frames * pdi0_buf_size_pre_frame); + tx_pad = tx_alignment - (pdi0_buffer_size % 
tx_alignment); + pdi0_buffer_size += tx_pad; + } else { /* write */ + /* + * For the write command, the rx data block is 4, and the rx buffer size of a frame + * is 8. So the rx buffer size (pdi0_buffer_size) is always a multiple of rx + * alignment. + */ + tx_pad = tx_alignment - (pdi0_buffer_size % tx_alignment); + pdi0_buffer_size += tx_pad; + } dev_dbg(cdns->dev, "Message len %d transferred in %d frames (%d per frame)\n", - msg->len, num_frames, data_per_frame); + len, num_frames, data_per_frame); dev_dbg(cdns->dev, "sizes pdi0 %d pdi1 %d tx_bandwidth %d rx_bandwidth %d\n", pdi0_buffer_size, pdi1_buffer_size, tx_dma_bandwidth, rx_dma_bandwidth); @@ -169,15 +237,16 @@ static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave * } if (!command) { - ret = sdw_cdns_prepare_write_dma_buffer(msg->dev_num, msg->addr, msg->buf, - msg->len, data_per_frame, + ret = sdw_cdns_prepare_write_dma_buffer(msg->dev_num, msg->sec, msg->sections, + data_per_frame, sdw->bpt_ctx.dmab_tx_bdl.area, pdi0_buffer_size, &tx_total_bytes); } else { - ret = sdw_cdns_prepare_read_dma_buffer(msg->dev_num, msg->addr, msg->len, + ret = sdw_cdns_prepare_read_dma_buffer(msg->dev_num, msg->sec, msg->sections, data_per_frame, sdw->bpt_ctx.dmab_tx_bdl.area, - pdi0_buffer_size, &tx_total_bytes); + pdi0_buffer_size, &tx_total_bytes, + fake_size); } if (!ret) @@ -252,11 +321,16 @@ static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *s struct sdw_bpt_msg *msg) { struct sdw_cdns *cdns = &sdw->cdns; + int len = 0; int ret; + int i; + + for (i = 0; i < msg->sections; i++) + len += msg->sec[i].len; - if (msg->len < INTEL_BPT_MSG_BYTE_MIN) { + if (len < INTEL_BPT_MSG_BYTE_MIN) { dev_err(cdns->dev, "BPT message length %d is less than the minimum bytes %d\n", - msg->len, INTEL_BPT_MSG_BYTE_MIN); + len, INTEL_BPT_MSG_BYTE_MIN); return -EINVAL; } @@ -316,7 +390,7 @@ static int intel_ace2x_bpt_wait(struct sdw_intel *sdw, struct sdw_slave *slave, } else { ret = sdw_cdns_check_read_response(cdns->dev, sdw->bpt_ctx.dmab_rx_bdl.area, sdw->bpt_ctx.pdi1_buffer_size, - msg->buf, msg->len, sdw->bpt_ctx.num_frames, + msg->sec, msg->sections, sdw->bpt_ctx.num_frames, sdw->bpt_ctx.data_per_frame); if (ret < 0) dev_err(cdns->dev, "%s: BPT Read failed %d\n", __func__, ret); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index 5b3078220189be..17afc5aa8b44e0 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -31,6 +31,7 @@ #define SWRM_VERSION_1_5_1 0x01050001 #define SWRM_VERSION_1_7_0 0x01070000 #define SWRM_VERSION_2_0_0 0x02000000 +#define SWRM_VERSION_3_1_0 0x03010000 #define SWRM_COMP_HW_VERSION 0x00 #define SWRM_COMP_CFG_ADDR 0x04 #define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1) @@ -40,6 +41,9 @@ #define SWRM_COMP_PARAMS_RD_FIFO_DEPTH GENMASK(19, 15) #define SWRM_COMP_PARAMS_DOUT_PORTS_MASK GENMASK(4, 0) #define SWRM_COMP_PARAMS_DIN_PORTS_MASK GENMASK(9, 5) +#define SWRM_V3_COMP_PARAMS_WR_FIFO_DEPTH GENMASK(17, 10) +#define SWRM_V3_COMP_PARAMS_RD_FIFO_DEPTH GENMASK(23, 18) + #define SWRM_COMP_MASTER_ID 0x104 #define SWRM_V1_3_INTERRUPT_STATUS 0x200 #define SWRM_V2_0_INTERRUPT_STATUS 0x5000 @@ -99,14 +103,15 @@ #define SWRM_MCP_SLV_STATUS 0x1090 #define SWRM_MCP_SLV_STATUS_MASK GENMASK(1, 0) #define SWRM_MCP_SLV_STATUS_SZ 2 -#define SWRM_DP_PORT_CTRL_BANK(n, m) (0x1124 + 0x100 * (n - 1) + 0x40 * m) -#define SWRM_DP_PORT_CTRL_2_BANK(n, m) (0x1128 + 0x100 * (n - 1) + 0x40 * m) -#define SWRM_DP_BLOCK_CTRL_1(n) (0x112C + 0x100 * (n - 1)) -#define 
SWRM_DP_BLOCK_CTRL2_BANK(n, m) (0x1130 + 0x100 * (n - 1) + 0x40 * m) -#define SWRM_DP_PORT_HCTRL_BANK(n, m) (0x1134 + 0x100 * (n - 1) + 0x40 * m) -#define SWRM_DP_BLOCK_CTRL3_BANK(n, m) (0x1138 + 0x100 * (n - 1) + 0x40 * m) -#define SWRM_DP_SAMPLECTRL2_BANK(n, m) (0x113C + 0x100 * (n - 1) + 0x40 * m) -#define SWRM_DIN_DPn_PCM_PORT_CTRL(n) (0x1054 + 0x100 * (n - 1)) + +#define SWRM_DPn_PORT_CTRL_BANK(offset, n, m) (offset + 0x100 * (n - 1) + 0x40 * m) +#define SWRM_DPn_PORT_CTRL_2_BANK(offset, n, m) (offset + 0x100 * (n - 1) + 0x40 * m) +#define SWRM_DPn_BLOCK_CTRL_1(offset, n) (offset + 0x100 * (n - 1)) +#define SWRM_DPn_BLOCK_CTRL2_BANK(offset, n, m) (offset + 0x100 * (n - 1) + 0x40 * m) +#define SWRM_DPn_PORT_HCTRL_BANK(offset, n, m) (offset + 0x100 * (n - 1) + 0x40 * m) +#define SWRM_DPn_BLOCK_CTRL3_BANK(offset, n, m) (offset + 0x100 * (n - 1) + 0x40 * m) +#define SWRM_DPn_SAMPLECTRL2_BANK(offset, n, m) (offset + 0x100 * (n - 1) + 0x40 * m) + #define SWR_V1_3_MSTR_MAX_REG_ADDR 0x1740 #define SWR_V2_0_MSTR_MAX_REG_ADDR 0x50ac @@ -128,7 +133,6 @@ #define MAX_FREQ_NUM 1 #define TIMEOUT_MS 100 #define QCOM_SWRM_MAX_RD_LEN 0x1 -#define QCOM_SDW_MAX_PORTS 14 #define DEFAULT_CLK_FREQ 9600000 #define SWRM_MAX_DAIS 0xF #define SWR_INVALID_PARAM 0xFF @@ -172,6 +176,13 @@ enum { SWRM_REG_CMD_FIFO_RD_CMD, SWRM_REG_CMD_FIFO_STATUS, SWRM_REG_CMD_FIFO_RD_FIFO_ADDR, + SWRM_OFFSET_DP_PORT_CTRL_BANK, + SWRM_OFFSET_DP_PORT_CTRL_2_BANK, + SWRM_OFFSET_DP_BLOCK_CTRL_1, + SWRM_OFFSET_DP_BLOCK_CTRL2_BANK, + SWRM_OFFSET_DP_PORT_HCTRL_BANK, + SWRM_OFFSET_DP_BLOCK_CTRL3_BANK, + SWRM_OFFSET_DP_SAMPLECTRL2_BANK, }; struct qcom_swrm_ctrl { @@ -195,6 +206,7 @@ struct qcom_swrm_ctrl { int wake_irq; int num_din_ports; int num_dout_ports; + int nports; int cols_index; int rows_index; unsigned long port_mask; @@ -202,14 +214,13 @@ struct qcom_swrm_ctrl { u8 rcmd_id; u8 wcmd_id; /* Port numbers are 1 - 14 */ - struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS + 1]; + struct qcom_swrm_port_config *pconfig; struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS]; enum sdw_slave_status status[SDW_MAX_DEVICES + 1]; int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val); int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val); u32 slave_status; u32 wr_fifo_depth; - u32 rd_fifo_depth; bool clock_stop_not_supported; }; @@ -231,6 +242,13 @@ static const unsigned int swrm_v1_3_reg_layout[] = { [SWRM_REG_CMD_FIFO_RD_CMD] = SWRM_V1_3_CMD_FIFO_RD_CMD, [SWRM_REG_CMD_FIFO_STATUS] = SWRM_V1_3_CMD_FIFO_STATUS, [SWRM_REG_CMD_FIFO_RD_FIFO_ADDR] = SWRM_V1_3_CMD_FIFO_RD_FIFO_ADDR, + [SWRM_OFFSET_DP_PORT_CTRL_BANK] = 0x1124, + [SWRM_OFFSET_DP_PORT_CTRL_2_BANK] = 0x1128, + [SWRM_OFFSET_DP_BLOCK_CTRL_1] = 0x112c, + [SWRM_OFFSET_DP_BLOCK_CTRL2_BANK] = 0x1130, + [SWRM_OFFSET_DP_PORT_HCTRL_BANK] = 0x1134, + [SWRM_OFFSET_DP_BLOCK_CTRL3_BANK] = 0x1138, + [SWRM_OFFSET_DP_SAMPLECTRL2_BANK] = 0x113c, }; static const struct qcom_swrm_data swrm_v1_3_data = { @@ -265,6 +283,13 @@ static const unsigned int swrm_v2_0_reg_layout[] = { [SWRM_REG_CMD_FIFO_RD_CMD] = SWRM_V2_0_CMD_FIFO_RD_CMD, [SWRM_REG_CMD_FIFO_STATUS] = SWRM_V2_0_CMD_FIFO_STATUS, [SWRM_REG_CMD_FIFO_RD_FIFO_ADDR] = SWRM_V2_0_CMD_FIFO_RD_FIFO_ADDR, + [SWRM_OFFSET_DP_PORT_CTRL_BANK] = 0x1124, + [SWRM_OFFSET_DP_PORT_CTRL_2_BANK] = 0x1128, + [SWRM_OFFSET_DP_BLOCK_CTRL_1] = 0x112c, + [SWRM_OFFSET_DP_BLOCK_CTRL2_BANK] = 0x1130, + [SWRM_OFFSET_DP_PORT_HCTRL_BANK] = 0x1134, + [SWRM_OFFSET_DP_BLOCK_CTRL3_BANK] = 0x1138, + [SWRM_OFFSET_DP_SAMPLECTRL2_BANK] = 0x113c, }; static const 
struct qcom_swrm_data swrm_v2_0_data = { @@ -275,6 +300,32 @@ static const struct qcom_swrm_data swrm_v2_0_data = { .reg_layout = swrm_v2_0_reg_layout, }; +static const unsigned int swrm_v3_0_reg_layout[] = { + [SWRM_REG_FRAME_GEN_ENABLED] = SWRM_V2_0_LINK_STATUS, + [SWRM_REG_INTERRUPT_STATUS] = SWRM_V2_0_INTERRUPT_STATUS, + [SWRM_REG_INTERRUPT_MASK_ADDR] = 0, /* Not present */ + [SWRM_REG_INTERRUPT_CLEAR] = SWRM_V2_0_INTERRUPT_CLEAR, + [SWRM_REG_INTERRUPT_CPU_EN] = SWRM_V2_0_INTERRUPT_CPU_EN, + [SWRM_REG_CMD_FIFO_WR_CMD] = SWRM_V2_0_CMD_FIFO_WR_CMD, + [SWRM_REG_CMD_FIFO_RD_CMD] = SWRM_V2_0_CMD_FIFO_RD_CMD, + [SWRM_REG_CMD_FIFO_STATUS] = SWRM_V2_0_CMD_FIFO_STATUS, + [SWRM_REG_CMD_FIFO_RD_FIFO_ADDR] = SWRM_V2_0_CMD_FIFO_RD_FIFO_ADDR, + [SWRM_OFFSET_DP_PORT_CTRL_BANK] = 0x1224, + [SWRM_OFFSET_DP_PORT_CTRL_2_BANK] = 0x1228, + [SWRM_OFFSET_DP_BLOCK_CTRL_1] = 0x122c, + [SWRM_OFFSET_DP_BLOCK_CTRL2_BANK] = 0x1230, + [SWRM_OFFSET_DP_PORT_HCTRL_BANK] = 0x1234, + [SWRM_OFFSET_DP_BLOCK_CTRL3_BANK] = 0x1238, + [SWRM_OFFSET_DP_SAMPLECTRL2_BANK] = 0x123c, +}; + +static const struct qcom_swrm_data swrm_v3_0_data = { + .default_rows = 50, + .default_cols = 16, + .sw_clk_gate_required = true, + .max_reg = SWR_V2_0_MSTR_MAX_REG_ADDR, + .reg_layout = swrm_v3_0_reg_layout, +}; #define to_qcom_sdw(b) container_of(b, struct qcom_swrm_ctrl, bus) static int qcom_swrm_ahb_reg_read(struct qcom_swrm_ctrl *ctrl, int reg, @@ -898,8 +949,11 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl) swrm_wait_for_frame_gen_enabled(ctrl); ctrl->slave_status = 0; ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val); - ctrl->rd_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_RD_FIFO_DEPTH, val); - ctrl->wr_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_WR_FIFO_DEPTH, val); + + if (ctrl->version >= SWRM_VERSION_3_1_0) + ctrl->wr_fifo_depth = FIELD_GET(SWRM_V3_COMP_PARAMS_WR_FIFO_DEPTH, val); + else + ctrl->wr_fifo_depth = FIELD_GET(SWRM_COMP_PARAMS_WR_FIFO_DEPTH, val); return 0; } @@ -966,10 +1020,10 @@ static int qcom_swrm_port_params(struct sdw_bus *bus, unsigned int bank) { struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus); + u32 offset = ctrl->reg_layout[SWRM_OFFSET_DP_BLOCK_CTRL_1]; - return ctrl->reg_write(ctrl, SWRM_DP_BLOCK_CTRL_1(p_params->num), - p_params->bps - 1); - + return ctrl->reg_write(ctrl, SWRM_DPn_BLOCK_CTRL_1(offset, p_params->num), + p_params->bps - 1); } static int qcom_swrm_transport_params(struct sdw_bus *bus, @@ -979,9 +1033,11 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus, struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus); struct qcom_swrm_port_config *pcfg; u32 value; - int reg = SWRM_DP_PORT_CTRL_BANK((params->port_num), bank); + int reg, offset = ctrl->reg_layout[SWRM_OFFSET_DP_PORT_CTRL_BANK]; int ret; + reg = SWRM_DPn_PORT_CTRL_BANK(offset, params->port_num, bank); + pcfg = &ctrl->pconfig[params->port_num]; value = pcfg->off1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT; @@ -993,15 +1049,19 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus, goto err; if (pcfg->si > 0xff) { + offset = ctrl->reg_layout[SWRM_OFFSET_DP_SAMPLECTRL2_BANK]; value = (pcfg->si >> 8) & 0xff; - reg = SWRM_DP_SAMPLECTRL2_BANK(params->port_num, bank); + reg = SWRM_DPn_SAMPLECTRL2_BANK(offset, params->port_num, bank); + ret = ctrl->reg_write(ctrl, reg, value); if (ret) goto err; } if (pcfg->lane_control != SWR_INVALID_PARAM) { - reg = SWRM_DP_PORT_CTRL_2_BANK(params->port_num, bank); + offset = ctrl->reg_layout[SWRM_OFFSET_DP_PORT_CTRL_2_BANK]; + reg = SWRM_DPn_PORT_CTRL_2_BANK(offset, params->port_num, bank); + value = 
pcfg->lane_control; ret = ctrl->reg_write(ctrl, reg, value); if (ret) @@ -1009,20 +1069,23 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus, } if (pcfg->blk_group_count != SWR_INVALID_PARAM) { - reg = SWRM_DP_BLOCK_CTRL2_BANK(params->port_num, bank); + offset = ctrl->reg_layout[SWRM_OFFSET_DP_BLOCK_CTRL2_BANK]; + + reg = SWRM_DPn_BLOCK_CTRL2_BANK(offset, params->port_num, bank); + value = pcfg->blk_group_count; ret = ctrl->reg_write(ctrl, reg, value); if (ret) goto err; } - if (pcfg->hstart != SWR_INVALID_PARAM - && pcfg->hstop != SWR_INVALID_PARAM) { - reg = SWRM_DP_PORT_HCTRL_BANK(params->port_num, bank); + offset = ctrl->reg_layout[SWRM_OFFSET_DP_PORT_HCTRL_BANK]; + reg = SWRM_DPn_PORT_HCTRL_BANK(offset, params->port_num, bank); + + if (pcfg->hstart != SWR_INVALID_PARAM && pcfg->hstop != SWR_INVALID_PARAM) { value = (pcfg->hstop << 4) | pcfg->hstart; ret = ctrl->reg_write(ctrl, reg, value); } else { - reg = SWRM_DP_PORT_HCTRL_BANK(params->port_num, bank); value = (SWR_HSTOP_MAX_VAL << 4) | SWR_HSTART_MIN_VAL; ret = ctrl->reg_write(ctrl, reg, value); } @@ -1031,7 +1094,8 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus, goto err; if (pcfg->bp_mode != SWR_INVALID_PARAM) { - reg = SWRM_DP_BLOCK_CTRL3_BANK(params->port_num, bank); + offset = ctrl->reg_layout[SWRM_OFFSET_DP_BLOCK_CTRL3_BANK]; + reg = SWRM_DPn_BLOCK_CTRL3_BANK(offset, params->port_num, bank); ret = ctrl->reg_write(ctrl, reg, pcfg->bp_mode); } @@ -1043,9 +1107,12 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus, struct sdw_enable_ch *enable_ch, unsigned int bank) { - u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank); + u32 reg; struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus); u32 val; + u32 offset = ctrl->reg_layout[SWRM_OFFSET_DP_PORT_CTRL_BANK]; + + reg = SWRM_DPn_PORT_CTRL_BANK(offset, enable_ch->port_num, bank); ctrl->reg_read(ctrl, reg, &val); @@ -1155,7 +1222,6 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl, struct snd_pcm_hw_params *params, int direction) { - struct sdw_port_config pconfig[QCOM_SDW_MAX_PORTS]; struct sdw_stream_config sconfig; struct sdw_master_runtime *m_rt; struct sdw_slave_runtime *s_rt; @@ -1164,6 +1230,10 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl, unsigned long *port_mask; int maxport, pn, nports = 0, ret = 0; unsigned int m_port; + struct sdw_port_config *pconfig __free(kfree) = kcalloc(ctrl->nports, + sizeof(*pconfig), GFP_KERNEL); + if (!pconfig) + return -ENOMEM; if (direction == SNDRV_PCM_STREAM_CAPTURE) sconfig.direction = SDW_DATA_DIR_TX; @@ -1188,8 +1258,7 @@ static int qcom_swrm_stream_alloc_ports(struct qcom_swrm_ctrl *ctrl, continue; port_mask = &ctrl->port_mask; - maxport = ctrl->num_dout_ports + ctrl->num_din_ports; - + maxport = ctrl->nports; list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { slave = s_rt->slave; @@ -1349,17 +1418,8 @@ static int qcom_swrm_register_dais(struct qcom_swrm_ctrl *ctrl) static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl) { struct device_node *np = ctrl->dev->of_node; - u8 off1[QCOM_SDW_MAX_PORTS]; - u8 off2[QCOM_SDW_MAX_PORTS]; - u16 si[QCOM_SDW_MAX_PORTS]; - u8 bp_mode[QCOM_SDW_MAX_PORTS] = { 0, }; - u8 hstart[QCOM_SDW_MAX_PORTS]; - u8 hstop[QCOM_SDW_MAX_PORTS]; - u8 word_length[QCOM_SDW_MAX_PORTS]; - u8 blk_group_count[QCOM_SDW_MAX_PORTS]; - u8 lane_control[QCOM_SDW_MAX_PORTS]; - int i, ret, nports, val; - bool si_16 = false; + struct qcom_swrm_port_config *pcfg; + int i, ret, val; ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val); @@ -1367,88 
+1427,78 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl) ctrl->num_din_ports = FIELD_GET(SWRM_COMP_PARAMS_DIN_PORTS_MASK, val); ret = of_property_read_u32(np, "qcom,din-ports", &val); - if (ret) - return ret; - - if (val > ctrl->num_din_ports) - return -EINVAL; + if (!ret) { /* only if present */ + if (val != ctrl->num_din_ports) { + dev_err(ctrl->dev, "din-ports (%d) mismatch with controller (%d)", + val, ctrl->num_din_ports); + } - ctrl->num_din_ports = val; + ctrl->num_din_ports = val; + } ret = of_property_read_u32(np, "qcom,dout-ports", &val); - if (ret) - return ret; + if (!ret) { /* only if present */ + if (val != ctrl->num_dout_ports) { + dev_err(ctrl->dev, "dout-ports (%d) mismatch with controller (%d)", + val, ctrl->num_dout_ports); + } - if (val > ctrl->num_dout_ports) - return -EINVAL; + ctrl->num_dout_ports = val; + } - ctrl->num_dout_ports = val; + ctrl->nports = ctrl->num_dout_ports + ctrl->num_din_ports; - nports = ctrl->num_dout_ports + ctrl->num_din_ports; - if (nports > QCOM_SDW_MAX_PORTS) - return -EINVAL; + ctrl->pconfig = devm_kcalloc(ctrl->dev, ctrl->nports + 1, + sizeof(*ctrl->pconfig), GFP_KERNEL); + if (!ctrl->pconfig) + return -ENOMEM; - /* Valid port numbers are from 1-14, so mask out port 0 explicitly */ set_bit(0, &ctrl->port_mask); + /* Valid port numbers are from 1, so mask out port 0 explicitly */ + for (i = 0; i < ctrl->nports; i++) { + pcfg = &ctrl->pconfig[i + 1]; - ret = of_property_read_u8_array(np, "qcom,ports-offset1", - off1, nports); - if (ret) - return ret; - - ret = of_property_read_u8_array(np, "qcom,ports-offset2", - off2, nports); - if (ret) - return ret; - - ret = of_property_read_u8_array(np, "qcom,ports-sinterval-low", - (u8 *)si, nports); - if (ret) { - ret = of_property_read_u16_array(np, "qcom,ports-sinterval", - si, nports); + ret = of_property_read_u8_index(np, "qcom,ports-offset1", i, &pcfg->off1); if (ret) return ret; - si_16 = true; - } - ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode", - bp_mode, nports); - if (ret) { - if (ctrl->version <= SWRM_VERSION_1_3_0) - memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); - else + ret = of_property_read_u8_index(np, "qcom,ports-offset2", i, &pcfg->off2); + if (ret) return ret; - } - memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); - of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports); + ret = of_property_read_u8_index(np, "qcom,ports-sinterval-low", i, (u8 *)&pcfg->si); + if (ret) { + ret = of_property_read_u16_index(np, "qcom,ports-sinterval", i, &pcfg->si); + if (ret) + return ret; + } + + ret = of_property_read_u8_index(np, "qcom,ports-block-pack-mode", + i, &pcfg->bp_mode); + if (ret) { + if (ctrl->version <= SWRM_VERSION_1_3_0) + pcfg->bp_mode = SWR_INVALID_PARAM; + else + return ret; + } + + /* Optional properties */ + pcfg->hstart = SWR_INVALID_PARAM; + pcfg->hstop = SWR_INVALID_PARAM; + pcfg->word_length = SWR_INVALID_PARAM; + pcfg->blk_group_count = SWR_INVALID_PARAM; + pcfg->lane_control = SWR_INVALID_PARAM; - memset(hstop, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); - of_property_read_u8_array(np, "qcom,ports-hstop", hstop, nports); + of_property_read_u8_index(np, "qcom,ports-hstart", i, &pcfg->hstart); - memset(word_length, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); - of_property_read_u8_array(np, "qcom,ports-word-length", word_length, nports); + of_property_read_u8_index(np, "qcom,ports-hstop", i, &pcfg->hstop); - memset(blk_group_count, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); - of_property_read_u8_array(np, 
"qcom,ports-block-group-count", blk_group_count, nports); + of_property_read_u8_index(np, "qcom,ports-word-length", i, &pcfg->word_length); - memset(lane_control, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); - of_property_read_u8_array(np, "qcom,ports-lane-control", lane_control, nports); + of_property_read_u8_index(np, "qcom,ports-block-group-count", + i, &pcfg->blk_group_count); - for (i = 0; i < nports; i++) { - /* Valid port number range is from 1-14 */ - if (si_16) - ctrl->pconfig[i + 1].si = si[i]; - else - ctrl->pconfig[i + 1].si = ((u8 *)si)[i]; - ctrl->pconfig[i + 1].off1 = off1[i]; - ctrl->pconfig[i + 1].off2 = off2[i]; - ctrl->pconfig[i + 1].bp_mode = bp_mode[i]; - ctrl->pconfig[i + 1].hstart = hstart[i]; - ctrl->pconfig[i + 1].hstop = hstop[i]; - ctrl->pconfig[i + 1].word_length = word_length[i]; - ctrl->pconfig[i + 1].blk_group_count = blk_group_count[i]; - ctrl->pconfig[i + 1].lane_control = lane_control[i]; + of_property_read_u8_index(np, "qcom,ports-lane-control", i, &pcfg->lane_control); } return 0; @@ -1769,6 +1819,7 @@ static const struct of_device_id qcom_swrm_of_match[] = { { .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_6_data }, { .compatible = "qcom,soundwire-v1.7.0", .data = &swrm_v1_5_data }, { .compatible = "qcom,soundwire-v2.0.0", .data = &swrm_v2_0_data }, + { .compatible = "qcom,soundwire-v3.1.0", .data = &swrm_v3_0_data }, {/* sentinel */}, }; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index b8457477cee9eb..9f167ff8da7b07 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -5,8 +5,7 @@ * Copyright (C) 2011 Chris Boot <bootc@bootc.net> */ -#define KMSG_COMPONENT "sbp_target" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sbp_target: " fmt #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index e8b7955d40f29a..50d21888a0c9a7 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1524,6 +1524,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); if (!cmd->t_task_cdb) { + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig index 90226f72c158aa..f662e7ce71f1bd 100644 --- a/drivers/ufs/Kconfig +++ b/drivers/ufs/Kconfig @@ -6,6 +6,7 @@ menuconfig SCSI_UFSHCD tristate "Universal Flash Storage Controller" depends on SCSI && SCSI_DMA + depends on RPMB || !RPMB select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND select NLS diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 040a0ceb170a7f..80c0b49f30b012 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1455,15 +1455,14 @@ out: static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err) { up_write(&hba->clk_scaling_lock); + mutex_unlock(&hba->wb_mutex); + blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); /* Enable Write Booster if current gear requires it else disable it */ if (ufshcd_enable_wb_if_scaling_up(hba) && !err) ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear); - mutex_unlock(&hba->wb_mutex); - - blk_mq_unquiesce_tagset(&hba->host->tag_set); - 
mutex_unlock(&hba->host->scan_mutex); ufshcd_release(hba); } @@ -6504,6 +6503,11 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) static void ufshcd_err_handling_prepare(struct ufs_hba *hba) { + /* + * A WLUN resume failure could potentially lead to the HBA being + * runtime suspended, so take an extra reference on hba->dev. + */ + pm_runtime_get_sync(hba->dev); ufshcd_rpm_get_sync(hba); if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || hba->is_sys_suspended) { @@ -6543,6 +6547,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); ufshcd_rpm_put(hba); + pm_runtime_put(hba->dev); } static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) @@ -6557,28 +6562,42 @@ static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) #ifdef CONFIG_PM static void ufshcd_recover_pm_error(struct ufs_hba *hba) { + struct scsi_target *starget = hba->ufs_device_wlun->sdev_target; struct Scsi_Host *shost = hba->host; struct scsi_device *sdev; struct request_queue *q; - int ret; + bool resume_sdev_queues = false; hba->is_sys_suspended = false; + /* - * Set RPM status of wlun device to RPM_ACTIVE, - * this also clears its runtime error. + * Ensure the parent's error status is cleared before proceeding + * to the child, as the parent must be active to activate the child. */ - ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); + if (hba->dev->power.runtime_error) { + /* hba->dev has no functional parent, thus simply set RPM_ACTIVE */ + pm_runtime_set_active(hba->dev); + resume_sdev_queues = true; + } + + if (hba->ufs_device_wlun->sdev_gendev.power.runtime_error) { + /* + * starget, parent of wlun, might be suspended if wlun resume failed. + * Make sure parent is resumed before setting child (wlun) active. + */ + pm_runtime_get_sync(&starget->dev); + pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); + pm_runtime_put_sync(&starget->dev); + resume_sdev_queues = true; + } - /* hba device might have a runtime error otherwise */ - if (ret) - ret = pm_runtime_set_active(hba->dev); /* * If wlun device had runtime error, we also need to resume those * consumer scsi devices in case any of them has failed to be * resumed due to supplier runtime resume failure. This is to unblock * blk_queue_enter in case there are bios waiting inside it. */ - if (!ret) { + if (resume_sdev_queues) { shost_for_each_device(sdev, shost) { q = sdev->request_queue; if (q->dev && (q->rpm_status == RPM_SUSPENDED || @@ -6679,19 +6698,22 @@ static void ufshcd_err_handler(struct work_struct *work) hba->saved_uic_err, hba->force_reset, ufshcd_is_link_broken(hba) ? "; link is broken" : ""); - /* - * Use ufshcd_rpm_get_noresume() here to safely perform link recovery - * even if an error occurs during runtime suspend or runtime resume. - * This avoids potential deadlocks that could happen if we tried to - * resume the device while a PM operation is already in progress. - */ - ufshcd_rpm_get_noresume(hba); - if (hba->pm_op_in_progress) { - ufshcd_link_recovery(hba); + if (hba->ufs_device_wlun) { + /* + * Use ufshcd_rpm_get_noresume() here to safely perform link + * recovery even if an error occurs during runtime suspend or + * runtime resume. This avoids potential deadlocks that could + * happen if we tried to resume the device while a PM operation + * is already in progress. 
+ */ + ufshcd_rpm_get_noresume(hba); + if (hba->pm_op_in_progress) { + ufshcd_link_recovery(hba); + ufshcd_rpm_put(hba); + return; + } ufshcd_rpm_put(hba); - return; } - ufshcd_rpm_put(hba); down(&hba->host_sem); spin_lock_irqsave(hba->host->host_lock, flags); diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 8d119b3223cbda..8ebee0cc531370 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1769,10 +1769,9 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); int i, j, nminor = 0, testbus_len = 0; - u32 *testbus __free(kfree) = NULL; char *prefix; - testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL); + u32 *testbus __free(kfree) = kmalloc_array(256, sizeof(u32), GFP_KERNEL); if (!testbus) return; @@ -1794,13 +1793,12 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba) static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix, void __iomem *base) { - u32 *regs __free(kfree) = NULL; size_t pos; if (offset % 4 != 0 || len % 4 != 0) return -EINVAL; - regs = kzalloc(len, GFP_ATOMIC); + u32 *regs __free(kfree) = kzalloc(len, GFP_ATOMIC); if (!regs) return -ENOMEM; diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index 3e7def3d31c16f..3d64a316ca31d3 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig @@ -3,9 +3,6 @@ config CEPH_FS tristate "Ceph distributed file system" depends on INET select CEPH_LIB - select CRC32 - select CRYPTO_AES - select CRYPTO select NETFS_SUPPORT select FS_ENCRYPTION_ALGS if FS_ENCRYPTION default n diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index b1a8ff612c41dc..2f663972da99cc 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -18,6 +18,7 @@ #include "crypto.h" #include <linux/ceph/decode.h> #include <linux/ceph/messenger.h> +#include <trace/events/ceph.h> /* * Capability management @@ -4452,6 +4453,9 @@ void ceph_handle_caps(struct ceph_mds_session *session, session->s_mds, ceph_cap_op_name(op), vino.ino, vino.snap, inode, seq, issue_seq, mseq); + trace_ceph_handle_caps(mdsc, session, op, &vino, ceph_inode(inode), + seq, issue_seq, mseq); + mutex_lock(&session->s_mutex); if (!inode) { diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 1740047aef0f9a..7e4eab824daef8 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -24,6 +24,7 @@ #include <linux/ceph/pagelist.h> #include <linux/ceph/auth.h> #include <linux/ceph/debugfs.h> +#include <trace/events/ceph.h> #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE) @@ -3288,6 +3289,8 @@ static void complete_request(struct ceph_mds_client *mdsc, { req->r_end_latency = ktime_get(); + trace_ceph_mdsc_complete_request(mdsc, req); + if (req->r_callback) req->r_callback(mdsc, req); complete_all(&req->r_completion); @@ -3419,6 +3422,8 @@ static int __send_request(struct ceph_mds_session *session, { int err; + trace_ceph_mdsc_send_request(session, req); + err = __prepare_send_request(session, req, drop_cap_releases); if (!err) { ceph_msg_get(req->r_request); @@ -3470,6 +3475,8 @@ static void __do_request(struct ceph_mds_client *mdsc, } if (mdsc->mdsmap->m_epoch == 0) { doutc(cl, "no mdsmap, waiting for map\n"); + trace_ceph_mdsc_suspend_request(mdsc, session, req, + ceph_mdsc_suspend_reason_no_mdsmap); list_add(&req->r_wait, &mdsc->waiting_for_map); return; } @@ -3491,6 +3498,8 @@ static void __do_request(struct ceph_mds_client *mdsc, goto finish; } doutc(cl, "no mds or not active, waiting for map\n"); + trace_ceph_mdsc_suspend_request(mdsc, session, req, + 
ceph_mdsc_suspend_reason_no_active_mds); list_add(&req->r_wait, &mdsc->waiting_for_map); return; } @@ -3536,9 +3545,11 @@ static void __do_request(struct ceph_mds_client *mdsc, * it to the mdsc queue. */ if (session->s_state == CEPH_MDS_SESSION_REJECTED) { - if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) + if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) { + trace_ceph_mdsc_suspend_request(mdsc, session, req, + ceph_mdsc_suspend_reason_rejected); list_add(&req->r_wait, &mdsc->waiting_for_map); - else + } else err = -EACCES; goto out_session; } @@ -3552,6 +3563,8 @@ static void __do_request(struct ceph_mds_client *mdsc, if (random) req->r_resend_mds = mds; } + trace_ceph_mdsc_suspend_request(mdsc, session, req, + ceph_mdsc_suspend_reason_session); list_add(&req->r_wait, &session->s_waiting); goto out_session; } @@ -3652,6 +3665,7 @@ static void __wake_requests(struct ceph_mds_client *mdsc, list_del_init(&req->r_wait); doutc(cl, " wake request %p tid %llu\n", req, req->r_tid); + trace_ceph_mdsc_resume_request(mdsc, req); __do_request(mdsc, req); } } @@ -3678,6 +3692,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds) req->r_session->s_mds == mds) { doutc(cl, " kicking tid %llu\n", req->r_tid); list_del_init(&req->r_wait); + trace_ceph_mdsc_resume_request(mdsc, req); __do_request(mdsc, req); } } @@ -3724,6 +3739,7 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir, doutc(cl, "submit_request on %p for inode %p\n", req, dir); mutex_lock(&mdsc->mutex); __register_request(mdsc, req, dir); + trace_ceph_mdsc_submit_request(mdsc, req); __do_request(mdsc, req); err = req->r_err; mutex_unlock(&mdsc->mutex); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index c65f2b202b2b3e..521507ea826000 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -374,7 +374,7 @@ static int build_snap_context(struct ceph_mds_client *mdsc, /* alloc new snap context */ err = -ENOMEM; - if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64)) + if ((size_t)num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64)) goto fail; snapc = ceph_create_snap_context(num, GFP_NOFS); if (!snapc) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index f6bf24b5c683ef..7c1c1dac320da3 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -30,6 +30,9 @@ #include <uapi/linux/magic.h> +#define CREATE_TRACE_POINTS +#include <trace/events/ceph.h> + static DEFINE_SPINLOCK(ceph_fsc_lock); static LIST_HEAD(ceph_fsc_list); diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 2af424e200b35e..630f3056658ef3 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -29,11 +29,6 @@ struct fat_cache_id { int dcluster; }; -static inline int fat_max_cache(struct inode *inode) -{ - return FAT_MAX_CACHE; -} - static struct kmem_cache *fat_cache_cachep; static void init_once(void *foo) @@ -145,7 +140,7 @@ static void fat_cache_add(struct inode *inode, struct fat_cache_id *new) cache = fat_cache_merge(inode, new); if (cache == NULL) { - if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) { + if (MSDOS_I(inode)->nr_caches < FAT_MAX_CACHE) { MSDOS_I(inode)->nr_caches++; spin_unlock(&MSDOS_I(inode)->cache_lru_lock); diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index c8b837006bb272..fabda0f6ec1a8a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -258,7 +258,7 @@ err_start: /* * Kill the callback thread if it's no longer being used. 
*/ -void nfs_callback_down(int minorversion, struct net *net) +void nfs_callback_down(int minorversion, struct net *net, struct rpc_xprt *xprt) { struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; struct svc_serv *serv; @@ -270,7 +270,7 @@ void nfs_callback_down(int minorversion, struct net *net) if (cb_info->users == 0) { svc_set_num_threads(serv, NULL, 0); dprintk("nfs_callback_down: service destroyed\n"); - svc_destroy(&cb_info->serv); + xprt_svc_destroy_nullify_bc(xprt, &cb_info->serv); } mutex_unlock(&nfs_callback_mutex); } diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 154a6ed1299f85..8809f93d82c056 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -188,7 +188,8 @@ extern __be32 nfs4_callback_recall(void *argp, void *resp, struct cb_process_state *cps); #if IS_ENABLED(CONFIG_NFS_V4) extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); -extern void nfs_callback_down(int minorversion, struct net *net); +extern void nfs_callback_down(int minorversion, struct net *net, + struct rpc_xprt *xprt); #endif /* CONFIG_NFS_V4 */ /* * nfs41: Callbacks are expected to not cause substantial latency, diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 54699299d5b168..2aaea9c98c2cd0 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -784,10 +784,18 @@ static int nfs_init_server(struct nfs_server *server, server->fattr_valid = NFS_ATTR_FATTR_V4; } - if (ctx->rsize) + if (ctx->bsize) { + server->bsize = ctx->bsize; + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_BSIZE; + } + if (ctx->rsize) { server->rsize = nfs_io_size(ctx->rsize, clp->cl_proto); - if (ctx->wsize) + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_RSIZE; + } + if (ctx->wsize) { server->wsize = nfs_io_size(ctx->wsize, clp->cl_proto); + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_WSIZE; + } server->acregmin = ctx->acregmin * HZ; server->acregmax = ctx->acregmax * HZ; @@ -977,8 +985,13 @@ EXPORT_SYMBOL_GPL(nfs_probe_server); void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source) { target->flags = source->flags; - target->rsize = source->rsize; - target->wsize = source->wsize; + target->automount_inherit = source->automount_inherit; + if (source->automount_inherit & NFS_AUTOMOUNT_INHERIT_BSIZE) + target->bsize = source->bsize; + if (source->automount_inherit & NFS_AUTOMOUNT_INHERIT_RSIZE) + target->rsize = source->rsize; + if (source->automount_inherit & NFS_AUTOMOUNT_INHERIT_WSIZE) + target->wsize = source->wsize; target->acregmin = source->acregmin; target->acregmax = source->acregmax; target->acdirmin = source->acdirmin; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 9d3a5f29f17fdc..2248e3ad089a7c 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -30,6 +30,11 @@ static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK; module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644); +bool directory_delegations = true; +module_param(directory_delegations, bool, 0644); +MODULE_PARM_DESC(directory_delegations, + "Enable the use of directory delegations, defaults to on."); + static struct hlist_head *nfs_delegation_hash(struct nfs_server *server, const struct nfs_fh *fhandle) { @@ -143,6 +148,8 @@ static int nfs4_do_check_delegation(struct inode *inode, fmode_t type, */ int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags) { + if (S_ISDIR(inode->i_mode) && !directory_delegations) + nfs_inode_evict_delegation(inode); return nfs4_do_check_delegation(inode, type, 
flags, true); } @@ -379,6 +386,7 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi, delegation->inode = NULL; rcu_assign_pointer(nfsi->delegation, NULL); spin_unlock(&delegation->lock); + clear_bit(NFS_INO_REQ_DIR_DELEG, &nfsi->flags); return delegation; } diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 08ec2e9c68a4a6..46d866adb5c265 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -124,6 +124,19 @@ static inline int nfs_have_delegated_mtime(struct inode *inode) NFS_DELEGATION_FLAG_TIME); } +extern bool directory_delegations; + +static inline void nfs_request_directory_delegation(struct inode *inode) +{ + if (S_ISDIR(inode->i_mode)) + set_bit(NFS_INO_REQ_DIR_DELEG, &NFS_I(inode)->flags); +} + +static inline bool nfs_have_directory_delegation(struct inode *inode) +{ + return S_ISDIR(inode->i_mode) && nfs_have_delegated_attributes(inode); +} + int nfs4_delegation_hash_alloc(struct nfs_server *server); #endif diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ea9f6ca8f30fa2..23a78a742b619d 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -789,16 +789,17 @@ again: goto out; } + nfs_set_verifier(dentry, dir_verifier); inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr); alias = d_splice_alias(inode, dentry); d_lookup_done(dentry); if (alias) { if (IS_ERR(alias)) goto out; + nfs_set_verifier(alias, dir_verifier); dput(dentry); dentry = alias; } - nfs_set_verifier(dentry, dir_verifier); trace_nfs_readdir_lookup(d_inode(parent), dentry, 0); out: dput(dentry); @@ -1514,6 +1515,15 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry, return 0; if (!nfs_dentry_verify_change(dir, dentry)) return 0; + + /* + * If we have a directory delegation then we don't need to revalidate + * the directory. The delegation will either get recalled or we will + * receive a notification when it changes. 
+ */ + if (nfs_have_directory_delegation(dir)) + return 0; + /* Revalidate nfsi->cache_change_attribute before we declare a match */ if (nfs_mapping_need_revalidate_inode(dir)) { if (rcu_walk) @@ -1894,13 +1904,15 @@ static int nfs_dentry_delete(const struct dentry *dentry) } /* Ensure that we revalidate inode->i_nlink */ -static void nfs_drop_nlink(struct inode *inode) +static void nfs_drop_nlink(struct inode *inode, unsigned long gencount) { + struct nfs_inode *nfsi = NFS_I(inode); + spin_lock(&inode->i_lock); /* drop the inode if we're reasonably sure this is the last link */ - if (inode->i_nlink > 0) + if (inode->i_nlink > 0 && gencount == nfsi->attr_gencount) drop_nlink(inode); - NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter(); + nfsi->attr_gencount = nfs_inc_attr_generation_counter(); nfs_set_cache_invalid( inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | NFS_INO_INVALID_NLINK); @@ -1914,8 +1926,9 @@ static void nfs_drop_nlink(struct inode *inode) static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) { if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { + unsigned long gencount = READ_ONCE(NFS_I(inode)->attr_gencount); nfs_complete_unlink(dentry, inode); - nfs_drop_nlink(inode); + nfs_drop_nlink(inode, gencount); } iput(inode); } @@ -1991,13 +2004,14 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in nfs_lookup_advise_force_readdirplus(dir, flags); no_entry: + nfs_set_verifier(dentry, dir_verifier); res = d_splice_alias(inode, dentry); if (res != NULL) { if (IS_ERR(res)) goto out; + nfs_set_verifier(res, dir_verifier); dentry = res; } - nfs_set_verifier(dentry, dir_verifier); out: trace_nfs_lookup_exit(dir, dentry, flags, PTR_ERR_OR_ZERO(res)); nfs_free_fattr(fattr); @@ -2139,12 +2153,12 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, d_drop(dentry); switch (err) { case -ENOENT: - d_splice_alias(NULL, dentry); if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) dir_verifier = inode_peek_iversion_raw(dir); else dir_verifier = nfs_save_change_attribute(dir); nfs_set_verifier(dentry, dir_verifier); + d_splice_alias(NULL, dentry); break; case -EISDIR: case -ENOTDIR: @@ -2203,6 +2217,13 @@ no_open: EXPORT_SYMBOL_GPL(nfs_atomic_open); static int +nfs_lookup_revalidate_delegated_parent(struct inode *dir, struct dentry *dentry, + struct inode *inode) +{ + return nfs_lookup_revalidate_done(dir, dentry, inode, 1); +} + +static int nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { @@ -2229,6 +2250,9 @@ nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name, if (nfs_verifier_is_delegated(dentry)) return nfs_lookup_revalidate_delegated(dir, dentry, inode); + if (nfs_have_directory_delegation(dir)) + return nfs_lookup_revalidate_delegated_parent(dir, dentry, inode); + /* NFS only supports OPEN on regular files */ if (!S_ISREG(inode->i_mode)) goto full_reval; @@ -2507,9 +2531,11 @@ static int nfs_safe_remove(struct dentry *dentry) trace_nfs_remove_enter(dir, dentry); if (inode != NULL) { + unsigned long gencount = READ_ONCE(NFS_I(inode)->attr_gencount); + error = NFS_PROTO(dir)->remove(dir, dentry); if (error == 0) - nfs_drop_nlink(inode); + nfs_drop_nlink(inode, gencount); } else error = NFS_PROTO(dir)->remove(dir, dentry); if (error == -ENOENT) @@ -2709,6 +2735,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, { struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); + unsigned long 
new_gencount = 0; struct dentry *dentry = NULL; struct rpc_task *task; bool must_unblock = false; @@ -2761,6 +2788,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, } else { block_revalidate(new_dentry); must_unblock = true; + new_gencount = NFS_I(new_inode)->attr_gencount; spin_unlock(&new_dentry->d_lock); } @@ -2800,7 +2828,7 @@ out: new_dir, new_dentry, error); if (!error) { if (new_inode != NULL) - nfs_drop_nlink(new_inode); + nfs_drop_nlink(new_inode, new_gencount); /* * The d_move() should be here instead of in an async RPC completion * handler because we need the proper locks to move the dentry. If diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index f76fe406937aec..84049f3cd34052 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1389,6 +1389,9 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) status = pnfs_sync_inode(inode, false); if (status) goto out; + } else if (nfs_have_directory_delegation(inode)) { + status = 0; + goto out; } status = -ENOMEM; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 2ecd38e1d17a80..2e596244799f39 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -13,7 +13,7 @@ #include <linux/nfslocalio.h> #include <linux/wait_bit.h> -#define NFS_SB_MASK (SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS) +#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS) extern const struct export_operations nfs_export_ops; @@ -152,7 +152,6 @@ struct nfs_fs_context { struct super_block *sb; struct dentry *dentry; struct nfs_fattr *fattr; - unsigned int inherited_bsize; } clone_data; }; diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c index f33bfa7b58e6ae..a113bfdacfd61e 100644 --- a/fs/nfs/localio.c +++ b/fs/nfs/localio.c @@ -43,8 +43,8 @@ struct nfs_local_kiocb { size_t end_len; short int end_iter_index; atomic_t n_iters; + struct iov_iter iters[NFSLOCAL_MAX_IOS]; bool iter_is_dio_aligned[NFSLOCAL_MAX_IOS]; - struct iov_iter iters[NFSLOCAL_MAX_IOS] ____cacheline_aligned; /* End mostly DIO-specific members */ }; @@ -339,8 +339,6 @@ nfs_is_local_dio_possible(struct nfs_local_kiocb *iocb, int rw, if (unlikely(!nf_dio_mem_align || !nf_dio_offset_align)) return false; - if (unlikely(nf_dio_offset_align > PAGE_SIZE)) - return false; if (unlikely(len < nf_dio_offset_align)) return false; diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 5a4d193da1a98b..af9be0c5f51630 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -149,6 +149,7 @@ struct vfsmount *nfs_d_automount(struct path *path) struct vfsmount *mnt = ERR_PTR(-ENOMEM); struct nfs_server *server = NFS_SB(path->dentry->d_sb); struct nfs_client *client = server->nfs_client; + unsigned long s_flags = path->dentry->d_sb->s_flags; int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); int ret; @@ -169,11 +170,21 @@ struct vfsmount *nfs_d_automount(struct path *path) if (!ctx->clone_data.fattr) goto out_fc; + if (fc->cred != server->cred) { + put_cred(fc->cred); + fc->cred = get_cred(server->cred); + } + if (fc->net_ns != client->cl_net) { put_net(fc->net_ns); fc->net_ns = get_net(client->cl_net); } + /* Inherit the flags covered by NFS_SB_MASK */ + fc->sb_flags_mask |= NFS_SB_MASK; + fc->sb_flags &= ~NFS_SB_MASK; + fc->sb_flags |= s_flags & NFS_SB_MASK; + /* for submounts we want the same server; referrals will reassign */ memcpy(&ctx->nfs_server._address, &client->cl_addr, client->cl_addrlen); ctx->nfs_server.addrlen = client->cl_addrlen; @@ -184,6 +195,10 @@ struct vfsmount *nfs_d_automount(struct path *path) ctx->nfs_mod = 
client->cl_nfs_mod; get_nfs_version(ctx->nfs_mod); + /* Inherit block sizes if they were specified as mount parameters */ + if (server->automount_inherit & NFS_AUTOMOUNT_INHERIT_BSIZE) + ctx->bsize = server->bsize; + ret = client->rpc_ops->submount(fc, server); if (ret < 0) { mnt = ERR_PTR(ret); @@ -283,7 +298,6 @@ int nfs_do_submount(struct fs_context *fc) return -ENOMEM; ctx->internal = true; - ctx->clone_data.inherited_bsize = ctx->clone_data.sb->s_blocksize_bits; p = nfs_devname(dentry, buffer, 4096); if (IS_ERR(p)) { diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index a4cb67573aa7cd..1181f9cc6dbdb0 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -483,7 +483,8 @@ nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir) static void nfs3_proc_rename_setup(struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry) + struct dentry *new_dentry, + struct inode *same_parent) { msg->rpc_proc = &nfs3_procedures[NFS3PROC_RENAME]; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 3a4baed993c96f..96bccefbe2cb2e 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -281,8 +281,13 @@ error: */ static void nfs4_destroy_callback(struct nfs_client *clp) { - if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) - nfs_callback_down(clp->cl_mvops->minor_version, clp->cl_net); + if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) { + struct rpc_xprt *xprt; + + xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt); + nfs_callback_down(clp->cl_mvops->minor_version, clp->cl_net, + xprt); + } } static void nfs4_shutdown_client(struct nfs_client *clp) @@ -1174,10 +1179,20 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc) if (error < 0) return error; - if (ctx->rsize) - server->rsize = nfs_io_size(ctx->rsize, server->nfs_client->cl_proto); - if (ctx->wsize) - server->wsize = nfs_io_size(ctx->wsize, server->nfs_client->cl_proto); + if (ctx->bsize) { + server->bsize = ctx->bsize; + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_BSIZE; + } + if (ctx->rsize) { + server->rsize = + nfs_io_size(ctx->rsize, server->nfs_client->cl_proto); + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_RSIZE; + } + if (ctx->wsize) { + server->wsize = + nfs_io_size(ctx->wsize, server->nfs_client->cl_proto); + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_WSIZE; + } server->acregmin = ctx->acregmin * HZ; server->acregmax = ctx->acregmax * HZ; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 93c6ce04332b89..ec1ce593dea2bf 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1780,8 +1780,17 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, if (nfs_stateid_is_sequential(state, stateid)) break; - if (status) - break; + if (status) { + if (nfs4_stateid_match_other(stateid, &state->open_stateid) && + !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { + trace_nfs4_open_stateid_update_skip(state->inode, + stateid, status); + return; + } else { + break; + } + } + /* Rely on seqids for serialisation with NFSv4.0 */ if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) break; @@ -3174,18 +3183,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); - dentry = opendata->dentry; - if (d_really_is_negative(dentry)) { - struct dentry *alias; - d_drop(dentry); - alias = d_splice_alias(igrab(state->inode), dentry); - /* d_splice_alias() 
can't fail here - it's a non-directory */ - if (alias) { - dput(ctx->dentry); - ctx->dentry = dentry = alias; - } - } - switch(opendata->o_arg.claim) { default: break; @@ -3196,7 +3193,20 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, break; if (opendata->o_res.delegation.type != 0) dir_verifier = nfs_save_change_attribute(dir); - nfs_set_verifier(dentry, dir_verifier); + } + + dentry = opendata->dentry; + nfs_set_verifier(dentry, dir_verifier); + if (d_really_is_negative(dentry)) { + struct dentry *alias; + d_drop(dentry); + alias = d_splice_alias(igrab(state->inode), dentry); + /* d_splice_alias() can't fail here - it's a non-directory */ + if (alias) { + dput(ctx->dentry); + nfs_set_verifier(alias, dir_verifier); + ctx->dentry = dentry = alias; + } } /* Parse layoutget results before we check for access */ @@ -4460,6 +4470,30 @@ out: return status; } +#if IS_ENABLED(CONFIG_NFS_V4_1) +static bool should_request_dir_deleg(struct inode *inode) +{ + if (!directory_delegations) + return false; + if (!inode) + return false; + if (!S_ISDIR(inode->i_mode)) + return false; + if (!nfs_server_capable(inode, NFS_CAP_DIR_DELEG)) + return false; + if (!test_and_clear_bit(NFS_INO_REQ_DIR_DELEG, &(NFS_I(inode)->flags))) + return false; + if (nfs4_have_delegation(inode, FMODE_READ, 0)) + return false; + return true; +} +#else +static bool should_request_dir_deleg(struct inode *inode) +{ + return false; +} +#endif /* CONFIG_NFS_V4_1 */ + static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct inode *inode) { @@ -4477,7 +4511,9 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, .rpc_argp = &args, .rpc_resp = &res, }; + struct nfs4_gdd_res gdd_res; unsigned short task_flags = 0; + int status; if (nfs4_has_session(server->nfs_client)) task_flags = RPC_TASK_MOVEABLE; @@ -4486,11 +4522,31 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) task_flags |= RPC_TASK_TIMEOUT; + args.get_dir_deleg = should_request_dir_deleg(inode); + if (args.get_dir_deleg) + res.gdd_res = &gdd_res; + nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); nfs_fattr_init(fattr); nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); - return nfs4_do_call_sync(server->client, server, &msg, - &args.seq_args, &res.seq_res, task_flags); + + status = nfs4_do_call_sync(server->client, server, &msg, + &args.seq_args, &res.seq_res, task_flags); + if (args.get_dir_deleg) { + switch (status) { + case 0: + if (gdd_res.status != GDD4_OK) + break; + status = nfs_inode_set_delegation( + inode, current_cred(), FMODE_READ, + &gdd_res.deleg, 0, NFS4_OPEN_DELEGATE_READ); + break; + case -ENOTSUPP: + case -EOPNOTSUPP: + server->caps &= ~NFS_CAP_DIR_DELEG; + } + } + return status; } int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, @@ -4503,8 +4559,14 @@ int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, do { err = _nfs4_proc_getattr(server, fhandle, fattr, inode); trace_nfs4_getattr(server, fhandle, fattr, err); - err = nfs4_handle_exception(server, err, - &exception); + switch (err) { + default: + err = nfs4_handle_exception(server, err, &exception); + break; + case -ENOTSUPP: + case -EOPNOTSUPP: + exception.retry = true; + } } while (exception.retry); return err; } @@ -4768,6 +4830,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry int status = 0; if 
(!nfs4_have_delegation(inode, FMODE_READ, 0)) { + nfs_request_directory_delegation(inode); res.fattr = nfs_alloc_fattr(); if (res.fattr == NULL) return -ENOMEM; @@ -4875,6 +4938,8 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); + nfs_request_directory_delegation(dir); + if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) sattr->ia_mode &= ~current_umask(); state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); @@ -4971,6 +5036,7 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); nfs_fattr_init(res->dir_attr); + nfs_request_directory_delegation(d_inode(dentry->d_parent)); if (inode) { nfs4_inode_return_delegation(inode); @@ -5005,7 +5071,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) static void nfs4_proc_rename_setup(struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry) + struct dentry *new_dentry, + struct inode *same_parent) { struct nfs_renameargs *arg = msg->rpc_argp; struct nfs_renameres *res = msg->rpc_resp; @@ -5016,6 +5083,8 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, nfs4_inode_make_writeable(old_inode); if (new_inode) nfs4_inode_return_delegation(new_inode); + if (same_parent) + nfs_request_directory_delegation(same_parent); msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; res->server = NFS_SB(old_dentry->d_sb); nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); @@ -10822,6 +10891,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN + | NFS_CAP_DIR_DELEG | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1 @@ -10848,6 +10918,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { .minor_version = 2, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN + | NFS_CAP_DIR_DELEG | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1 diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 9776d220cec33f..6285128e631a5a 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -1353,6 +1353,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait); +DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_skip); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait); DECLARE_EVENT_CLASS(nfs4_getattr_event, diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 1d0e6c10f921fa..b6fe30577fab57 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -393,6 +393,20 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) #define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4) #define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4) +#define encode_get_dir_deleg_maxsz (op_encode_hdr_maxsz + \ + 4 /* gdda_signal_deleg_avail */ + \ + 8 /* gdda_notification_types */ + \ + nfstime4_maxsz /* gdda_child_attr_delay */ + \ + nfstime4_maxsz /* gdda_dir_attr_delay */ + \ + nfs4_fattr_bitmap_maxsz /* gdda_child_attributes */ + \ + nfs4_fattr_bitmap_maxsz /* gdda_dir_attributes */) +#define decode_get_dir_deleg_maxsz (op_decode_hdr_maxsz + \ + 4 /* gddrnf_status */ + \ + encode_verifier_maxsz /* gddr_cookieverf */ + \ + encode_stateid_maxsz /* 
gddr_stateid */ + \ + 8 /* gddr_notification */ + \ + nfs4_fattr_maxsz /* gddr_child_attributes */ + \ + nfs4_fattr_maxsz /* gddr_dir_attributes */) #define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + \ XDR_QUADLEN(NFS4_DEVICEID4_SIZE) + \ 1 /* layout type */ + \ @@ -444,6 +458,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #else /* CONFIG_NFS_V4_1 */ #define encode_sequence_maxsz 0 #define decode_sequence_maxsz 0 +#define encode_get_dir_deleg_maxsz 0 +#define decode_get_dir_deleg_maxsz 0 #define encode_layoutreturn_maxsz 0 #define decode_layoutreturn_maxsz 0 #define encode_layoutget_maxsz 0 @@ -631,11 +647,13 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ + encode_get_dir_deleg_maxsz + \ encode_getattr_maxsz + \ encode_renew_maxsz) #define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ + decode_get_dir_deleg_maxsz + \ decode_getattr_maxsz + \ decode_renew_maxsz) #define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \ @@ -2008,6 +2026,33 @@ static void encode_sequence(struct xdr_stream *xdr, #ifdef CONFIG_NFS_V4_1 static void +encode_get_dir_delegation(struct xdr_stream *xdr, struct compound_hdr *hdr) +{ + struct timespec64 ts = { 0, 0 }; + u32 notifications[1] = { 0 }; + u32 attributes[1] = { 0 }; + __be32 *p; + + encode_op_hdr(xdr, OP_GET_DIR_DELEGATION, decode_get_dir_deleg_maxsz, hdr); + + /* We don't handle CB_RECALLABLE_OBJ_AVAIL yet. */ + xdr_stream_encode_bool(xdr, false); + + xdr_encode_bitmap4(xdr, notifications, ARRAY_SIZE(notifications)); + + /* Request no delay on attribute updates */ + p = reserve_space(xdr, 12 + 12); + p = xdr_encode_nfstime4(p, &ts); + xdr_encode_nfstime4(p, &ts); + + /* Requested child attributes */ + xdr_encode_bitmap4(xdr, attributes, ARRAY_SIZE(attributes)); + + /* Requested dir attributes */ + xdr_encode_bitmap4(xdr, attributes, ARRAY_SIZE(attributes)); +} + +static void encode_getdeviceinfo(struct xdr_stream *xdr, const struct nfs4_getdeviceinfo_args *args, struct compound_hdr *hdr) @@ -2143,6 +2188,11 @@ static void encode_free_stateid(struct xdr_stream *xdr, } #else static inline void +encode_get_dir_delegation(struct xdr_stream *xdr, struct compound_hdr *hdr) +{ +} + +static inline void encode_layoutreturn(struct xdr_stream *xdr, const struct nfs4_layoutreturn_args *args, struct compound_hdr *hdr) @@ -2356,6 +2406,8 @@ static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr, encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); + if (args->get_dir_deleg) + encode_get_dir_delegation(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } @@ -5994,6 +6046,49 @@ static int decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) return decode_stateid(xdr, stateid); } +static int decode_get_dir_delegation(struct xdr_stream *xdr, + struct nfs4_getattr_res *res) +{ + struct nfs4_gdd_res *gdd_res = res->gdd_res; + nfs4_verifier cookieverf; + u32 bitmap[1]; + int status; + + status = decode_op_hdr(xdr, OP_GET_DIR_DELEGATION); + if (status) + return status; + + if (xdr_stream_decode_u32(xdr, &gdd_res->status)) + return -EIO; + + if (gdd_res->status == GDD4_UNAVAIL) + return xdr_inline_decode(xdr, 4) ? 
0 : -EIO; + + status = decode_verifier(xdr, &cookieverf); + if (status) + return status; + + status = decode_delegation_stateid(xdr, &gdd_res->deleg); + if (status) + return status; + + /* Decode supported notification types. */ + status = decode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap)); + if (status < 0) + return status; + + /* Decode supported child attributes. */ + status = decode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap)); + if (status < 0) + return status; + + /* Decode supported attributes. */ + status = decode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap)); + if (status < 0) + return status; + return 0; +} + static int decode_getdeviceinfo(struct xdr_stream *xdr, struct nfs4_getdeviceinfo_res *res) { @@ -6208,6 +6303,12 @@ static int decode_free_stateid(struct xdr_stream *xdr, return res->status; } #else +static int decode_get_dir_delegation(struct xdr_stream *xdr, + struct nfs4_getattr_res *res) +{ + return 0; +} + static inline int decode_layoutreturn(struct xdr_stream *xdr, struct nfs4_layoutreturn_res *res) @@ -6525,6 +6626,11 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, status = decode_putfh(xdr); if (status) goto out; + if (res->gdd_res) { + status = decode_get_dir_delegation(xdr, res); + if (status) + goto out; + } status = decode_getfattr(xdr, res->fattr, res->server); out: return status; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index f157d43d131222..b72d7cc3676624 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -464,6 +464,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg, *next; set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); + clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(lo->plh_inode)->flags); list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) pnfs_clear_lseg_state(lseg, lseg_list); pnfs_clear_layoutreturn_info(lo); diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 63e71310b9f69b..39df80e4ae6fd8 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -353,7 +353,8 @@ static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) static void nfs_proc_rename_setup(struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry) + struct dentry *new_dentry, + struct inode *same_parent) { msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME]; } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 72dee6f3050e67..57d372db03b936 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1052,16 +1052,6 @@ int nfs_reconfigure(struct fs_context *fc) sync_filesystem(sb); /* - * The SB_RDONLY flag has been removed from the superblock during - * mounts to prevent interference between different filesystems. - * Similarly, it is also necessary to ignore the SB_RDONLY flag - * during reconfiguration; otherwise, it may also result in the - * creation of redundant superblocks when mounting a directory with - * different rw and ro flags multiple times. - */ - fc->sb_flags_mask &= ~SB_RDONLY; - - /* * Userspace mount programs that send binary options generally send * them populated with default values. We have no way to know which * ones were explicitly specified. 
Fall back to legacy behavior and @@ -1101,8 +1091,9 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx) sb->s_blocksize = 0; sb->s_xattr = server->nfs_client->cl_nfs_mod->xattr; sb->s_op = server->nfs_client->cl_nfs_mod->sops; - if (ctx->bsize) - sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits); + if (server->bsize) + sb->s_blocksize = + nfs_block_size(server->bsize, &sb->s_blocksize_bits); switch (server->nfs_client->rpc_ops->version) { case 2: @@ -1318,26 +1309,13 @@ int nfs_get_tree_common(struct fs_context *fc) if (IS_ERR(server)) return PTR_ERR(server); - /* - * When NFS_MOUNT_UNSHARED is not set, NFS forces the sharing of a - * superblock among each filesystem that mounts sub-directories - * belonging to a single exported root path. - * To prevent interference between different filesystems, the - * SB_RDONLY flag should be removed from the superblock. - */ if (server->flags & NFS_MOUNT_UNSHARED) compare_super = NULL; - else - fc->sb_flags &= ~SB_RDONLY; /* -o noac implies -o sync */ if (server->flags & NFS_MOUNT_NOAC) fc->sb_flags |= SB_SYNCHRONOUS; - if (ctx->clone_data.sb) - if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS) - fc->sb_flags |= SB_SYNCHRONOUS; - /* Get a superblock - note that we may end up sharing one that already exists */ fc->s_fs_info = server; s = sget_fc(fc, compare_super, nfs_set_super); @@ -1361,13 +1339,8 @@ int nfs_get_tree_common(struct fs_context *fc) } if (!s->s_root) { - unsigned bsize = ctx->clone_data.inherited_bsize; /* initial superblock/root creation */ nfs_fill_super(s, ctx); - if (bsize) { - s->s_blocksize_bits = bsize; - s->s_blocksize = 1U << bsize; - } error = nfs_get_cache_cookie(s, ctx); if (error < 0) goto error_splat_super; diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index b55467911648d0..4db818c0f9ddb7 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -390,7 +390,8 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir, nfs_sb_active(old_dir->i_sb); - NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dentry, new_dentry); + NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dentry, new_dentry, + old_dir == new_dir ? old_dir : NULL); return rpc_run_task(&task_setup_data); } diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index b267ec580da995..58bf58b68955c5 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -10,6 +10,7 @@ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> +#include <linux/string.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/quotaops.h> @@ -1037,7 +1038,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle, memset(bhs[i]->b_data, 0, osb->sb->s_blocksize); eb = (struct ocfs2_extent_block *) bhs[i]->b_data; /* Ok, setup the minimal stuff here. */ - strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); + strscpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); eb->h_blkno = cpu_to_le64(first_blkno); eb->h_fs_generation = cpu_to_le32(osb->fs_generation); eb->h_suballoc_slot = @@ -3654,7 +3655,6 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path, * So we use the new rightmost path. 
*/ ocfs2_mv_path(right_path, left_path); - left_path = NULL; } else ocfs2_complete_edge_insert(handle, left_path, right_path, subtree_index); @@ -6164,7 +6164,7 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, struct buffer_head *bh = NULL; struct ocfs2_dinode *di; struct ocfs2_truncate_log *tl; - unsigned int tl_count; + unsigned int tl_count, tl_used; inode = ocfs2_get_system_file_inode(osb, TRUNCATE_LOG_SYSTEM_INODE, @@ -6185,8 +6185,10 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, di = (struct ocfs2_dinode *)bh->b_data; tl = &di->id2.i_dealloc; tl_count = le16_to_cpu(tl->tl_count); + tl_used = le16_to_cpu(tl->tl_used); if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || - tl_count == 0)) { + tl_count == 0 || + tl_used > tl_count)) { status = -EFSCORRUPTED; iput(inode); brelse(bh); @@ -6744,7 +6746,7 @@ static int ocfs2_reuse_blk_from_dealloc(handle_t *handle, /* We can't guarantee that buffer head is still cached, so * polutlate the extent block again. */ - strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); + strscpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); eb->h_blkno = cpu_to_le64(bf->free_blk); eb->h_fs_generation = cpu_to_le32(osb->fs_generation); eb->h_suballoc_slot = cpu_to_le16(real_slot); diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 8f714406528d62..701d27d908d4b2 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -434,7 +434,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, BUG_ON(buffer_jbd(bh)); ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr); - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) { + if (unlikely(ocfs2_emergency_state(osb))) { ret = -EROFS; mlog_errno(ret); goto out; diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c index 2f61d39e4e509b..6bc4e064ace464 100644 --- a/fs/ocfs2/cluster/nodemanager.c +++ b/fs/ocfs2/cluster/nodemanager.c @@ -4,6 +4,7 @@ */ #include <linux/slab.h> +#include <linux/string.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> @@ -590,7 +591,7 @@ static struct config_item *o2nm_node_group_make_item(struct config_group *group, if (node == NULL) return ERR_PTR(-ENOMEM); - strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ + strscpy(node->nd_name, name); /* use item.ci_namebuf instead? 
*/ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 2785ff245e79e4..782afd9fa93481 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -136,7 +136,7 @@ static void ocfs2_init_dir_trailer(struct inode *inode, struct ocfs2_dir_block_trailer *trailer; trailer = ocfs2_trailer_from_bh(bh, inode->i_sb); - strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE); + strscpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE); trailer->db_compat_rec_len = cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer)); trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); @@ -2213,14 +2213,14 @@ static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode, de->name_len = 1; de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); - strcpy(de->name, "."); + strscpy(de->name, "."); ocfs2_set_de_type(de, S_IFDIR); de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len)); de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno); de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1)); de->name_len = 2; - strcpy(de->name, ".."); + strscpy(de->name, ".."); ocfs2_set_de_type(de, S_IFDIR); return de; @@ -2378,7 +2378,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb, dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; memset(dx_root, 0, osb->sb->s_blocksize); - strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE); + strscpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE); dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc); dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit); @@ -2454,7 +2454,7 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb, dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data; memset(dx_leaf, 0, osb->sb->s_blocksize); - strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE); + strscpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE); dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation); dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr); dx_leaf->dl_list.de_count = diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 21d797ccccd068..732c61599159cc 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -179,7 +179,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, file->f_path.dentry->d_name.name, (unsigned long long)datasync); - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + if (unlikely(ocfs2_emergency_state(osb))) return -EROFS; err = file_write_and_wait_range(file, start, end); @@ -209,7 +209,7 @@ int ocfs2_should_update_atime(struct inode *inode, struct timespec64 now; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + if (unlikely(ocfs2_emergency_state(osb))) return 0; if ((inode->i_flags & S_NOATIME) || @@ -1136,6 +1136,12 @@ int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry, attr->ia_valid & ATTR_GID ? 
from_kgid(&init_user_ns, attr->ia_gid) : 0); + status = ocfs2_emergency_state(osb); + if (unlikely(status)) { + mlog_errno(status); + goto bail; + } + /* ensuring we don't even attempt to truncate a symlink */ if (S_ISLNK(inode->i_mode)) attr->ia_valid &= ~ATTR_SIZE; @@ -1943,7 +1949,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, handle_t *handle; unsigned long long max_off = inode->i_sb->s_maxbytes; - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + if (unlikely(ocfs2_emergency_state(osb))) return -EROFS; inode_lock(inode); @@ -2707,7 +2713,7 @@ static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in, return -EINVAL; if (!ocfs2_refcount_tree(osb)) return -EOPNOTSUPP; - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + if (unlikely(ocfs2_emergency_state(osb))) return -EROFS; /* Lock both files against IO */ diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 8340525e5589c0..b5fcc2725a2962 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -1442,6 +1442,14 @@ int ocfs2_validate_inode_block(struct super_block *sb, goto bail; } + if ((!di->i_links_count && !di->i_links_count_hi) || !di->i_mode) { + mlog(ML_ERROR, "Invalid dinode #%llu: " + "Corrupt state (nlink = %u or mode = %u) detected!\n", + (unsigned long long)bh->b_blocknr, + ocfs2_read_links_count(di), le16_to_cpu(di->i_mode)); + rc = -EFSCORRUPTED; + goto bail; + } /* * Errors after here are fatal. */ @@ -1604,8 +1612,7 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb, trace_ocfs2_filecheck_repair_inode_block( (unsigned long long)bh->b_blocknr); - if (ocfs2_is_hard_readonly(OCFS2_SB(sb)) || - ocfs2_is_soft_readonly(OCFS2_SB(sb))) { + if (unlikely(ocfs2_emergency_state(OCFS2_SB(sb)))) { mlog(ML_ERROR, "Filecheck: cannot repair dinode #%llu " "on readonly filesystem\n", diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index ce978a2497d9de..99637e34d9daf8 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -909,7 +909,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) struct buffer_head *di_bh = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + if (unlikely(ocfs2_emergency_state(osb))) return -EROFS; inode_lock(inode); diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index c90b254da75eb5..4ec6dbed65a86c 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -23,6 +23,7 @@ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> +#include <linux/string.h> #include <linux/highmem.h> #include <linux/quotaops.h> #include <linux/iversion.h> @@ -568,7 +569,7 @@ static int __ocfs2_mknod_locked(struct inode *dir, ocfs2_set_links_count(fe, inode->i_nlink); fe->i_last_eb_blk = 0; - strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE); + strscpy(fe->i_signature, OCFS2_INODE_SIGNATURE); fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL); ktime_get_coarse_real_ts64(&ts); fe->i_atime = fe->i_ctime = fe->i_mtime = diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 6aaa94c554c12a..7b50e03dfa664a 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -680,6 +680,24 @@ static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb) return ret; } +static inline int ocfs2_is_readonly(struct ocfs2_super *osb) +{ + int ret; + spin_lock(&osb->osb_lock); + ret = osb->osb_flags & (OCFS2_OSB_SOFT_RO | OCFS2_OSB_HARD_RO); + spin_unlock(&osb->osb_lock); + + return ret; +} + +static inline int 
ocfs2_emergency_state(struct ocfs2_super *osb) +{ + if (ocfs2_is_readonly(osb)) + return -EROFS; + + return 0; +} + static inline int ocfs2_clusterinfo_valid(struct ocfs2_super *osb) { return (osb->s_feature_incompat & diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index b0733c08ed13b1..ac3ec2c2111963 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -276,7 +276,7 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters) u32 first_new_cluster; u64 lgd_blkno; - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + if (unlikely(ocfs2_emergency_state(osb))) return -EROFS; if (new_clusters < 0) @@ -466,7 +466,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) u16 cl_bpc; u64 bg_ptr; - if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + if (unlikely(ocfs2_emergency_state(osb))) return -EROFS; main_bm_inode = ocfs2_get_system_file_inode(osb, diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index a28c127b9934bc..fca2fd07c881d1 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -10,6 +10,7 @@ #include <linux/list.h> #include <linux/spinlock.h> +#include <linux/string.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/kmod.h> @@ -670,7 +671,7 @@ static int __init ocfs2_stack_glue_init(void) { int ret; - strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); + strscpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table); if (!ocfs2_table_header) { diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 6ac4dcd54588cf..8e6e5235b30c46 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -11,6 +11,7 @@ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> +#include <linux/string.h> #include <linux/highmem.h> #include <cluster/masklog.h> @@ -372,7 +373,7 @@ static int ocfs2_block_group_fill(handle_t *handle, } memset(bg, 0, sb->s_blocksize); - strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE); + strscpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE); bg->bg_generation = cpu_to_le32(osb->fs_generation); bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1, osb->s_feature_incompat)); @@ -1992,6 +1993,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, } cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; + if (!le16_to_cpu(cl->cl_next_free_rec) || + le16_to_cpu(cl->cl_next_free_rec) > le16_to_cpu(cl->cl_count)) { + status = ocfs2_error(ac->ac_inode->i_sb, + "Chain allocator dinode %llu has invalid next " + "free chain record %u, but only %u total\n", + (unsigned long long)le64_to_cpu(fe->i_blkno), + le16_to_cpu(cl->cl_next_free_rec), + le16_to_cpu(cl->cl_count)); + goto bail; + } victim = ocfs2_find_victim_chain(cl); ac->ac_chain = victim; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 2c7ba1480f7a1b..3cbafac50cd137 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2487,7 +2487,7 @@ static int ocfs2_handle_error(struct super_block *sb) rv = -EIO; } else { /* default option */ rv = -EROFS; - if (sb_rdonly(sb) && (ocfs2_is_soft_readonly(osb) || ocfs2_is_hard_readonly(osb))) + if (sb_rdonly(sb) && ocfs2_emergency_state(osb)) return rv; pr_crit("OCFS2: File system is now read-only.\n"); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index dc1761e8481422..1b21fbc16d73a2 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -49,9 +49,13 @@ #include "ocfs2_trace.h" struct ocfs2_xattr_def_value_root { - struct ocfs2_xattr_value_root xv; - struct ocfs2_extent_rec 
er; + /* Must be last as it ends in a flexible-array member. */ + TRAILING_OVERLAP(struct ocfs2_xattr_value_root, xv, xr_list.l_recs, + struct ocfs2_extent_rec er; + ); }; +static_assert(offsetof(struct ocfs2_xattr_def_value_root, xv.xr_list.l_recs) == + offsetof(struct ocfs2_xattr_def_value_root, er)); struct ocfs2_xattr_bucket { /* The inode these xattrs are associated with */ @@ -971,13 +975,39 @@ static int ocfs2_xattr_ibody_list(struct inode *inode, struct ocfs2_xattr_header *header = NULL; struct ocfs2_inode_info *oi = OCFS2_I(inode); int ret = 0; + u16 xattr_count; + size_t max_entries; + u16 inline_size; if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) return ret; + inline_size = le16_to_cpu(di->i_xattr_inline_size); + + /* Validate inline size is reasonable */ + if (inline_size > inode->i_sb->s_blocksize || + inline_size < sizeof(struct ocfs2_xattr_header)) { + ocfs2_error(inode->i_sb, + "Invalid xattr inline size %u in inode %llu\n", + inline_size, + (unsigned long long)OCFS2_I(inode)->ip_blkno); + return -EFSCORRUPTED; + } + header = (struct ocfs2_xattr_header *) - ((void *)di + inode->i_sb->s_blocksize - - le16_to_cpu(di->i_xattr_inline_size)); + ((void *)di + inode->i_sb->s_blocksize - inline_size); + + xattr_count = le16_to_cpu(header->xh_count); + max_entries = (inline_size - sizeof(struct ocfs2_xattr_header)) / + sizeof(struct ocfs2_xattr_entry); + + if (xattr_count > max_entries) { + ocfs2_error(inode->i_sb, + "xattr entry count %u exceeds maximum %zu in inode %llu\n", + xattr_count, max_entries, + (unsigned long long)OCFS2_I(inode)->ip_blkno); + return -EFSCORRUPTED; + } ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size); diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h index 37b23664ddf3ba..eeb4011cb217df 100644 --- a/fs/smb/client/cifspdu.h +++ b/fs/smb/client/cifspdu.h @@ -1357,37 +1357,6 @@ typedef struct smb_com_transaction_change_notify_rsp { __u16 ByteCount; /* __u8 Pad[3]; */ } __packed TRANSACT_CHANGE_NOTIFY_RSP; -/* Completion Filter flags for Notify */ -#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001 -#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002 -#define FILE_NOTIFY_CHANGE_NAME 0x00000003 -#define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004 -#define FILE_NOTIFY_CHANGE_SIZE 0x00000008 -#define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010 -#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020 -#define FILE_NOTIFY_CHANGE_CREATION 0x00000040 -#define FILE_NOTIFY_CHANGE_EA 0x00000080 -#define FILE_NOTIFY_CHANGE_SECURITY 0x00000100 -#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200 -#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400 -#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800 - -#define FILE_ACTION_ADDED 0x00000001 -#define FILE_ACTION_REMOVED 0x00000002 -#define FILE_ACTION_MODIFIED 0x00000003 -#define FILE_ACTION_RENAMED_OLD_NAME 0x00000004 -#define FILE_ACTION_RENAMED_NEW_NAME 0x00000005 -#define FILE_ACTION_ADDED_STREAM 0x00000006 -#define FILE_ACTION_REMOVED_STREAM 0x00000007 -#define FILE_ACTION_MODIFIED_STREAM 0x00000008 - -/* response contains array of the following structures */ -struct file_notify_information { - __le32 NextEntryOffset; - __le32 Action; - __le32 FileNameLength; - __u8 FileName[]; -} __packed; struct cifs_quota_data { __u32 rsrvd1; /* 0 */ @@ -2034,40 +2003,6 @@ typedef struct { #define CIFS_POSIX_EXTENSIONS 0x00000010 /* support for new QFSInfo */ -/* DeviceType Flags */ -#define FILE_DEVICE_CD_ROM 0x00000002 -#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003 -#define FILE_DEVICE_DFS 0x00000006 -#define 
FILE_DEVICE_DISK 0x00000007 -#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008 -#define FILE_DEVICE_FILE_SYSTEM 0x00000009 -#define FILE_DEVICE_NAMED_PIPE 0x00000011 -#define FILE_DEVICE_NETWORK 0x00000012 -#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014 -#define FILE_DEVICE_NULL 0x00000015 -#define FILE_DEVICE_PARALLEL_PORT 0x00000016 -#define FILE_DEVICE_PRINTER 0x00000018 -#define FILE_DEVICE_SERIAL_PORT 0x0000001b -#define FILE_DEVICE_STREAMS 0x0000001e -#define FILE_DEVICE_TAPE 0x0000001f -#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020 -#define FILE_DEVICE_VIRTUAL_DISK 0x00000024 -#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028 - -/* Device Characteristics */ -#define FILE_REMOVABLE_MEDIA 0x00000001 -#define FILE_READ_ONLY_DEVICE 0x00000002 -#define FILE_FLOPPY_DISKETTE 0x00000004 -#define FILE_WRITE_ONCE_MEDIA 0x00000008 -#define FILE_REMOTE_DEVICE 0x00000010 -#define FILE_DEVICE_IS_MOUNTED 0x00000020 -#define FILE_VIRTUAL_VOLUME 0x00000040 -#define FILE_DEVICE_SECURE_OPEN 0x00000100 -#define FILE_CHARACTERISTIC_TS_DEVICE 0x00001000 -#define FILE_CHARACTERISTIC_WEBDAV_DEVICE 0x00002000 -#define FILE_PORTABLE_DEVICE 0x00004000 -#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000 - /******************************************************************************/ /* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */ /******************************************************************************/ diff --git a/fs/smb/client/nterr.c b/fs/smb/client/nterr.c index 8f0bc441295eff..77f84767b7dfb0 100644 --- a/fs/smb/client/nterr.c +++ b/fs/smb/client/nterr.c @@ -13,6 +13,7 @@ const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_OK", NT_STATUS_OK}, + {"NT_STATUS_PENDING", NT_STATUS_PENDING}, {"NT_STATUS_MEDIA_CHANGED", NT_STATUS_MEDIA_CHANGED}, {"NT_STATUS_END_OF_MEDIA", NT_STATUS_END_OF_MEDIA}, {"NT_STATUS_MEDIA_CHECK", NT_STATUS_MEDIA_CHECK}, @@ -544,6 +545,7 @@ const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_DOMAIN_TRUST_INCONSISTENT", NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {"NT_STATUS_FS_DRIVER_REQUIRED", NT_STATUS_FS_DRIVER_REQUIRED}, + {"NT_STATUS_INVALID_LOCK_RANGE", NT_STATUS_INVALID_LOCK_RANGE}, {"NT_STATUS_NO_USER_SESSION_KEY", NT_STATUS_NO_USER_SESSION_KEY}, {"NT_STATUS_USER_SESSION_DELETED", NT_STATUS_USER_SESSION_DELETED}, {"NT_STATUS_RESOURCE_LANG_NOT_FOUND", @@ -675,9 +677,12 @@ const struct nt_err_code_struct nt_errs[] = { NT_STATUS_QUOTA_LIST_INCONSISTENT}, {"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE}, {"NT_STATUS_NOT_A_REPARSE_POINT", NT_STATUS_NOT_A_REPARSE_POINT}, + {"NT_STATUS_NETWORK_SESSION_EXPIRED", NT_STATUS_NETWORK_SESSION_EXPIRED}, {"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES}, {"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES}, {"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED}, {"NT_STATUS_NO_SUCH_JOB", NT_STATUS_NO_SUCH_JOB}, + {"NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP", + NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP}, {NULL, 0} }; diff --git a/fs/smb/client/nterr.h b/fs/smb/client/nterr.h index 180602c22355e1..81f1a78cf41faa 100644 --- a/fs/smb/client/nterr.h +++ b/fs/smb/client/nterr.h @@ -35,518 +35,522 @@ extern const struct nt_err_code_struct nt_errs[]; */ #define NT_STATUS_OK 0x0000 +#define NT_STATUS_PENDING 0x0103 #define NT_STATUS_SOME_UNMAPPED 0x0107 #define NT_STATUS_BUFFER_OVERFLOW 0x80000005 #define NT_STATUS_NO_MORE_ENTRIES 0x8000001a #define NT_STATUS_MEDIA_CHANGED 0x8000001c #define NT_STATUS_END_OF_MEDIA 0x8000001e #define NT_STATUS_MEDIA_CHECK 0x80000020 -#define 
NT_STATUS_NO_DATA_DETECTED 0x8000001c +#define NT_STATUS_NO_DATA_DETECTED 0x80000022 #define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d #define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288 -#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288 -#define NT_STATUS_UNSUCCESSFUL 0xC0000000 | 0x0001 -#define NT_STATUS_NOT_IMPLEMENTED 0xC0000000 | 0x0002 -#define NT_STATUS_INVALID_INFO_CLASS 0xC0000000 | 0x0003 -#define NT_STATUS_INFO_LENGTH_MISMATCH 0xC0000000 | 0x0004 -#define NT_STATUS_ACCESS_VIOLATION 0xC0000000 | 0x0005 -#define NT_STATUS_IN_PAGE_ERROR 0xC0000000 | 0x0006 -#define NT_STATUS_PAGEFILE_QUOTA 0xC0000000 | 0x0007 -#define NT_STATUS_INVALID_HANDLE 0xC0000000 | 0x0008 -#define NT_STATUS_BAD_INITIAL_STACK 0xC0000000 | 0x0009 -#define NT_STATUS_BAD_INITIAL_PC 0xC0000000 | 0x000a -#define NT_STATUS_INVALID_CID 0xC0000000 | 0x000b -#define NT_STATUS_TIMER_NOT_CANCELED 0xC0000000 | 0x000c -#define NT_STATUS_INVALID_PARAMETER 0xC0000000 | 0x000d -#define NT_STATUS_NO_SUCH_DEVICE 0xC0000000 | 0x000e -#define NT_STATUS_NO_SUCH_FILE 0xC0000000 | 0x000f -#define NT_STATUS_INVALID_DEVICE_REQUEST 0xC0000000 | 0x0010 -#define NT_STATUS_END_OF_FILE 0xC0000000 | 0x0011 -#define NT_STATUS_WRONG_VOLUME 0xC0000000 | 0x0012 -#define NT_STATUS_NO_MEDIA_IN_DEVICE 0xC0000000 | 0x0013 -#define NT_STATUS_UNRECOGNIZED_MEDIA 0xC0000000 | 0x0014 -#define NT_STATUS_NONEXISTENT_SECTOR 0xC0000000 | 0x0015 -#define NT_STATUS_MORE_PROCESSING_REQUIRED 0xC0000000 | 0x0016 -#define NT_STATUS_NO_MEMORY 0xC0000000 | 0x0017 -#define NT_STATUS_CONFLICTING_ADDRESSES 0xC0000000 | 0x0018 -#define NT_STATUS_NOT_MAPPED_VIEW 0xC0000000 | 0x0019 -#define NT_STATUS_UNABLE_TO_FREE_VM 0x80000000 | 0x001a -#define NT_STATUS_UNABLE_TO_DELETE_SECTION 0xC0000000 | 0x001b -#define NT_STATUS_INVALID_SYSTEM_SERVICE 0xC0000000 | 0x001c -#define NT_STATUS_ILLEGAL_INSTRUCTION 0xC0000000 | 0x001d -#define NT_STATUS_INVALID_LOCK_SEQUENCE 0xC0000000 | 0x001e -#define NT_STATUS_INVALID_VIEW_SIZE 0xC0000000 | 0x001f -#define NT_STATUS_INVALID_FILE_FOR_SECTION 0xC0000000 | 0x0020 -#define NT_STATUS_ALREADY_COMMITTED 0xC0000000 | 0x0021 -#define NT_STATUS_ACCESS_DENIED 0xC0000000 | 0x0022 -#define NT_STATUS_BUFFER_TOO_SMALL 0xC0000000 | 0x0023 -#define NT_STATUS_OBJECT_TYPE_MISMATCH 0xC0000000 | 0x0024 -#define NT_STATUS_NONCONTINUABLE_EXCEPTION 0xC0000000 | 0x0025 -#define NT_STATUS_INVALID_DISPOSITION 0xC0000000 | 0x0026 -#define NT_STATUS_UNWIND 0xC0000000 | 0x0027 -#define NT_STATUS_BAD_STACK 0xC0000000 | 0x0028 -#define NT_STATUS_INVALID_UNWIND_TARGET 0xC0000000 | 0x0029 -#define NT_STATUS_NOT_LOCKED 0xC0000000 | 0x002a -#define NT_STATUS_PARITY_ERROR 0xC0000000 | 0x002b -#define NT_STATUS_UNABLE_TO_DECOMMIT_VM 0xC0000000 | 0x002c -#define NT_STATUS_NOT_COMMITTED 0xC0000000 | 0x002d -#define NT_STATUS_INVALID_PORT_ATTRIBUTES 0xC0000000 | 0x002e -#define NT_STATUS_PORT_MESSAGE_TOO_LONG 0xC0000000 | 0x002f -#define NT_STATUS_INVALID_PARAMETER_MIX 0xC0000000 | 0x0030 -#define NT_STATUS_INVALID_QUOTA_LOWER 0xC0000000 | 0x0031 -#define NT_STATUS_DISK_CORRUPT_ERROR 0xC0000000 | 0x0032 -#define NT_STATUS_OBJECT_NAME_INVALID 0xC0000000 | 0x0033 -#define NT_STATUS_OBJECT_NAME_NOT_FOUND 0xC0000000 | 0x0034 -#define NT_STATUS_OBJECT_NAME_COLLISION 0xC0000000 | 0x0035 -#define NT_STATUS_HANDLE_NOT_WAITABLE 0xC0000000 | 0x0036 -#define NT_STATUS_PORT_DISCONNECTED 0xC0000000 | 0x0037 -#define NT_STATUS_DEVICE_ALREADY_ATTACHED 0xC0000000 | 0x0038 -#define NT_STATUS_OBJECT_PATH_INVALID 0xC0000000 | 0x0039 -#define NT_STATUS_OBJECT_PATH_NOT_FOUND 0xC0000000 | 0x003a 
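[Editorial note: the nterr.c hunk above adds several entries (NT_STATUS_PENDING, NT_STATUS_INVALID_LOCK_RANGE, NT_STATUS_NETWORK_SESSION_EXPIRED, NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP) to the nt_errs[] name/code table, which the visible {NULL, 0} entry terminates. A minimal, self-contained sketch of how such a sentinel-terminated table can be scanned is below; the struct, field and function names (status_entry, status_name, demo_errs) are hypothetical stand-ins, not the kernel's own definitions.]

#include <stdio.h>

/* Hypothetical mirror of a name/code table ended by a {NULL, 0} entry. */
struct status_entry {
	const char *name;
	unsigned int code;
};

static const struct status_entry demo_errs[] = {
	{"NT_STATUS_OK",      0x0000},
	{"NT_STATUS_PENDING", 0x0103},
	{NULL, 0}			/* sentinel: stops the scan */
};

/* Linear scan; returns the symbolic name or a fallback string. */
static const char *status_name(const struct status_entry *tbl, unsigned int code)
{
	for (; tbl->name != NULL; tbl++)
		if (tbl->code == code)
			return tbl->name;
	return "unknown NT status";
}

int main(void)
{
	printf("%s\n", status_name(demo_errs, 0x0103)); /* prints NT_STATUS_PENDING */
	return 0;
}

[End of editorial note; the removed old-style definitions continue below.]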
-#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD 0xC0000000 | 0x003b -#define NT_STATUS_DATA_OVERRUN 0xC0000000 | 0x003c -#define NT_STATUS_DATA_LATE_ERROR 0xC0000000 | 0x003d -#define NT_STATUS_DATA_ERROR 0xC0000000 | 0x003e -#define NT_STATUS_CRC_ERROR 0xC0000000 | 0x003f -#define NT_STATUS_SECTION_TOO_BIG 0xC0000000 | 0x0040 -#define NT_STATUS_PORT_CONNECTION_REFUSED 0xC0000000 | 0x0041 -#define NT_STATUS_INVALID_PORT_HANDLE 0xC0000000 | 0x0042 -#define NT_STATUS_SHARING_VIOLATION 0xC0000000 | 0x0043 -#define NT_STATUS_QUOTA_EXCEEDED 0xC0000000 | 0x0044 -#define NT_STATUS_INVALID_PAGE_PROTECTION 0xC0000000 | 0x0045 -#define NT_STATUS_MUTANT_NOT_OWNED 0xC0000000 | 0x0046 -#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED 0xC0000000 | 0x0047 -#define NT_STATUS_PORT_ALREADY_SET 0xC0000000 | 0x0048 -#define NT_STATUS_SECTION_NOT_IMAGE 0xC0000000 | 0x0049 -#define NT_STATUS_SUSPEND_COUNT_EXCEEDED 0xC0000000 | 0x004a -#define NT_STATUS_THREAD_IS_TERMINATING 0xC0000000 | 0x004b -#define NT_STATUS_BAD_WORKING_SET_LIMIT 0xC0000000 | 0x004c -#define NT_STATUS_INCOMPATIBLE_FILE_MAP 0xC0000000 | 0x004d -#define NT_STATUS_SECTION_PROTECTION 0xC0000000 | 0x004e -#define NT_STATUS_EAS_NOT_SUPPORTED 0xC0000000 | 0x004f -#define NT_STATUS_EA_TOO_LARGE 0xC0000000 | 0x0050 -#define NT_STATUS_NONEXISTENT_EA_ENTRY 0xC0000000 | 0x0051 -#define NT_STATUS_NO_EAS_ON_FILE 0xC0000000 | 0x0052 -#define NT_STATUS_EA_CORRUPT_ERROR 0xC0000000 | 0x0053 -#define NT_STATUS_FILE_LOCK_CONFLICT 0xC0000000 | 0x0054 -#define NT_STATUS_LOCK_NOT_GRANTED 0xC0000000 | 0x0055 -#define NT_STATUS_DELETE_PENDING 0xC0000000 | 0x0056 -#define NT_STATUS_CTL_FILE_NOT_SUPPORTED 0xC0000000 | 0x0057 -#define NT_STATUS_UNKNOWN_REVISION 0xC0000000 | 0x0058 -#define NT_STATUS_REVISION_MISMATCH 0xC0000000 | 0x0059 -#define NT_STATUS_INVALID_OWNER 0xC0000000 | 0x005a -#define NT_STATUS_INVALID_PRIMARY_GROUP 0xC0000000 | 0x005b -#define NT_STATUS_NO_IMPERSONATION_TOKEN 0xC0000000 | 0x005c -#define NT_STATUS_CANT_DISABLE_MANDATORY 0xC0000000 | 0x005d -#define NT_STATUS_NO_LOGON_SERVERS 0xC0000000 | 0x005e -#define NT_STATUS_NO_SUCH_LOGON_SESSION 0xC0000000 | 0x005f -#define NT_STATUS_NO_SUCH_PRIVILEGE 0xC0000000 | 0x0060 -#define NT_STATUS_PRIVILEGE_NOT_HELD 0xC0000000 | 0x0061 -#define NT_STATUS_INVALID_ACCOUNT_NAME 0xC0000000 | 0x0062 -#define NT_STATUS_USER_EXISTS 0xC0000000 | 0x0063 -#define NT_STATUS_NO_SUCH_USER 0xC0000000 | 0x0064 -#define NT_STATUS_GROUP_EXISTS 0xC0000000 | 0x0065 -#define NT_STATUS_NO_SUCH_GROUP 0xC0000000 | 0x0066 -#define NT_STATUS_MEMBER_IN_GROUP 0xC0000000 | 0x0067 -#define NT_STATUS_MEMBER_NOT_IN_GROUP 0xC0000000 | 0x0068 -#define NT_STATUS_LAST_ADMIN 0xC0000000 | 0x0069 -#define NT_STATUS_WRONG_PASSWORD 0xC0000000 | 0x006a -#define NT_STATUS_ILL_FORMED_PASSWORD 0xC0000000 | 0x006b -#define NT_STATUS_PASSWORD_RESTRICTION 0xC0000000 | 0x006c -#define NT_STATUS_LOGON_FAILURE 0xC0000000 | 0x006d -#define NT_STATUS_ACCOUNT_RESTRICTION 0xC0000000 | 0x006e -#define NT_STATUS_INVALID_LOGON_HOURS 0xC0000000 | 0x006f -#define NT_STATUS_INVALID_WORKSTATION 0xC0000000 | 0x0070 -#define NT_STATUS_PASSWORD_EXPIRED 0xC0000000 | 0x0071 -#define NT_STATUS_ACCOUNT_DISABLED 0xC0000000 | 0x0072 -#define NT_STATUS_NONE_MAPPED 0xC0000000 | 0x0073 -#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED 0xC0000000 | 0x0074 -#define NT_STATUS_LUIDS_EXHAUSTED 0xC0000000 | 0x0075 -#define NT_STATUS_INVALID_SUB_AUTHORITY 0xC0000000 | 0x0076 -#define NT_STATUS_INVALID_ACL 0xC0000000 | 0x0077 -#define NT_STATUS_INVALID_SID 0xC0000000 | 0x0078 -#define 
NT_STATUS_INVALID_SECURITY_DESCR 0xC0000000 | 0x0079 -#define NT_STATUS_PROCEDURE_NOT_FOUND 0xC0000000 | 0x007a -#define NT_STATUS_INVALID_IMAGE_FORMAT 0xC0000000 | 0x007b -#define NT_STATUS_NO_TOKEN 0xC0000000 | 0x007c -#define NT_STATUS_BAD_INHERITANCE_ACL 0xC0000000 | 0x007d -#define NT_STATUS_RANGE_NOT_LOCKED 0xC0000000 | 0x007e -#define NT_STATUS_DISK_FULL 0xC0000000 | 0x007f -#define NT_STATUS_SERVER_DISABLED 0xC0000000 | 0x0080 -#define NT_STATUS_SERVER_NOT_DISABLED 0xC0000000 | 0x0081 -#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED 0xC0000000 | 0x0082 -#define NT_STATUS_GUIDS_EXHAUSTED 0xC0000000 | 0x0083 -#define NT_STATUS_INVALID_ID_AUTHORITY 0xC0000000 | 0x0084 -#define NT_STATUS_AGENTS_EXHAUSTED 0xC0000000 | 0x0085 -#define NT_STATUS_INVALID_VOLUME_LABEL 0xC0000000 | 0x0086 -#define NT_STATUS_SECTION_NOT_EXTENDED 0xC0000000 | 0x0087 -#define NT_STATUS_NOT_MAPPED_DATA 0xC0000000 | 0x0088 -#define NT_STATUS_RESOURCE_DATA_NOT_FOUND 0xC0000000 | 0x0089 -#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND 0xC0000000 | 0x008a -#define NT_STATUS_RESOURCE_NAME_NOT_FOUND 0xC0000000 | 0x008b -#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED 0xC0000000 | 0x008c -#define NT_STATUS_FLOAT_DENORMAL_OPERAND 0xC0000000 | 0x008d -#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO 0xC0000000 | 0x008e -#define NT_STATUS_FLOAT_INEXACT_RESULT 0xC0000000 | 0x008f -#define NT_STATUS_FLOAT_INVALID_OPERATION 0xC0000000 | 0x0090 -#define NT_STATUS_FLOAT_OVERFLOW 0xC0000000 | 0x0091 -#define NT_STATUS_FLOAT_STACK_CHECK 0xC0000000 | 0x0092 -#define NT_STATUS_FLOAT_UNDERFLOW 0xC0000000 | 0x0093 -#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO 0xC0000000 | 0x0094 -#define NT_STATUS_INTEGER_OVERFLOW 0xC0000000 | 0x0095 -#define NT_STATUS_PRIVILEGED_INSTRUCTION 0xC0000000 | 0x0096 -#define NT_STATUS_TOO_MANY_PAGING_FILES 0xC0000000 | 0x0097 -#define NT_STATUS_FILE_INVALID 0xC0000000 | 0x0098 -#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED 0xC0000000 | 0x0099 -#define NT_STATUS_INSUFFICIENT_RESOURCES 0xC0000000 | 0x009a -#define NT_STATUS_DFS_EXIT_PATH_FOUND 0xC0000000 | 0x009b -#define NT_STATUS_DEVICE_DATA_ERROR 0xC0000000 | 0x009c -#define NT_STATUS_DEVICE_NOT_CONNECTED 0xC0000000 | 0x009d -#define NT_STATUS_DEVICE_POWER_FAILURE 0xC0000000 | 0x009e -#define NT_STATUS_FREE_VM_NOT_AT_BASE 0xC0000000 | 0x009f -#define NT_STATUS_MEMORY_NOT_ALLOCATED 0xC0000000 | 0x00a0 -#define NT_STATUS_WORKING_SET_QUOTA 0xC0000000 | 0x00a1 -#define NT_STATUS_MEDIA_WRITE_PROTECTED 0xC0000000 | 0x00a2 -#define NT_STATUS_DEVICE_NOT_READY 0xC0000000 | 0x00a3 -#define NT_STATUS_INVALID_GROUP_ATTRIBUTES 0xC0000000 | 0x00a4 -#define NT_STATUS_BAD_IMPERSONATION_LEVEL 0xC0000000 | 0x00a5 -#define NT_STATUS_CANT_OPEN_ANONYMOUS 0xC0000000 | 0x00a6 -#define NT_STATUS_BAD_VALIDATION_CLASS 0xC0000000 | 0x00a7 -#define NT_STATUS_BAD_TOKEN_TYPE 0xC0000000 | 0x00a8 -#define NT_STATUS_BAD_MASTER_BOOT_RECORD 0xC0000000 | 0x00a9 -#define NT_STATUS_INSTRUCTION_MISALIGNMENT 0xC0000000 | 0x00aa -#define NT_STATUS_INSTANCE_NOT_AVAILABLE 0xC0000000 | 0x00ab -#define NT_STATUS_PIPE_NOT_AVAILABLE 0xC0000000 | 0x00ac -#define NT_STATUS_INVALID_PIPE_STATE 0xC0000000 | 0x00ad -#define NT_STATUS_PIPE_BUSY 0xC0000000 | 0x00ae -#define NT_STATUS_ILLEGAL_FUNCTION 0xC0000000 | 0x00af -#define NT_STATUS_PIPE_DISCONNECTED 0xC0000000 | 0x00b0 -#define NT_STATUS_PIPE_CLOSING 0xC0000000 | 0x00b1 -#define NT_STATUS_PIPE_CONNECTED 0xC0000000 | 0x00b2 -#define NT_STATUS_PIPE_LISTENING 0xC0000000 | 0x00b3 -#define NT_STATUS_INVALID_READ_MODE 0xC0000000 | 0x00b4 -#define NT_STATUS_IO_TIMEOUT 0xC0000000 | 0x00b5 
-#define NT_STATUS_FILE_FORCED_CLOSED 0xC0000000 | 0x00b6 -#define NT_STATUS_PROFILING_NOT_STARTED 0xC0000000 | 0x00b7 -#define NT_STATUS_PROFILING_NOT_STOPPED 0xC0000000 | 0x00b8 -#define NT_STATUS_COULD_NOT_INTERPRET 0xC0000000 | 0x00b9 -#define NT_STATUS_FILE_IS_A_DIRECTORY 0xC0000000 | 0x00ba -#define NT_STATUS_NOT_SUPPORTED 0xC0000000 | 0x00bb -#define NT_STATUS_REMOTE_NOT_LISTENING 0xC0000000 | 0x00bc -#define NT_STATUS_DUPLICATE_NAME 0xC0000000 | 0x00bd -#define NT_STATUS_BAD_NETWORK_PATH 0xC0000000 | 0x00be -#define NT_STATUS_NETWORK_BUSY 0xC0000000 | 0x00bf -#define NT_STATUS_DEVICE_DOES_NOT_EXIST 0xC0000000 | 0x00c0 -#define NT_STATUS_TOO_MANY_COMMANDS 0xC0000000 | 0x00c1 -#define NT_STATUS_ADAPTER_HARDWARE_ERROR 0xC0000000 | 0x00c2 -#define NT_STATUS_INVALID_NETWORK_RESPONSE 0xC0000000 | 0x00c3 -#define NT_STATUS_UNEXPECTED_NETWORK_ERROR 0xC0000000 | 0x00c4 -#define NT_STATUS_BAD_REMOTE_ADAPTER 0xC0000000 | 0x00c5 -#define NT_STATUS_PRINT_QUEUE_FULL 0xC0000000 | 0x00c6 -#define NT_STATUS_NO_SPOOL_SPACE 0xC0000000 | 0x00c7 -#define NT_STATUS_PRINT_CANCELLED 0xC0000000 | 0x00c8 -#define NT_STATUS_NETWORK_NAME_DELETED 0xC0000000 | 0x00c9 -#define NT_STATUS_NETWORK_ACCESS_DENIED 0xC0000000 | 0x00ca -#define NT_STATUS_BAD_DEVICE_TYPE 0xC0000000 | 0x00cb -#define NT_STATUS_BAD_NETWORK_NAME 0xC0000000 | 0x00cc -#define NT_STATUS_TOO_MANY_NAMES 0xC0000000 | 0x00cd -#define NT_STATUS_TOO_MANY_SESSIONS 0xC0000000 | 0x00ce -#define NT_STATUS_SHARING_PAUSED 0xC0000000 | 0x00cf -#define NT_STATUS_REQUEST_NOT_ACCEPTED 0xC0000000 | 0x00d0 -#define NT_STATUS_REDIRECTOR_PAUSED 0xC0000000 | 0x00d1 -#define NT_STATUS_NET_WRITE_FAULT 0xC0000000 | 0x00d2 -#define NT_STATUS_PROFILING_AT_LIMIT 0xC0000000 | 0x00d3 -#define NT_STATUS_NOT_SAME_DEVICE 0xC0000000 | 0x00d4 -#define NT_STATUS_FILE_RENAMED 0xC0000000 | 0x00d5 -#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED 0xC0000000 | 0x00d6 -#define NT_STATUS_NO_SECURITY_ON_OBJECT 0xC0000000 | 0x00d7 -#define NT_STATUS_CANT_WAIT 0xC0000000 | 0x00d8 -#define NT_STATUS_PIPE_EMPTY 0xC0000000 | 0x00d9 -#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO 0xC0000000 | 0x00da -#define NT_STATUS_CANT_TERMINATE_SELF 0xC0000000 | 0x00db -#define NT_STATUS_INVALID_SERVER_STATE 0xC0000000 | 0x00dc -#define NT_STATUS_INVALID_DOMAIN_STATE 0xC0000000 | 0x00dd -#define NT_STATUS_INVALID_DOMAIN_ROLE 0xC0000000 | 0x00de -#define NT_STATUS_NO_SUCH_DOMAIN 0xC0000000 | 0x00df -#define NT_STATUS_DOMAIN_EXISTS 0xC0000000 | 0x00e0 -#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED 0xC0000000 | 0x00e1 -#define NT_STATUS_OPLOCK_NOT_GRANTED 0xC0000000 | 0x00e2 -#define NT_STATUS_INVALID_OPLOCK_PROTOCOL 0xC0000000 | 0x00e3 -#define NT_STATUS_INTERNAL_DB_CORRUPTION 0xC0000000 | 0x00e4 -#define NT_STATUS_INTERNAL_ERROR 0xC0000000 | 0x00e5 -#define NT_STATUS_GENERIC_NOT_MAPPED 0xC0000000 | 0x00e6 -#define NT_STATUS_BAD_DESCRIPTOR_FORMAT 0xC0000000 | 0x00e7 -#define NT_STATUS_INVALID_USER_BUFFER 0xC0000000 | 0x00e8 -#define NT_STATUS_UNEXPECTED_IO_ERROR 0xC0000000 | 0x00e9 -#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR 0xC0000000 | 0x00ea -#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR 0xC0000000 | 0x00eb -#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR 0xC0000000 | 0x00ec -#define NT_STATUS_NOT_LOGON_PROCESS 0xC0000000 | 0x00ed -#define NT_STATUS_LOGON_SESSION_EXISTS 0xC0000000 | 0x00ee -#define NT_STATUS_INVALID_PARAMETER_1 0xC0000000 | 0x00ef -#define NT_STATUS_INVALID_PARAMETER_2 0xC0000000 | 0x00f0 -#define NT_STATUS_INVALID_PARAMETER_3 0xC0000000 | 0x00f1 -#define NT_STATUS_INVALID_PARAMETER_4 0xC0000000 | 0x00f2 
-#define NT_STATUS_INVALID_PARAMETER_5 0xC0000000 | 0x00f3 -#define NT_STATUS_INVALID_PARAMETER_6 0xC0000000 | 0x00f4 -#define NT_STATUS_INVALID_PARAMETER_7 0xC0000000 | 0x00f5 -#define NT_STATUS_INVALID_PARAMETER_8 0xC0000000 | 0x00f6 -#define NT_STATUS_INVALID_PARAMETER_9 0xC0000000 | 0x00f7 -#define NT_STATUS_INVALID_PARAMETER_10 0xC0000000 | 0x00f8 -#define NT_STATUS_INVALID_PARAMETER_11 0xC0000000 | 0x00f9 -#define NT_STATUS_INVALID_PARAMETER_12 0xC0000000 | 0x00fa -#define NT_STATUS_REDIRECTOR_NOT_STARTED 0xC0000000 | 0x00fb -#define NT_STATUS_REDIRECTOR_STARTED 0xC0000000 | 0x00fc -#define NT_STATUS_STACK_OVERFLOW 0xC0000000 | 0x00fd -#define NT_STATUS_NO_SUCH_PACKAGE 0xC0000000 | 0x00fe -#define NT_STATUS_BAD_FUNCTION_TABLE 0xC0000000 | 0x00ff -#define NT_STATUS_DIRECTORY_NOT_EMPTY 0xC0000000 | 0x0101 -#define NT_STATUS_FILE_CORRUPT_ERROR 0xC0000000 | 0x0102 -#define NT_STATUS_NOT_A_DIRECTORY 0xC0000000 | 0x0103 -#define NT_STATUS_BAD_LOGON_SESSION_STATE 0xC0000000 | 0x0104 -#define NT_STATUS_LOGON_SESSION_COLLISION 0xC0000000 | 0x0105 -#define NT_STATUS_NAME_TOO_LONG 0xC0000000 | 0x0106 -#define NT_STATUS_FILES_OPEN 0xC0000000 | 0x0107 -#define NT_STATUS_CONNECTION_IN_USE 0xC0000000 | 0x0108 -#define NT_STATUS_MESSAGE_NOT_FOUND 0xC0000000 | 0x0109 -#define NT_STATUS_PROCESS_IS_TERMINATING 0xC0000000 | 0x010a -#define NT_STATUS_INVALID_LOGON_TYPE 0xC0000000 | 0x010b -#define NT_STATUS_NO_GUID_TRANSLATION 0xC0000000 | 0x010c -#define NT_STATUS_CANNOT_IMPERSONATE 0xC0000000 | 0x010d -#define NT_STATUS_IMAGE_ALREADY_LOADED 0xC0000000 | 0x010e -#define NT_STATUS_ABIOS_NOT_PRESENT 0xC0000000 | 0x010f -#define NT_STATUS_ABIOS_LID_NOT_EXIST 0xC0000000 | 0x0110 -#define NT_STATUS_ABIOS_LID_ALREADY_OWNED 0xC0000000 | 0x0111 -#define NT_STATUS_ABIOS_NOT_LID_OWNER 0xC0000000 | 0x0112 -#define NT_STATUS_ABIOS_INVALID_COMMAND 0xC0000000 | 0x0113 -#define NT_STATUS_ABIOS_INVALID_LID 0xC0000000 | 0x0114 -#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE 0xC0000000 | 0x0115 -#define NT_STATUS_ABIOS_INVALID_SELECTOR 0xC0000000 | 0x0116 -#define NT_STATUS_NO_LDT 0xC0000000 | 0x0117 -#define NT_STATUS_INVALID_LDT_SIZE 0xC0000000 | 0x0118 -#define NT_STATUS_INVALID_LDT_OFFSET 0xC0000000 | 0x0119 -#define NT_STATUS_INVALID_LDT_DESCRIPTOR 0xC0000000 | 0x011a -#define NT_STATUS_INVALID_IMAGE_NE_FORMAT 0xC0000000 | 0x011b -#define NT_STATUS_RXACT_INVALID_STATE 0xC0000000 | 0x011c -#define NT_STATUS_RXACT_COMMIT_FAILURE 0xC0000000 | 0x011d -#define NT_STATUS_MAPPED_FILE_SIZE_ZERO 0xC0000000 | 0x011e -#define NT_STATUS_TOO_MANY_OPENED_FILES 0xC0000000 | 0x011f -#define NT_STATUS_CANCELLED 0xC0000000 | 0x0120 -#define NT_STATUS_CANNOT_DELETE 0xC0000000 | 0x0121 -#define NT_STATUS_INVALID_COMPUTER_NAME 0xC0000000 | 0x0122 -#define NT_STATUS_FILE_DELETED 0xC0000000 | 0x0123 -#define NT_STATUS_SPECIAL_ACCOUNT 0xC0000000 | 0x0124 -#define NT_STATUS_SPECIAL_GROUP 0xC0000000 | 0x0125 -#define NT_STATUS_SPECIAL_USER 0xC0000000 | 0x0126 -#define NT_STATUS_MEMBERS_PRIMARY_GROUP 0xC0000000 | 0x0127 -#define NT_STATUS_FILE_CLOSED 0xC0000000 | 0x0128 -#define NT_STATUS_TOO_MANY_THREADS 0xC0000000 | 0x0129 -#define NT_STATUS_THREAD_NOT_IN_PROCESS 0xC0000000 | 0x012a -#define NT_STATUS_TOKEN_ALREADY_IN_USE 0xC0000000 | 0x012b -#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED 0xC0000000 | 0x012c -#define NT_STATUS_COMMITMENT_LIMIT 0xC0000000 | 0x012d -#define NT_STATUS_INVALID_IMAGE_LE_FORMAT 0xC0000000 | 0x012e -#define NT_STATUS_INVALID_IMAGE_NOT_MZ 0xC0000000 | 0x012f -#define NT_STATUS_INVALID_IMAGE_PROTECT 0xC0000000 | 0x0130 
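[Editorial note: the rest of this hunk replaces every bare "0xC0000000 | 0x…." value with a parenthesized form. A minimal sketch of the operator-precedence hazard the parentheses avoid is below; the _OLD/_NEW macro names and the check() helper are hypothetical, used only for illustration (compilers typically also flag the old form with a parentheses warning).]

#include <stdio.h>

#define NT_STATUS_UNSUCCESSFUL_OLD 0xC0000000 | 0x0001	/* old style */
#define NT_STATUS_UNSUCCESSFUL_NEW (0xC0000000 | 0x0001)	/* new style */

static void check(unsigned int rc)
{
	/*
	 * Expands to "rc == 0xC0000000 | 0x0001"; '==' binds tighter than '|',
	 * so this is "(rc == 0xC0000000) | 0x0001", which is never zero and
	 * therefore matches any rc.
	 */
	if (rc == NT_STATUS_UNSUCCESSFUL_OLD)
		printf("old-style macro matches rc=0x%x\n", rc);

	/* The parenthesized value compares as intended. */
	if (rc == NT_STATUS_UNSUCCESSFUL_NEW)
		printf("new-style macro matches rc=0x%x\n", rc);
}

int main(void)
{
	check(0);		/* only the old-style test fires (spuriously) */
	check(0xC0000001);	/* both fire */
	return 0;
}

[End of editorial note; the removed old-style definitions continue below.]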
-#define NT_STATUS_INVALID_IMAGE_WIN_16 0xC0000000 | 0x0131 -#define NT_STATUS_LOGON_SERVER_CONFLICT 0xC0000000 | 0x0132 -#define NT_STATUS_TIME_DIFFERENCE_AT_DC 0xC0000000 | 0x0133 -#define NT_STATUS_SYNCHRONIZATION_REQUIRED 0xC0000000 | 0x0134 -#define NT_STATUS_DLL_NOT_FOUND 0xC0000000 | 0x0135 -#define NT_STATUS_OPEN_FAILED 0xC0000000 | 0x0136 -#define NT_STATUS_IO_PRIVILEGE_FAILED 0xC0000000 | 0x0137 -#define NT_STATUS_ORDINAL_NOT_FOUND 0xC0000000 | 0x0138 -#define NT_STATUS_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0139 -#define NT_STATUS_CONTROL_C_EXIT 0xC0000000 | 0x013a -#define NT_STATUS_LOCAL_DISCONNECT 0xC0000000 | 0x013b -#define NT_STATUS_REMOTE_DISCONNECT 0xC0000000 | 0x013c -#define NT_STATUS_REMOTE_RESOURCES 0xC0000000 | 0x013d -#define NT_STATUS_LINK_FAILED 0xC0000000 | 0x013e -#define NT_STATUS_LINK_TIMEOUT 0xC0000000 | 0x013f -#define NT_STATUS_INVALID_CONNECTION 0xC0000000 | 0x0140 -#define NT_STATUS_INVALID_ADDRESS 0xC0000000 | 0x0141 -#define NT_STATUS_DLL_INIT_FAILED 0xC0000000 | 0x0142 -#define NT_STATUS_MISSING_SYSTEMFILE 0xC0000000 | 0x0143 -#define NT_STATUS_UNHANDLED_EXCEPTION 0xC0000000 | 0x0144 -#define NT_STATUS_APP_INIT_FAILURE 0xC0000000 | 0x0145 -#define NT_STATUS_PAGEFILE_CREATE_FAILED 0xC0000000 | 0x0146 -#define NT_STATUS_NO_PAGEFILE 0xC0000000 | 0x0147 -#define NT_STATUS_INVALID_LEVEL 0xC0000000 | 0x0148 -#define NT_STATUS_WRONG_PASSWORD_CORE 0xC0000000 | 0x0149 -#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT 0xC0000000 | 0x014a -#define NT_STATUS_PIPE_BROKEN 0xC0000000 | 0x014b -#define NT_STATUS_REGISTRY_CORRUPT 0xC0000000 | 0x014c -#define NT_STATUS_REGISTRY_IO_FAILED 0xC0000000 | 0x014d -#define NT_STATUS_NO_EVENT_PAIR 0xC0000000 | 0x014e -#define NT_STATUS_UNRECOGNIZED_VOLUME 0xC0000000 | 0x014f -#define NT_STATUS_SERIAL_NO_DEVICE_INITED 0xC0000000 | 0x0150 -#define NT_STATUS_NO_SUCH_ALIAS 0xC0000000 | 0x0151 -#define NT_STATUS_MEMBER_NOT_IN_ALIAS 0xC0000000 | 0x0152 -#define NT_STATUS_MEMBER_IN_ALIAS 0xC0000000 | 0x0153 -#define NT_STATUS_ALIAS_EXISTS 0xC0000000 | 0x0154 -#define NT_STATUS_LOGON_NOT_GRANTED 0xC0000000 | 0x0155 -#define NT_STATUS_TOO_MANY_SECRETS 0xC0000000 | 0x0156 -#define NT_STATUS_SECRET_TOO_LONG 0xC0000000 | 0x0157 -#define NT_STATUS_INTERNAL_DB_ERROR 0xC0000000 | 0x0158 -#define NT_STATUS_FULLSCREEN_MODE 0xC0000000 | 0x0159 -#define NT_STATUS_TOO_MANY_CONTEXT_IDS 0xC0000000 | 0x015a -#define NT_STATUS_LOGON_TYPE_NOT_GRANTED 0xC0000000 | 0x015b -#define NT_STATUS_NOT_REGISTRY_FILE 0xC0000000 | 0x015c -#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x015d -#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR 0xC0000000 | 0x015e -#define NT_STATUS_FT_MISSING_MEMBER 0xC0000000 | 0x015f -#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY 0xC0000000 | 0x0160 -#define NT_STATUS_ILLEGAL_CHARACTER 0xC0000000 | 0x0161 -#define NT_STATUS_UNMAPPABLE_CHARACTER 0xC0000000 | 0x0162 -#define NT_STATUS_UNDEFINED_CHARACTER 0xC0000000 | 0x0163 -#define NT_STATUS_FLOPPY_VOLUME 0xC0000000 | 0x0164 -#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND 0xC0000000 | 0x0165 -#define NT_STATUS_FLOPPY_WRONG_CYLINDER 0xC0000000 | 0x0166 -#define NT_STATUS_FLOPPY_UNKNOWN_ERROR 0xC0000000 | 0x0167 -#define NT_STATUS_FLOPPY_BAD_REGISTERS 0xC0000000 | 0x0168 -#define NT_STATUS_DISK_RECALIBRATE_FAILED 0xC0000000 | 0x0169 -#define NT_STATUS_DISK_OPERATION_FAILED 0xC0000000 | 0x016a -#define NT_STATUS_DISK_RESET_FAILED 0xC0000000 | 0x016b -#define NT_STATUS_SHARED_IRQ_BUSY 0xC0000000 | 0x016c -#define NT_STATUS_FT_ORPHANING 0xC0000000 | 0x016d -#define NT_STATUS_PARTITION_FAILURE 
0xC0000000 | 0x0172 -#define NT_STATUS_INVALID_BLOCK_LENGTH 0xC0000000 | 0x0173 -#define NT_STATUS_DEVICE_NOT_PARTITIONED 0xC0000000 | 0x0174 -#define NT_STATUS_UNABLE_TO_LOCK_MEDIA 0xC0000000 | 0x0175 -#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA 0xC0000000 | 0x0176 -#define NT_STATUS_EOM_OVERFLOW 0xC0000000 | 0x0177 -#define NT_STATUS_NO_MEDIA 0xC0000000 | 0x0178 -#define NT_STATUS_NO_SUCH_MEMBER 0xC0000000 | 0x017a -#define NT_STATUS_INVALID_MEMBER 0xC0000000 | 0x017b -#define NT_STATUS_KEY_DELETED 0xC0000000 | 0x017c -#define NT_STATUS_NO_LOG_SPACE 0xC0000000 | 0x017d -#define NT_STATUS_TOO_MANY_SIDS 0xC0000000 | 0x017e -#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x017f -#define NT_STATUS_KEY_HAS_CHILDREN 0xC0000000 | 0x0180 -#define NT_STATUS_CHILD_MUST_BE_VOLATILE 0xC0000000 | 0x0181 -#define NT_STATUS_DEVICE_CONFIGURATION_ERROR 0xC0000000 | 0x0182 -#define NT_STATUS_DRIVER_INTERNAL_ERROR 0xC0000000 | 0x0183 -#define NT_STATUS_INVALID_DEVICE_STATE 0xC0000000 | 0x0184 -#define NT_STATUS_IO_DEVICE_ERROR 0xC0000000 | 0x0185 -#define NT_STATUS_DEVICE_PROTOCOL_ERROR 0xC0000000 | 0x0186 -#define NT_STATUS_BACKUP_CONTROLLER 0xC0000000 | 0x0187 -#define NT_STATUS_LOG_FILE_FULL 0xC0000000 | 0x0188 -#define NT_STATUS_TOO_LATE 0xC0000000 | 0x0189 -#define NT_STATUS_NO_TRUST_LSA_SECRET 0xC0000000 | 0x018a -#define NT_STATUS_NO_TRUST_SAM_ACCOUNT 0xC0000000 | 0x018b -#define NT_STATUS_TRUSTED_DOMAIN_FAILURE 0xC0000000 | 0x018c -#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 0xC0000000 | 0x018d -#define NT_STATUS_EVENTLOG_FILE_CORRUPT 0xC0000000 | 0x018e -#define NT_STATUS_EVENTLOG_CANT_START 0xC0000000 | 0x018f -#define NT_STATUS_TRUST_FAILURE 0xC0000000 | 0x0190 -#define NT_STATUS_MUTANT_LIMIT_EXCEEDED 0xC0000000 | 0x0191 -#define NT_STATUS_NETLOGON_NOT_STARTED 0xC0000000 | 0x0192 -#define NT_STATUS_ACCOUNT_EXPIRED 0xC0000000 | 0x0193 -#define NT_STATUS_POSSIBLE_DEADLOCK 0xC0000000 | 0x0194 -#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT 0xC0000000 | 0x0195 -#define NT_STATUS_REMOTE_SESSION_LIMIT 0xC0000000 | 0x0196 -#define NT_STATUS_EVENTLOG_FILE_CHANGED 0xC0000000 | 0x0197 -#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT 0xC0000000 | 0x0198 -#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT 0xC0000000 | 0x0199 -#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT 0xC0000000 | 0x019a -#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT 0xC0000000 | 0x019b -#define NT_STATUS_FS_DRIVER_REQUIRED 0xC0000000 | 0x019c -#define NT_STATUS_NO_USER_SESSION_KEY 0xC0000000 | 0x0202 -#define NT_STATUS_USER_SESSION_DELETED 0xC0000000 | 0x0203 -#define NT_STATUS_RESOURCE_LANG_NOT_FOUND 0xC0000000 | 0x0204 -#define NT_STATUS_INSUFF_SERVER_RESOURCES 0xC0000000 | 0x0205 -#define NT_STATUS_INVALID_BUFFER_SIZE 0xC0000000 | 0x0206 -#define NT_STATUS_INVALID_ADDRESS_COMPONENT 0xC0000000 | 0x0207 -#define NT_STATUS_INVALID_ADDRESS_WILDCARD 0xC0000000 | 0x0208 -#define NT_STATUS_TOO_MANY_ADDRESSES 0xC0000000 | 0x0209 -#define NT_STATUS_ADDRESS_ALREADY_EXISTS 0xC0000000 | 0x020a -#define NT_STATUS_ADDRESS_CLOSED 0xC0000000 | 0x020b -#define NT_STATUS_CONNECTION_DISCONNECTED 0xC0000000 | 0x020c -#define NT_STATUS_CONNECTION_RESET 0xC0000000 | 0x020d -#define NT_STATUS_TOO_MANY_NODES 0xC0000000 | 0x020e -#define NT_STATUS_TRANSACTION_ABORTED 0xC0000000 | 0x020f -#define NT_STATUS_TRANSACTION_TIMED_OUT 0xC0000000 | 0x0210 -#define NT_STATUS_TRANSACTION_NO_RELEASE 0xC0000000 | 0x0211 -#define NT_STATUS_TRANSACTION_NO_MATCH 0xC0000000 | 0x0212 -#define NT_STATUS_TRANSACTION_RESPONDED 0xC0000000 | 0x0213 -#define 
NT_STATUS_TRANSACTION_INVALID_ID 0xC0000000 | 0x0214 -#define NT_STATUS_TRANSACTION_INVALID_TYPE 0xC0000000 | 0x0215 -#define NT_STATUS_NOT_SERVER_SESSION 0xC0000000 | 0x0216 -#define NT_STATUS_NOT_CLIENT_SESSION 0xC0000000 | 0x0217 -#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE 0xC0000000 | 0x0218 -#define NT_STATUS_DEBUG_ATTACH_FAILED 0xC0000000 | 0x0219 -#define NT_STATUS_SYSTEM_PROCESS_TERMINATED 0xC0000000 | 0x021a -#define NT_STATUS_DATA_NOT_ACCEPTED 0xC0000000 | 0x021b -#define NT_STATUS_NO_BROWSER_SERVERS_FOUND 0xC0000000 | 0x021c -#define NT_STATUS_VDM_HARD_ERROR 0xC0000000 | 0x021d -#define NT_STATUS_DRIVER_CANCEL_TIMEOUT 0xC0000000 | 0x021e -#define NT_STATUS_REPLY_MESSAGE_MISMATCH 0xC0000000 | 0x021f -#define NT_STATUS_MAPPED_ALIGNMENT 0xC0000000 | 0x0220 -#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH 0xC0000000 | 0x0221 -#define NT_STATUS_LOST_WRITEBEHIND_DATA 0xC0000000 | 0x0222 -#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID 0xC0000000 | 0x0223 -#define NT_STATUS_PASSWORD_MUST_CHANGE 0xC0000000 | 0x0224 -#define NT_STATUS_NOT_FOUND 0xC0000000 | 0x0225 -#define NT_STATUS_NOT_TINY_STREAM 0xC0000000 | 0x0226 -#define NT_STATUS_RECOVERY_FAILURE 0xC0000000 | 0x0227 -#define NT_STATUS_STACK_OVERFLOW_READ 0xC0000000 | 0x0228 -#define NT_STATUS_FAIL_CHECK 0xC0000000 | 0x0229 -#define NT_STATUS_DUPLICATE_OBJECTID 0xC0000000 | 0x022a -#define NT_STATUS_OBJECTID_EXISTS 0xC0000000 | 0x022b -#define NT_STATUS_CONVERT_TO_LARGE 0xC0000000 | 0x022c -#define NT_STATUS_RETRY 0xC0000000 | 0x022d -#define NT_STATUS_FOUND_OUT_OF_SCOPE 0xC0000000 | 0x022e -#define NT_STATUS_ALLOCATE_BUCKET 0xC0000000 | 0x022f -#define NT_STATUS_PROPSET_NOT_FOUND 0xC0000000 | 0x0230 -#define NT_STATUS_MARSHALL_OVERFLOW 0xC0000000 | 0x0231 -#define NT_STATUS_INVALID_VARIANT 0xC0000000 | 0x0232 -#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND 0xC0000000 | 0x0233 -#define NT_STATUS_ACCOUNT_LOCKED_OUT 0xC0000000 | 0x0234 -#define NT_STATUS_HANDLE_NOT_CLOSABLE 0xC0000000 | 0x0235 -#define NT_STATUS_CONNECTION_REFUSED 0xC0000000 | 0x0236 -#define NT_STATUS_GRACEFUL_DISCONNECT 0xC0000000 | 0x0237 -#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED 0xC0000000 | 0x0238 -#define NT_STATUS_ADDRESS_NOT_ASSOCIATED 0xC0000000 | 0x0239 -#define NT_STATUS_CONNECTION_INVALID 0xC0000000 | 0x023a -#define NT_STATUS_CONNECTION_ACTIVE 0xC0000000 | 0x023b -#define NT_STATUS_NETWORK_UNREACHABLE 0xC0000000 | 0x023c -#define NT_STATUS_HOST_UNREACHABLE 0xC0000000 | 0x023d -#define NT_STATUS_PROTOCOL_UNREACHABLE 0xC0000000 | 0x023e -#define NT_STATUS_PORT_UNREACHABLE 0xC0000000 | 0x023f -#define NT_STATUS_REQUEST_ABORTED 0xC0000000 | 0x0240 -#define NT_STATUS_CONNECTION_ABORTED 0xC0000000 | 0x0241 -#define NT_STATUS_BAD_COMPRESSION_BUFFER 0xC0000000 | 0x0242 -#define NT_STATUS_USER_MAPPED_FILE 0xC0000000 | 0x0243 -#define NT_STATUS_AUDIT_FAILED 0xC0000000 | 0x0244 -#define NT_STATUS_TIMER_RESOLUTION_NOT_SET 0xC0000000 | 0x0245 -#define NT_STATUS_CONNECTION_COUNT_LIMIT 0xC0000000 | 0x0246 -#define NT_STATUS_LOGIN_TIME_RESTRICTION 0xC0000000 | 0x0247 -#define NT_STATUS_LOGIN_WKSTA_RESTRICTION 0xC0000000 | 0x0248 -#define NT_STATUS_IMAGE_MP_UP_MISMATCH 0xC0000000 | 0x0249 -#define NT_STATUS_INSUFFICIENT_LOGON_INFO 0xC0000000 | 0x0250 -#define NT_STATUS_BAD_DLL_ENTRYPOINT 0xC0000000 | 0x0251 -#define NT_STATUS_BAD_SERVICE_ENTRYPOINT 0xC0000000 | 0x0252 -#define NT_STATUS_LPC_REPLY_LOST 0xC0000000 | 0x0253 -#define NT_STATUS_IP_ADDRESS_CONFLICT1 0xC0000000 | 0x0254 -#define NT_STATUS_IP_ADDRESS_CONFLICT2 0xC0000000 | 0x0255 -#define 
NT_STATUS_REGISTRY_QUOTA_LIMIT 0xC0000000 | 0x0256 -#define NT_STATUS_PATH_NOT_COVERED 0xC0000000 | 0x0257 -#define NT_STATUS_NO_CALLBACK_ACTIVE 0xC0000000 | 0x0258 -#define NT_STATUS_LICENSE_QUOTA_EXCEEDED 0xC0000000 | 0x0259 -#define NT_STATUS_PWD_TOO_SHORT 0xC0000000 | 0x025a -#define NT_STATUS_PWD_TOO_RECENT 0xC0000000 | 0x025b -#define NT_STATUS_PWD_HISTORY_CONFLICT 0xC0000000 | 0x025c -#define NT_STATUS_PLUGPLAY_NO_DEVICE 0xC0000000 | 0x025e -#define NT_STATUS_UNSUPPORTED_COMPRESSION 0xC0000000 | 0x025f -#define NT_STATUS_INVALID_HW_PROFILE 0xC0000000 | 0x0260 -#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH 0xC0000000 | 0x0261 -#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND 0xC0000000 | 0x0262 -#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0263 -#define NT_STATUS_RESOURCE_NOT_OWNED 0xC0000000 | 0x0264 -#define NT_STATUS_TOO_MANY_LINKS 0xC0000000 | 0x0265 -#define NT_STATUS_QUOTA_LIST_INCONSISTENT 0xC0000000 | 0x0266 -#define NT_STATUS_FILE_IS_OFFLINE 0xC0000000 | 0x0267 -#define NT_STATUS_NOT_A_REPARSE_POINT 0xC0000000 | 0x0275 -#define NT_STATUS_NO_SUCH_JOB 0xC0000000 | 0xEDE /* scheduler */ +#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000289 +#define NT_STATUS_UNSUCCESSFUL (0xC0000000 | 0x0001) +#define NT_STATUS_NOT_IMPLEMENTED (0xC0000000 | 0x0002) +#define NT_STATUS_INVALID_INFO_CLASS (0xC0000000 | 0x0003) +#define NT_STATUS_INFO_LENGTH_MISMATCH (0xC0000000 | 0x0004) +#define NT_STATUS_ACCESS_VIOLATION (0xC0000000 | 0x0005) +#define NT_STATUS_IN_PAGE_ERROR (0xC0000000 | 0x0006) +#define NT_STATUS_PAGEFILE_QUOTA (0xC0000000 | 0x0007) +#define NT_STATUS_INVALID_HANDLE (0xC0000000 | 0x0008) +#define NT_STATUS_BAD_INITIAL_STACK (0xC0000000 | 0x0009) +#define NT_STATUS_BAD_INITIAL_PC (0xC0000000 | 0x000a) +#define NT_STATUS_INVALID_CID (0xC0000000 | 0x000b) +#define NT_STATUS_TIMER_NOT_CANCELED (0xC0000000 | 0x000c) +#define NT_STATUS_INVALID_PARAMETER (0xC0000000 | 0x000d) +#define NT_STATUS_NO_SUCH_DEVICE (0xC0000000 | 0x000e) +#define NT_STATUS_NO_SUCH_FILE (0xC0000000 | 0x000f) +#define NT_STATUS_INVALID_DEVICE_REQUEST (0xC0000000 | 0x0010) +#define NT_STATUS_END_OF_FILE (0xC0000000 | 0x0011) +#define NT_STATUS_WRONG_VOLUME (0xC0000000 | 0x0012) +#define NT_STATUS_NO_MEDIA_IN_DEVICE (0xC0000000 | 0x0013) +#define NT_STATUS_UNRECOGNIZED_MEDIA (0xC0000000 | 0x0014) +#define NT_STATUS_NONEXISTENT_SECTOR (0xC0000000 | 0x0015) +#define NT_STATUS_MORE_PROCESSING_REQUIRED (0xC0000000 | 0x0016) +#define NT_STATUS_NO_MEMORY (0xC0000000 | 0x0017) +#define NT_STATUS_CONFLICTING_ADDRESSES (0xC0000000 | 0x0018) +#define NT_STATUS_NOT_MAPPED_VIEW (0xC0000000 | 0x0019) +#define NT_STATUS_UNABLE_TO_FREE_VM (0xC0000000 | 0x001a) +#define NT_STATUS_UNABLE_TO_DELETE_SECTION (0xC0000000 | 0x001b) +#define NT_STATUS_INVALID_SYSTEM_SERVICE (0xC0000000 | 0x001c) +#define NT_STATUS_ILLEGAL_INSTRUCTION (0xC0000000 | 0x001d) +#define NT_STATUS_INVALID_LOCK_SEQUENCE (0xC0000000 | 0x001e) +#define NT_STATUS_INVALID_VIEW_SIZE (0xC0000000 | 0x001f) +#define NT_STATUS_INVALID_FILE_FOR_SECTION (0xC0000000 | 0x0020) +#define NT_STATUS_ALREADY_COMMITTED (0xC0000000 | 0x0021) +#define NT_STATUS_ACCESS_DENIED (0xC0000000 | 0x0022) +#define NT_STATUS_BUFFER_TOO_SMALL (0xC0000000 | 0x0023) +#define NT_STATUS_OBJECT_TYPE_MISMATCH (0xC0000000 | 0x0024) +#define NT_STATUS_NONCONTINUABLE_EXCEPTION (0xC0000000 | 0x0025) +#define NT_STATUS_INVALID_DISPOSITION (0xC0000000 | 0x0026) +#define NT_STATUS_UNWIND (0xC0000000 | 0x0027) +#define NT_STATUS_BAD_STACK (0xC0000000 | 0x0028) +#define 
NT_STATUS_INVALID_UNWIND_TARGET (0xC0000000 | 0x0029) +#define NT_STATUS_NOT_LOCKED (0xC0000000 | 0x002a) +#define NT_STATUS_PARITY_ERROR (0xC0000000 | 0x002b) +#define NT_STATUS_UNABLE_TO_DECOMMIT_VM (0xC0000000 | 0x002c) +#define NT_STATUS_NOT_COMMITTED (0xC0000000 | 0x002d) +#define NT_STATUS_INVALID_PORT_ATTRIBUTES (0xC0000000 | 0x002e) +#define NT_STATUS_PORT_MESSAGE_TOO_LONG (0xC0000000 | 0x002f) +#define NT_STATUS_INVALID_PARAMETER_MIX (0xC0000000 | 0x0030) +#define NT_STATUS_INVALID_QUOTA_LOWER (0xC0000000 | 0x0031) +#define NT_STATUS_DISK_CORRUPT_ERROR (0xC0000000 | 0x0032) +#define NT_STATUS_OBJECT_NAME_INVALID (0xC0000000 | 0x0033) +#define NT_STATUS_OBJECT_NAME_NOT_FOUND (0xC0000000 | 0x0034) +#define NT_STATUS_OBJECT_NAME_COLLISION (0xC0000000 | 0x0035) +#define NT_STATUS_HANDLE_NOT_WAITABLE (0xC0000000 | 0x0036) +#define NT_STATUS_PORT_DISCONNECTED (0xC0000000 | 0x0037) +#define NT_STATUS_DEVICE_ALREADY_ATTACHED (0xC0000000 | 0x0038) +#define NT_STATUS_OBJECT_PATH_INVALID (0xC0000000 | 0x0039) +#define NT_STATUS_OBJECT_PATH_NOT_FOUND (0xC0000000 | 0x003a) +#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD (0xC0000000 | 0x003b) +#define NT_STATUS_DATA_OVERRUN (0xC0000000 | 0x003c) +#define NT_STATUS_DATA_LATE_ERROR (0xC0000000 | 0x003d) +#define NT_STATUS_DATA_ERROR (0xC0000000 | 0x003e) +#define NT_STATUS_CRC_ERROR (0xC0000000 | 0x003f) +#define NT_STATUS_SECTION_TOO_BIG (0xC0000000 | 0x0040) +#define NT_STATUS_PORT_CONNECTION_REFUSED (0xC0000000 | 0x0041) +#define NT_STATUS_INVALID_PORT_HANDLE (0xC0000000 | 0x0042) +#define NT_STATUS_SHARING_VIOLATION (0xC0000000 | 0x0043) +#define NT_STATUS_QUOTA_EXCEEDED (0xC0000000 | 0x0044) +#define NT_STATUS_INVALID_PAGE_PROTECTION (0xC0000000 | 0x0045) +#define NT_STATUS_MUTANT_NOT_OWNED (0xC0000000 | 0x0046) +#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED (0xC0000000 | 0x0047) +#define NT_STATUS_PORT_ALREADY_SET (0xC0000000 | 0x0048) +#define NT_STATUS_SECTION_NOT_IMAGE (0xC0000000 | 0x0049) +#define NT_STATUS_SUSPEND_COUNT_EXCEEDED (0xC0000000 | 0x004a) +#define NT_STATUS_THREAD_IS_TERMINATING (0xC0000000 | 0x004b) +#define NT_STATUS_BAD_WORKING_SET_LIMIT (0xC0000000 | 0x004c) +#define NT_STATUS_INCOMPATIBLE_FILE_MAP (0xC0000000 | 0x004d) +#define NT_STATUS_SECTION_PROTECTION (0xC0000000 | 0x004e) +#define NT_STATUS_EAS_NOT_SUPPORTED (0xC0000000 | 0x004f) +#define NT_STATUS_EA_TOO_LARGE (0xC0000000 | 0x0050) +#define NT_STATUS_NONEXISTENT_EA_ENTRY (0xC0000000 | 0x0051) +#define NT_STATUS_NO_EAS_ON_FILE (0xC0000000 | 0x0052) +#define NT_STATUS_EA_CORRUPT_ERROR (0xC0000000 | 0x0053) +#define NT_STATUS_FILE_LOCK_CONFLICT (0xC0000000 | 0x0054) +#define NT_STATUS_LOCK_NOT_GRANTED (0xC0000000 | 0x0055) +#define NT_STATUS_DELETE_PENDING (0xC0000000 | 0x0056) +#define NT_STATUS_CTL_FILE_NOT_SUPPORTED (0xC0000000 | 0x0057) +#define NT_STATUS_UNKNOWN_REVISION (0xC0000000 | 0x0058) +#define NT_STATUS_REVISION_MISMATCH (0xC0000000 | 0x0059) +#define NT_STATUS_INVALID_OWNER (0xC0000000 | 0x005a) +#define NT_STATUS_INVALID_PRIMARY_GROUP (0xC0000000 | 0x005b) +#define NT_STATUS_NO_IMPERSONATION_TOKEN (0xC0000000 | 0x005c) +#define NT_STATUS_CANT_DISABLE_MANDATORY (0xC0000000 | 0x005d) +#define NT_STATUS_NO_LOGON_SERVERS (0xC0000000 | 0x005e) +#define NT_STATUS_NO_SUCH_LOGON_SESSION (0xC0000000 | 0x005f) +#define NT_STATUS_NO_SUCH_PRIVILEGE (0xC0000000 | 0x0060) +#define NT_STATUS_PRIVILEGE_NOT_HELD (0xC0000000 | 0x0061) +#define NT_STATUS_INVALID_ACCOUNT_NAME (0xC0000000 | 0x0062) +#define NT_STATUS_USER_EXISTS (0xC0000000 | 0x0063) +#define 
NT_STATUS_NO_SUCH_USER (0xC0000000 | 0x0064) +#define NT_STATUS_GROUP_EXISTS (0xC0000000 | 0x0065) +#define NT_STATUS_NO_SUCH_GROUP (0xC0000000 | 0x0066) +#define NT_STATUS_MEMBER_IN_GROUP (0xC0000000 | 0x0067) +#define NT_STATUS_MEMBER_NOT_IN_GROUP (0xC0000000 | 0x0068) +#define NT_STATUS_LAST_ADMIN (0xC0000000 | 0x0069) +#define NT_STATUS_WRONG_PASSWORD (0xC0000000 | 0x006a) +#define NT_STATUS_ILL_FORMED_PASSWORD (0xC0000000 | 0x006b) +#define NT_STATUS_PASSWORD_RESTRICTION (0xC0000000 | 0x006c) +#define NT_STATUS_LOGON_FAILURE (0xC0000000 | 0x006d) +#define NT_STATUS_ACCOUNT_RESTRICTION (0xC0000000 | 0x006e) +#define NT_STATUS_INVALID_LOGON_HOURS (0xC0000000 | 0x006f) +#define NT_STATUS_INVALID_WORKSTATION (0xC0000000 | 0x0070) +#define NT_STATUS_PASSWORD_EXPIRED (0xC0000000 | 0x0071) +#define NT_STATUS_ACCOUNT_DISABLED (0xC0000000 | 0x0072) +#define NT_STATUS_NONE_MAPPED (0xC0000000 | 0x0073) +#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED (0xC0000000 | 0x0074) +#define NT_STATUS_LUIDS_EXHAUSTED (0xC0000000 | 0x0075) +#define NT_STATUS_INVALID_SUB_AUTHORITY (0xC0000000 | 0x0076) +#define NT_STATUS_INVALID_ACL (0xC0000000 | 0x0077) +#define NT_STATUS_INVALID_SID (0xC0000000 | 0x0078) +#define NT_STATUS_INVALID_SECURITY_DESCR (0xC0000000 | 0x0079) +#define NT_STATUS_PROCEDURE_NOT_FOUND (0xC0000000 | 0x007a) +#define NT_STATUS_INVALID_IMAGE_FORMAT (0xC0000000 | 0x007b) +#define NT_STATUS_NO_TOKEN (0xC0000000 | 0x007c) +#define NT_STATUS_BAD_INHERITANCE_ACL (0xC0000000 | 0x007d) +#define NT_STATUS_RANGE_NOT_LOCKED (0xC0000000 | 0x007e) +#define NT_STATUS_DISK_FULL (0xC0000000 | 0x007f) +#define NT_STATUS_SERVER_DISABLED (0xC0000000 | 0x0080) +#define NT_STATUS_SERVER_NOT_DISABLED (0xC0000000 | 0x0081) +#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED (0xC0000000 | 0x0082) +#define NT_STATUS_GUIDS_EXHAUSTED (0xC0000000 | 0x0083) +#define NT_STATUS_INVALID_ID_AUTHORITY (0xC0000000 | 0x0084) +#define NT_STATUS_AGENTS_EXHAUSTED (0xC0000000 | 0x0085) +#define NT_STATUS_INVALID_VOLUME_LABEL (0xC0000000 | 0x0086) +#define NT_STATUS_SECTION_NOT_EXTENDED (0xC0000000 | 0x0087) +#define NT_STATUS_NOT_MAPPED_DATA (0xC0000000 | 0x0088) +#define NT_STATUS_RESOURCE_DATA_NOT_FOUND (0xC0000000 | 0x0089) +#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND (0xC0000000 | 0x008a) +#define NT_STATUS_RESOURCE_NAME_NOT_FOUND (0xC0000000 | 0x008b) +#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED (0xC0000000 | 0x008c) +#define NT_STATUS_FLOAT_DENORMAL_OPERAND (0xC0000000 | 0x008d) +#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO (0xC0000000 | 0x008e) +#define NT_STATUS_FLOAT_INEXACT_RESULT (0xC0000000 | 0x008f) +#define NT_STATUS_FLOAT_INVALID_OPERATION (0xC0000000 | 0x0090) +#define NT_STATUS_FLOAT_OVERFLOW (0xC0000000 | 0x0091) +#define NT_STATUS_FLOAT_STACK_CHECK (0xC0000000 | 0x0092) +#define NT_STATUS_FLOAT_UNDERFLOW (0xC0000000 | 0x0093) +#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO (0xC0000000 | 0x0094) +#define NT_STATUS_INTEGER_OVERFLOW (0xC0000000 | 0x0095) +#define NT_STATUS_PRIVILEGED_INSTRUCTION (0xC0000000 | 0x0096) +#define NT_STATUS_TOO_MANY_PAGING_FILES (0xC0000000 | 0x0097) +#define NT_STATUS_FILE_INVALID (0xC0000000 | 0x0098) +#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED (0xC0000000 | 0x0099) +#define NT_STATUS_INSUFFICIENT_RESOURCES (0xC0000000 | 0x009a) +#define NT_STATUS_DFS_EXIT_PATH_FOUND (0xC0000000 | 0x009b) +#define NT_STATUS_DEVICE_DATA_ERROR (0xC0000000 | 0x009c) +#define NT_STATUS_DEVICE_NOT_CONNECTED (0xC0000000 | 0x009d) +#define NT_STATUS_DEVICE_POWER_FAILURE (0xC0000000 | 0x009e) +#define NT_STATUS_FREE_VM_NOT_AT_BASE 
(0xC0000000 | 0x009f) +#define NT_STATUS_MEMORY_NOT_ALLOCATED (0xC0000000 | 0x00a0) +#define NT_STATUS_WORKING_SET_QUOTA (0xC0000000 | 0x00a1) +#define NT_STATUS_MEDIA_WRITE_PROTECTED (0xC0000000 | 0x00a2) +#define NT_STATUS_DEVICE_NOT_READY (0xC0000000 | 0x00a3) +#define NT_STATUS_INVALID_GROUP_ATTRIBUTES (0xC0000000 | 0x00a4) +#define NT_STATUS_BAD_IMPERSONATION_LEVEL (0xC0000000 | 0x00a5) +#define NT_STATUS_CANT_OPEN_ANONYMOUS (0xC0000000 | 0x00a6) +#define NT_STATUS_BAD_VALIDATION_CLASS (0xC0000000 | 0x00a7) +#define NT_STATUS_BAD_TOKEN_TYPE (0xC0000000 | 0x00a8) +#define NT_STATUS_BAD_MASTER_BOOT_RECORD (0xC0000000 | 0x00a9) +#define NT_STATUS_INSTRUCTION_MISALIGNMENT (0xC0000000 | 0x00aa) +#define NT_STATUS_INSTANCE_NOT_AVAILABLE (0xC0000000 | 0x00ab) +#define NT_STATUS_PIPE_NOT_AVAILABLE (0xC0000000 | 0x00ac) +#define NT_STATUS_INVALID_PIPE_STATE (0xC0000000 | 0x00ad) +#define NT_STATUS_PIPE_BUSY (0xC0000000 | 0x00ae) +#define NT_STATUS_ILLEGAL_FUNCTION (0xC0000000 | 0x00af) +#define NT_STATUS_PIPE_DISCONNECTED (0xC0000000 | 0x00b0) +#define NT_STATUS_PIPE_CLOSING (0xC0000000 | 0x00b1) +#define NT_STATUS_PIPE_CONNECTED (0xC0000000 | 0x00b2) +#define NT_STATUS_PIPE_LISTENING (0xC0000000 | 0x00b3) +#define NT_STATUS_INVALID_READ_MODE (0xC0000000 | 0x00b4) +#define NT_STATUS_IO_TIMEOUT (0xC0000000 | 0x00b5) +#define NT_STATUS_FILE_FORCED_CLOSED (0xC0000000 | 0x00b6) +#define NT_STATUS_PROFILING_NOT_STARTED (0xC0000000 | 0x00b7) +#define NT_STATUS_PROFILING_NOT_STOPPED (0xC0000000 | 0x00b8) +#define NT_STATUS_COULD_NOT_INTERPRET (0xC0000000 | 0x00b9) +#define NT_STATUS_FILE_IS_A_DIRECTORY (0xC0000000 | 0x00ba) +#define NT_STATUS_NOT_SUPPORTED (0xC0000000 | 0x00bb) +#define NT_STATUS_REMOTE_NOT_LISTENING (0xC0000000 | 0x00bc) +#define NT_STATUS_DUPLICATE_NAME (0xC0000000 | 0x00bd) +#define NT_STATUS_BAD_NETWORK_PATH (0xC0000000 | 0x00be) +#define NT_STATUS_NETWORK_BUSY (0xC0000000 | 0x00bf) +#define NT_STATUS_DEVICE_DOES_NOT_EXIST (0xC0000000 | 0x00c0) +#define NT_STATUS_TOO_MANY_COMMANDS (0xC0000000 | 0x00c1) +#define NT_STATUS_ADAPTER_HARDWARE_ERROR (0xC0000000 | 0x00c2) +#define NT_STATUS_INVALID_NETWORK_RESPONSE (0xC0000000 | 0x00c3) +#define NT_STATUS_UNEXPECTED_NETWORK_ERROR (0xC0000000 | 0x00c4) +#define NT_STATUS_BAD_REMOTE_ADAPTER (0xC0000000 | 0x00c5) +#define NT_STATUS_PRINT_QUEUE_FULL (0xC0000000 | 0x00c6) +#define NT_STATUS_NO_SPOOL_SPACE (0xC0000000 | 0x00c7) +#define NT_STATUS_PRINT_CANCELLED (0xC0000000 | 0x00c8) +#define NT_STATUS_NETWORK_NAME_DELETED (0xC0000000 | 0x00c9) +#define NT_STATUS_NETWORK_ACCESS_DENIED (0xC0000000 | 0x00ca) +#define NT_STATUS_BAD_DEVICE_TYPE (0xC0000000 | 0x00cb) +#define NT_STATUS_BAD_NETWORK_NAME (0xC0000000 | 0x00cc) +#define NT_STATUS_TOO_MANY_NAMES (0xC0000000 | 0x00cd) +#define NT_STATUS_TOO_MANY_SESSIONS (0xC0000000 | 0x00ce) +#define NT_STATUS_SHARING_PAUSED (0xC0000000 | 0x00cf) +#define NT_STATUS_REQUEST_NOT_ACCEPTED (0xC0000000 | 0x00d0) +#define NT_STATUS_REDIRECTOR_PAUSED (0xC0000000 | 0x00d1) +#define NT_STATUS_NET_WRITE_FAULT (0xC0000000 | 0x00d2) +#define NT_STATUS_PROFILING_AT_LIMIT (0xC0000000 | 0x00d3) +#define NT_STATUS_NOT_SAME_DEVICE (0xC0000000 | 0x00d4) +#define NT_STATUS_FILE_RENAMED (0xC0000000 | 0x00d5) +#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED (0xC0000000 | 0x00d6) +#define NT_STATUS_NO_SECURITY_ON_OBJECT (0xC0000000 | 0x00d7) +#define NT_STATUS_CANT_WAIT (0xC0000000 | 0x00d8) +#define NT_STATUS_PIPE_EMPTY (0xC0000000 | 0x00d9) +#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO (0xC0000000 | 0x00da) +#define 
NT_STATUS_CANT_TERMINATE_SELF (0xC0000000 | 0x00db) +#define NT_STATUS_INVALID_SERVER_STATE (0xC0000000 | 0x00dc) +#define NT_STATUS_INVALID_DOMAIN_STATE (0xC0000000 | 0x00dd) +#define NT_STATUS_INVALID_DOMAIN_ROLE (0xC0000000 | 0x00de) +#define NT_STATUS_NO_SUCH_DOMAIN (0xC0000000 | 0x00df) +#define NT_STATUS_DOMAIN_EXISTS (0xC0000000 | 0x00e0) +#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED (0xC0000000 | 0x00e1) +#define NT_STATUS_OPLOCK_NOT_GRANTED (0xC0000000 | 0x00e2) +#define NT_STATUS_INVALID_OPLOCK_PROTOCOL (0xC0000000 | 0x00e3) +#define NT_STATUS_INTERNAL_DB_CORRUPTION (0xC0000000 | 0x00e4) +#define NT_STATUS_INTERNAL_ERROR (0xC0000000 | 0x00e5) +#define NT_STATUS_GENERIC_NOT_MAPPED (0xC0000000 | 0x00e6) +#define NT_STATUS_BAD_DESCRIPTOR_FORMAT (0xC0000000 | 0x00e7) +#define NT_STATUS_INVALID_USER_BUFFER (0xC0000000 | 0x00e8) +#define NT_STATUS_UNEXPECTED_IO_ERROR (0xC0000000 | 0x00e9) +#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR (0xC0000000 | 0x00ea) +#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR (0xC0000000 | 0x00eb) +#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR (0xC0000000 | 0x00ec) +#define NT_STATUS_NOT_LOGON_PROCESS (0xC0000000 | 0x00ed) +#define NT_STATUS_LOGON_SESSION_EXISTS (0xC0000000 | 0x00ee) +#define NT_STATUS_INVALID_PARAMETER_1 (0xC0000000 | 0x00ef) +#define NT_STATUS_INVALID_PARAMETER_2 (0xC0000000 | 0x00f0) +#define NT_STATUS_INVALID_PARAMETER_3 (0xC0000000 | 0x00f1) +#define NT_STATUS_INVALID_PARAMETER_4 (0xC0000000 | 0x00f2) +#define NT_STATUS_INVALID_PARAMETER_5 (0xC0000000 | 0x00f3) +#define NT_STATUS_INVALID_PARAMETER_6 (0xC0000000 | 0x00f4) +#define NT_STATUS_INVALID_PARAMETER_7 (0xC0000000 | 0x00f5) +#define NT_STATUS_INVALID_PARAMETER_8 (0xC0000000 | 0x00f6) +#define NT_STATUS_INVALID_PARAMETER_9 (0xC0000000 | 0x00f7) +#define NT_STATUS_INVALID_PARAMETER_10 (0xC0000000 | 0x00f8) +#define NT_STATUS_INVALID_PARAMETER_11 (0xC0000000 | 0x00f9) +#define NT_STATUS_INVALID_PARAMETER_12 (0xC0000000 | 0x00fa) +#define NT_STATUS_REDIRECTOR_NOT_STARTED (0xC0000000 | 0x00fb) +#define NT_STATUS_REDIRECTOR_STARTED (0xC0000000 | 0x00fc) +#define NT_STATUS_STACK_OVERFLOW (0xC0000000 | 0x00fd) +#define NT_STATUS_NO_SUCH_PACKAGE (0xC0000000 | 0x00fe) +#define NT_STATUS_BAD_FUNCTION_TABLE (0xC0000000 | 0x00ff) +#define NT_STATUS_DIRECTORY_NOT_EMPTY (0xC0000000 | 0x0101) +#define NT_STATUS_FILE_CORRUPT_ERROR (0xC0000000 | 0x0102) +#define NT_STATUS_NOT_A_DIRECTORY (0xC0000000 | 0x0103) +#define NT_STATUS_BAD_LOGON_SESSION_STATE (0xC0000000 | 0x0104) +#define NT_STATUS_LOGON_SESSION_COLLISION (0xC0000000 | 0x0105) +#define NT_STATUS_NAME_TOO_LONG (0xC0000000 | 0x0106) +#define NT_STATUS_FILES_OPEN (0xC0000000 | 0x0107) +#define NT_STATUS_CONNECTION_IN_USE (0xC0000000 | 0x0108) +#define NT_STATUS_MESSAGE_NOT_FOUND (0xC0000000 | 0x0109) +#define NT_STATUS_PROCESS_IS_TERMINATING (0xC0000000 | 0x010a) +#define NT_STATUS_INVALID_LOGON_TYPE (0xC0000000 | 0x010b) +#define NT_STATUS_NO_GUID_TRANSLATION (0xC0000000 | 0x010c) +#define NT_STATUS_CANNOT_IMPERSONATE (0xC0000000 | 0x010d) +#define NT_STATUS_IMAGE_ALREADY_LOADED (0xC0000000 | 0x010e) +#define NT_STATUS_ABIOS_NOT_PRESENT (0xC0000000 | 0x010f) +#define NT_STATUS_ABIOS_LID_NOT_EXIST (0xC0000000 | 0x0110) +#define NT_STATUS_ABIOS_LID_ALREADY_OWNED (0xC0000000 | 0x0111) +#define NT_STATUS_ABIOS_NOT_LID_OWNER (0xC0000000 | 0x0112) +#define NT_STATUS_ABIOS_INVALID_COMMAND (0xC0000000 | 0x0113) +#define NT_STATUS_ABIOS_INVALID_LID (0xC0000000 | 0x0114) +#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE (0xC0000000 | 0x0115) +#define 
NT_STATUS_ABIOS_INVALID_SELECTOR (0xC0000000 | 0x0116) +#define NT_STATUS_NO_LDT (0xC0000000 | 0x0117) +#define NT_STATUS_INVALID_LDT_SIZE (0xC0000000 | 0x0118) +#define NT_STATUS_INVALID_LDT_OFFSET (0xC0000000 | 0x0119) +#define NT_STATUS_INVALID_LDT_DESCRIPTOR (0xC0000000 | 0x011a) +#define NT_STATUS_INVALID_IMAGE_NE_FORMAT (0xC0000000 | 0x011b) +#define NT_STATUS_RXACT_INVALID_STATE (0xC0000000 | 0x011c) +#define NT_STATUS_RXACT_COMMIT_FAILURE (0xC0000000 | 0x011d) +#define NT_STATUS_MAPPED_FILE_SIZE_ZERO (0xC0000000 | 0x011e) +#define NT_STATUS_TOO_MANY_OPENED_FILES (0xC0000000 | 0x011f) +#define NT_STATUS_CANCELLED (0xC0000000 | 0x0120) +#define NT_STATUS_CANNOT_DELETE (0xC0000000 | 0x0121) +#define NT_STATUS_INVALID_COMPUTER_NAME (0xC0000000 | 0x0122) +#define NT_STATUS_FILE_DELETED (0xC0000000 | 0x0123) +#define NT_STATUS_SPECIAL_ACCOUNT (0xC0000000 | 0x0124) +#define NT_STATUS_SPECIAL_GROUP (0xC0000000 | 0x0125) +#define NT_STATUS_SPECIAL_USER (0xC0000000 | 0x0126) +#define NT_STATUS_MEMBERS_PRIMARY_GROUP (0xC0000000 | 0x0127) +#define NT_STATUS_FILE_CLOSED (0xC0000000 | 0x0128) +#define NT_STATUS_TOO_MANY_THREADS (0xC0000000 | 0x0129) +#define NT_STATUS_THREAD_NOT_IN_PROCESS (0xC0000000 | 0x012a) +#define NT_STATUS_TOKEN_ALREADY_IN_USE (0xC0000000 | 0x012b) +#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED (0xC0000000 | 0x012c) +#define NT_STATUS_COMMITMENT_LIMIT (0xC0000000 | 0x012d) +#define NT_STATUS_INVALID_IMAGE_LE_FORMAT (0xC0000000 | 0x012e) +#define NT_STATUS_INVALID_IMAGE_NOT_MZ (0xC0000000 | 0x012f) +#define NT_STATUS_INVALID_IMAGE_PROTECT (0xC0000000 | 0x0130) +#define NT_STATUS_INVALID_IMAGE_WIN_16 (0xC0000000 | 0x0131) +#define NT_STATUS_LOGON_SERVER_CONFLICT (0xC0000000 | 0x0132) +#define NT_STATUS_TIME_DIFFERENCE_AT_DC (0xC0000000 | 0x0133) +#define NT_STATUS_SYNCHRONIZATION_REQUIRED (0xC0000000 | 0x0134) +#define NT_STATUS_DLL_NOT_FOUND (0xC0000000 | 0x0135) +#define NT_STATUS_OPEN_FAILED (0xC0000000 | 0x0136) +#define NT_STATUS_IO_PRIVILEGE_FAILED (0xC0000000 | 0x0137) +#define NT_STATUS_ORDINAL_NOT_FOUND (0xC0000000 | 0x0138) +#define NT_STATUS_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0139) +#define NT_STATUS_CONTROL_C_EXIT (0xC0000000 | 0x013a) +#define NT_STATUS_LOCAL_DISCONNECT (0xC0000000 | 0x013b) +#define NT_STATUS_REMOTE_DISCONNECT (0xC0000000 | 0x013c) +#define NT_STATUS_REMOTE_RESOURCES (0xC0000000 | 0x013d) +#define NT_STATUS_LINK_FAILED (0xC0000000 | 0x013e) +#define NT_STATUS_LINK_TIMEOUT (0xC0000000 | 0x013f) +#define NT_STATUS_INVALID_CONNECTION (0xC0000000 | 0x0140) +#define NT_STATUS_INVALID_ADDRESS (0xC0000000 | 0x0141) +#define NT_STATUS_DLL_INIT_FAILED (0xC0000000 | 0x0142) +#define NT_STATUS_MISSING_SYSTEMFILE (0xC0000000 | 0x0143) +#define NT_STATUS_UNHANDLED_EXCEPTION (0xC0000000 | 0x0144) +#define NT_STATUS_APP_INIT_FAILURE (0xC0000000 | 0x0145) +#define NT_STATUS_PAGEFILE_CREATE_FAILED (0xC0000000 | 0x0146) +#define NT_STATUS_NO_PAGEFILE (0xC0000000 | 0x0147) +#define NT_STATUS_INVALID_LEVEL (0xC0000000 | 0x0148) +#define NT_STATUS_WRONG_PASSWORD_CORE (0xC0000000 | 0x0149) +#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT (0xC0000000 | 0x014a) +#define NT_STATUS_PIPE_BROKEN (0xC0000000 | 0x014b) +#define NT_STATUS_REGISTRY_CORRUPT (0xC0000000 | 0x014c) +#define NT_STATUS_REGISTRY_IO_FAILED (0xC0000000 | 0x014d) +#define NT_STATUS_NO_EVENT_PAIR (0xC0000000 | 0x014e) +#define NT_STATUS_UNRECOGNIZED_VOLUME (0xC0000000 | 0x014f) +#define NT_STATUS_SERIAL_NO_DEVICE_INITED (0xC0000000 | 0x0150) +#define NT_STATUS_NO_SUCH_ALIAS (0xC0000000 | 0x0151) +#define 
NT_STATUS_MEMBER_NOT_IN_ALIAS (0xC0000000 | 0x0152) +#define NT_STATUS_MEMBER_IN_ALIAS (0xC0000000 | 0x0153) +#define NT_STATUS_ALIAS_EXISTS (0xC0000000 | 0x0154) +#define NT_STATUS_LOGON_NOT_GRANTED (0xC0000000 | 0x0155) +#define NT_STATUS_TOO_MANY_SECRETS (0xC0000000 | 0x0156) +#define NT_STATUS_SECRET_TOO_LONG (0xC0000000 | 0x0157) +#define NT_STATUS_INTERNAL_DB_ERROR (0xC0000000 | 0x0158) +#define NT_STATUS_FULLSCREEN_MODE (0xC0000000 | 0x0159) +#define NT_STATUS_TOO_MANY_CONTEXT_IDS (0xC0000000 | 0x015a) +#define NT_STATUS_LOGON_TYPE_NOT_GRANTED (0xC0000000 | 0x015b) +#define NT_STATUS_NOT_REGISTRY_FILE (0xC0000000 | 0x015c) +#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x015d) +#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR (0xC0000000 | 0x015e) +#define NT_STATUS_FT_MISSING_MEMBER (0xC0000000 | 0x015f) +#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY (0xC0000000 | 0x0160) +#define NT_STATUS_ILLEGAL_CHARACTER (0xC0000000 | 0x0161) +#define NT_STATUS_UNMAPPABLE_CHARACTER (0xC0000000 | 0x0162) +#define NT_STATUS_UNDEFINED_CHARACTER (0xC0000000 | 0x0163) +#define NT_STATUS_FLOPPY_VOLUME (0xC0000000 | 0x0164) +#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND (0xC0000000 | 0x0165) +#define NT_STATUS_FLOPPY_WRONG_CYLINDER (0xC0000000 | 0x0166) +#define NT_STATUS_FLOPPY_UNKNOWN_ERROR (0xC0000000 | 0x0167) +#define NT_STATUS_FLOPPY_BAD_REGISTERS (0xC0000000 | 0x0168) +#define NT_STATUS_DISK_RECALIBRATE_FAILED (0xC0000000 | 0x0169) +#define NT_STATUS_DISK_OPERATION_FAILED (0xC0000000 | 0x016a) +#define NT_STATUS_DISK_RESET_FAILED (0xC0000000 | 0x016b) +#define NT_STATUS_SHARED_IRQ_BUSY (0xC0000000 | 0x016c) +#define NT_STATUS_FT_ORPHANING (0xC0000000 | 0x016d) +#define NT_STATUS_PARTITION_FAILURE (0xC0000000 | 0x0172) +#define NT_STATUS_INVALID_BLOCK_LENGTH (0xC0000000 | 0x0173) +#define NT_STATUS_DEVICE_NOT_PARTITIONED (0xC0000000 | 0x0174) +#define NT_STATUS_UNABLE_TO_LOCK_MEDIA (0xC0000000 | 0x0175) +#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA (0xC0000000 | 0x0176) +#define NT_STATUS_EOM_OVERFLOW (0xC0000000 | 0x0177) +#define NT_STATUS_NO_MEDIA (0xC0000000 | 0x0178) +#define NT_STATUS_NO_SUCH_MEMBER (0xC0000000 | 0x017a) +#define NT_STATUS_INVALID_MEMBER (0xC0000000 | 0x017b) +#define NT_STATUS_KEY_DELETED (0xC0000000 | 0x017c) +#define NT_STATUS_NO_LOG_SPACE (0xC0000000 | 0x017d) +#define NT_STATUS_TOO_MANY_SIDS (0xC0000000 | 0x017e) +#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x017f) +#define NT_STATUS_KEY_HAS_CHILDREN (0xC0000000 | 0x0180) +#define NT_STATUS_CHILD_MUST_BE_VOLATILE (0xC0000000 | 0x0181) +#define NT_STATUS_DEVICE_CONFIGURATION_ERROR (0xC0000000 | 0x0182) +#define NT_STATUS_DRIVER_INTERNAL_ERROR (0xC0000000 | 0x0183) +#define NT_STATUS_INVALID_DEVICE_STATE (0xC0000000 | 0x0184) +#define NT_STATUS_IO_DEVICE_ERROR (0xC0000000 | 0x0185) +#define NT_STATUS_DEVICE_PROTOCOL_ERROR (0xC0000000 | 0x0186) +#define NT_STATUS_BACKUP_CONTROLLER (0xC0000000 | 0x0187) +#define NT_STATUS_LOG_FILE_FULL (0xC0000000 | 0x0188) +#define NT_STATUS_TOO_LATE (0xC0000000 | 0x0189) +#define NT_STATUS_NO_TRUST_LSA_SECRET (0xC0000000 | 0x018a) +#define NT_STATUS_NO_TRUST_SAM_ACCOUNT (0xC0000000 | 0x018b) +#define NT_STATUS_TRUSTED_DOMAIN_FAILURE (0xC0000000 | 0x018c) +#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE (0xC0000000 | 0x018d) +#define NT_STATUS_EVENTLOG_FILE_CORRUPT (0xC0000000 | 0x018e) +#define NT_STATUS_EVENTLOG_CANT_START (0xC0000000 | 0x018f) +#define NT_STATUS_TRUST_FAILURE (0xC0000000 | 0x0190) +#define NT_STATUS_MUTANT_LIMIT_EXCEEDED (0xC0000000 | 0x0191) +#define 
NT_STATUS_NETLOGON_NOT_STARTED (0xC0000000 | 0x0192) +#define NT_STATUS_ACCOUNT_EXPIRED (0xC0000000 | 0x0193) +#define NT_STATUS_POSSIBLE_DEADLOCK (0xC0000000 | 0x0194) +#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT (0xC0000000 | 0x0195) +#define NT_STATUS_REMOTE_SESSION_LIMIT (0xC0000000 | 0x0196) +#define NT_STATUS_EVENTLOG_FILE_CHANGED (0xC0000000 | 0x0197) +#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT (0xC0000000 | 0x0198) +#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT (0xC0000000 | 0x0199) +#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT (0xC0000000 | 0x019a) +#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT (0xC0000000 | 0x019b) +#define NT_STATUS_FS_DRIVER_REQUIRED (0xC0000000 | 0x019c) +#define NT_STATUS_INVALID_LOCK_RANGE (0xC0000000 | 0x01a1) +#define NT_STATUS_NO_USER_SESSION_KEY (0xC0000000 | 0x0202) +#define NT_STATUS_USER_SESSION_DELETED (0xC0000000 | 0x0203) +#define NT_STATUS_RESOURCE_LANG_NOT_FOUND (0xC0000000 | 0x0204) +#define NT_STATUS_INSUFF_SERVER_RESOURCES (0xC0000000 | 0x0205) +#define NT_STATUS_INVALID_BUFFER_SIZE (0xC0000000 | 0x0206) +#define NT_STATUS_INVALID_ADDRESS_COMPONENT (0xC0000000 | 0x0207) +#define NT_STATUS_INVALID_ADDRESS_WILDCARD (0xC0000000 | 0x0208) +#define NT_STATUS_TOO_MANY_ADDRESSES (0xC0000000 | 0x0209) +#define NT_STATUS_ADDRESS_ALREADY_EXISTS (0xC0000000 | 0x020a) +#define NT_STATUS_ADDRESS_CLOSED (0xC0000000 | 0x020b) +#define NT_STATUS_CONNECTION_DISCONNECTED (0xC0000000 | 0x020c) +#define NT_STATUS_CONNECTION_RESET (0xC0000000 | 0x020d) +#define NT_STATUS_TOO_MANY_NODES (0xC0000000 | 0x020e) +#define NT_STATUS_TRANSACTION_ABORTED (0xC0000000 | 0x020f) +#define NT_STATUS_TRANSACTION_TIMED_OUT (0xC0000000 | 0x0210) +#define NT_STATUS_TRANSACTION_NO_RELEASE (0xC0000000 | 0x0211) +#define NT_STATUS_TRANSACTION_NO_MATCH (0xC0000000 | 0x0212) +#define NT_STATUS_TRANSACTION_RESPONDED (0xC0000000 | 0x0213) +#define NT_STATUS_TRANSACTION_INVALID_ID (0xC0000000 | 0x0214) +#define NT_STATUS_TRANSACTION_INVALID_TYPE (0xC0000000 | 0x0215) +#define NT_STATUS_NOT_SERVER_SESSION (0xC0000000 | 0x0216) +#define NT_STATUS_NOT_CLIENT_SESSION (0xC0000000 | 0x0217) +#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE (0xC0000000 | 0x0218) +#define NT_STATUS_DEBUG_ATTACH_FAILED (0xC0000000 | 0x0219) +#define NT_STATUS_SYSTEM_PROCESS_TERMINATED (0xC0000000 | 0x021a) +#define NT_STATUS_DATA_NOT_ACCEPTED (0xC0000000 | 0x021b) +#define NT_STATUS_NO_BROWSER_SERVERS_FOUND (0xC0000000 | 0x021c) +#define NT_STATUS_VDM_HARD_ERROR (0xC0000000 | 0x021d) +#define NT_STATUS_DRIVER_CANCEL_TIMEOUT (0xC0000000 | 0x021e) +#define NT_STATUS_REPLY_MESSAGE_MISMATCH (0xC0000000 | 0x021f) +#define NT_STATUS_MAPPED_ALIGNMENT (0xC0000000 | 0x0220) +#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH (0xC0000000 | 0x0221) +#define NT_STATUS_LOST_WRITEBEHIND_DATA (0xC0000000 | 0x0222) +#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID (0xC0000000 | 0x0223) +#define NT_STATUS_PASSWORD_MUST_CHANGE (0xC0000000 | 0x0224) +#define NT_STATUS_NOT_FOUND (0xC0000000 | 0x0225) +#define NT_STATUS_NOT_TINY_STREAM (0xC0000000 | 0x0226) +#define NT_STATUS_RECOVERY_FAILURE (0xC0000000 | 0x0227) +#define NT_STATUS_STACK_OVERFLOW_READ (0xC0000000 | 0x0228) +#define NT_STATUS_FAIL_CHECK (0xC0000000 | 0x0229) +#define NT_STATUS_DUPLICATE_OBJECTID (0xC0000000 | 0x022a) +#define NT_STATUS_OBJECTID_EXISTS (0xC0000000 | 0x022b) +#define NT_STATUS_CONVERT_TO_LARGE (0xC0000000 | 0x022c) +#define NT_STATUS_RETRY (0xC0000000 | 0x022d) +#define NT_STATUS_FOUND_OUT_OF_SCOPE (0xC0000000 | 0x022e) +#define 
NT_STATUS_ALLOCATE_BUCKET (0xC0000000 | 0x022f) +#define NT_STATUS_PROPSET_NOT_FOUND (0xC0000000 | 0x0230) +#define NT_STATUS_MARSHALL_OVERFLOW (0xC0000000 | 0x0231) +#define NT_STATUS_INVALID_VARIANT (0xC0000000 | 0x0232) +#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND (0xC0000000 | 0x0233) +#define NT_STATUS_ACCOUNT_LOCKED_OUT (0xC0000000 | 0x0234) +#define NT_STATUS_HANDLE_NOT_CLOSABLE (0xC0000000 | 0x0235) +#define NT_STATUS_CONNECTION_REFUSED (0xC0000000 | 0x0236) +#define NT_STATUS_GRACEFUL_DISCONNECT (0xC0000000 | 0x0237) +#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED (0xC0000000 | 0x0238) +#define NT_STATUS_ADDRESS_NOT_ASSOCIATED (0xC0000000 | 0x0239) +#define NT_STATUS_CONNECTION_INVALID (0xC0000000 | 0x023a) +#define NT_STATUS_CONNECTION_ACTIVE (0xC0000000 | 0x023b) +#define NT_STATUS_NETWORK_UNREACHABLE (0xC0000000 | 0x023c) +#define NT_STATUS_HOST_UNREACHABLE (0xC0000000 | 0x023d) +#define NT_STATUS_PROTOCOL_UNREACHABLE (0xC0000000 | 0x023e) +#define NT_STATUS_PORT_UNREACHABLE (0xC0000000 | 0x023f) +#define NT_STATUS_REQUEST_ABORTED (0xC0000000 | 0x0240) +#define NT_STATUS_CONNECTION_ABORTED (0xC0000000 | 0x0241) +#define NT_STATUS_BAD_COMPRESSION_BUFFER (0xC0000000 | 0x0242) +#define NT_STATUS_USER_MAPPED_FILE (0xC0000000 | 0x0243) +#define NT_STATUS_AUDIT_FAILED (0xC0000000 | 0x0244) +#define NT_STATUS_TIMER_RESOLUTION_NOT_SET (0xC0000000 | 0x0245) +#define NT_STATUS_CONNECTION_COUNT_LIMIT (0xC0000000 | 0x0246) +#define NT_STATUS_LOGIN_TIME_RESTRICTION (0xC0000000 | 0x0247) +#define NT_STATUS_LOGIN_WKSTA_RESTRICTION (0xC0000000 | 0x0248) +#define NT_STATUS_IMAGE_MP_UP_MISMATCH (0xC0000000 | 0x0249) +#define NT_STATUS_INSUFFICIENT_LOGON_INFO (0xC0000000 | 0x0250) +#define NT_STATUS_BAD_DLL_ENTRYPOINT (0xC0000000 | 0x0251) +#define NT_STATUS_BAD_SERVICE_ENTRYPOINT (0xC0000000 | 0x0252) +#define NT_STATUS_LPC_REPLY_LOST (0xC0000000 | 0x0253) +#define NT_STATUS_IP_ADDRESS_CONFLICT1 (0xC0000000 | 0x0254) +#define NT_STATUS_IP_ADDRESS_CONFLICT2 (0xC0000000 | 0x0255) +#define NT_STATUS_REGISTRY_QUOTA_LIMIT (0xC0000000 | 0x0256) +#define NT_STATUS_PATH_NOT_COVERED (0xC0000000 | 0x0257) +#define NT_STATUS_NO_CALLBACK_ACTIVE (0xC0000000 | 0x0258) +#define NT_STATUS_LICENSE_QUOTA_EXCEEDED (0xC0000000 | 0x0259) +#define NT_STATUS_PWD_TOO_SHORT (0xC0000000 | 0x025a) +#define NT_STATUS_PWD_TOO_RECENT (0xC0000000 | 0x025b) +#define NT_STATUS_PWD_HISTORY_CONFLICT (0xC0000000 | 0x025c) +#define NT_STATUS_PLUGPLAY_NO_DEVICE (0xC0000000 | 0x025e) +#define NT_STATUS_UNSUPPORTED_COMPRESSION (0xC0000000 | 0x025f) +#define NT_STATUS_INVALID_HW_PROFILE (0xC0000000 | 0x0260) +#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH (0xC0000000 | 0x0261) +#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND (0xC0000000 | 0x0262) +#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0263) +#define NT_STATUS_RESOURCE_NOT_OWNED (0xC0000000 | 0x0264) +#define NT_STATUS_TOO_MANY_LINKS (0xC0000000 | 0x0265) +#define NT_STATUS_QUOTA_LIST_INCONSISTENT (0xC0000000 | 0x0266) +#define NT_STATUS_FILE_IS_OFFLINE (0xC0000000 | 0x0267) +#define NT_STATUS_NOT_A_REPARSE_POINT (0xC0000000 | 0x0275) +#define NT_STATUS_NETWORK_SESSION_EXPIRED (0xC0000000 | 0x035c) +#define NT_STATUS_NO_SUCH_JOB (0xC0000000 | 0xEDE) /* scheduler */ +#define NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP (0xC0000000 | 0x5D0000) #endif /* _NTERR_H */ diff --git a/fs/smb/common/fscc.h b/fs/smb/common/fscc.h index 35dbacdbb9020d..0123f34db1e891 100644 --- a/fs/smb/common/fscc.h +++ b/fs/smb/common/fscc.h @@ -145,6 +145,62 @@ typedef struct { } __packed 
FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */ /* + * File Attributes + * See MS-FSCC 2.6 + */ +#define FILE_ATTRIBUTE_READONLY 0x00000001 +#define FILE_ATTRIBUTE_HIDDEN 0x00000002 +#define FILE_ATTRIBUTE_SYSTEM 0x00000004 +#define FILE_ATTRIBUTE_DIRECTORY 0x00000010 +#define FILE_ATTRIBUTE_ARCHIVE 0x00000020 +#define FILE_ATTRIBUTE_NORMAL 0x00000080 +#define FILE_ATTRIBUTE_TEMPORARY 0x00000100 +#define FILE_ATTRIBUTE_SPARSE_FILE 0x00000200 +#define FILE_ATTRIBUTE_REPARSE_POINT 0x00000400 +#define FILE_ATTRIBUTE_COMPRESSED 0x00000800 +#define FILE_ATTRIBUTE_OFFLINE 0x00001000 +#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x00002000 +#define FILE_ATTRIBUTE_ENCRYPTED 0x00004000 +#define FILE_ATTRIBUTE_INTEGRITY_STREAM 0x00008000 +#define FILE_ATTRIBUTE_NO_SCRUB_DATA 0x00020000 +#define FILE_ATTRIBUTE_MASK (FILE_ATTRIBUTE_READONLY | FILE_ATTRIBUTE_HIDDEN | \ + FILE_ATTRIBUTE_SYSTEM | FILE_ATTRIBUTE_DIRECTORY | \ + FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_NORMAL | \ + FILE_ATTRIBUTE_TEMPORARY | FILE_ATTRIBUTE_SPARSE_FILE | \ + FILE_ATTRIBUTE_REPARSE_POINT | FILE_ATTRIBUTE_COMPRESSED | \ + FILE_ATTRIBUTE_OFFLINE | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED | \ + FILE_ATTRIBUTE_ENCRYPTED | FILE_ATTRIBUTE_INTEGRITY_STREAM | \ + FILE_ATTRIBUTE_NO_SCRUB_DATA) + +#define FILE_ATTRIBUTE_READONLY_LE cpu_to_le32(FILE_ATTRIBUTE_READONLY) +#define FILE_ATTRIBUTE_HIDDEN_LE cpu_to_le32(FILE_ATTRIBUTE_HIDDEN) +#define FILE_ATTRIBUTE_SYSTEM_LE cpu_to_le32(FILE_ATTRIBUTE_SYSTEM) +#define FILE_ATTRIBUTE_DIRECTORY_LE cpu_to_le32(FILE_ATTRIBUTE_DIRECTORY) +#define FILE_ATTRIBUTE_ARCHIVE_LE cpu_to_le32(FILE_ATTRIBUTE_ARCHIVE) +#define FILE_ATTRIBUTE_NORMAL_LE cpu_to_le32(FILE_ATTRIBUTE_NORMAL) +#define FILE_ATTRIBUTE_TEMPORARY_LE cpu_to_le32(FILE_ATTRIBUTE_TEMPORARY) +#define FILE_ATTRIBUTE_SPARSE_FILE_LE cpu_to_le32(FILE_ATTRIBUTE_SPARSE_FILE) +#define FILE_ATTRIBUTE_REPARSE_POINT_LE cpu_to_le32(FILE_ATTRIBUTE_REPARSE_POINT) +#define FILE_ATTRIBUTE_COMPRESSED_LE cpu_to_le32(FILE_ATTRIBUTE_COMPRESSED) +#define FILE_ATTRIBUTE_OFFLINE_LE cpu_to_le32(FILE_ATTRIBUTE_OFFLINE) +#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED_LE cpu_to_le32(FILE_ATTRIBUTE_NOT_CONTENT_INDEXED) +#define FILE_ATTRIBUTE_ENCRYPTED_LE cpu_to_le32(FILE_ATTRIBUTE_ENCRYPTED) +#define FILE_ATTRIBUTE_INTEGRITY_STREAM_LE cpu_to_le32(FILE_ATTRIBUTE_INTEGRITY_STREAM) +#define FILE_ATTRIBUTE_NO_SCRUB_DATA_LE cpu_to_le32(FILE_ATTRIBUTE_NO_SCRUB_DATA) +#define FILE_ATTRIBUTE_MASK_LE cpu_to_le32(FILE_ATTRIBUTE_MASK) + +/* + * Response contains array of the following structures + * See MS-FSCC 2.7.1 + */ +struct file_notify_information { + __le32 NextEntryOffset; + __le32 Action; + __le32 FileNameLength; + __u8 FileName[]; +} __packed; + +/* * See POSIX Extensions to MS-FSCC 2.3.2.1 * Link: https://gitlab.com/samba-team/smb3-posix-spec/-/blob/master/fscc_posix_extensions.md */ diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h index 945a8e0cf36c26..3c8d8a4e743935 100644 --- a/fs/smb/common/smb2pdu.h +++ b/fs/smb/common/smb2pdu.h @@ -991,6 +991,7 @@ struct smb2_set_info_rsp { /* notify completion filter flags. 
See MS-FSCC 2.6 and MS-SMB2 2.2.35 */ #define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001 #define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002 +#define FILE_NOTIFY_CHANGE_NAME 0x00000003 #define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004 #define FILE_NOTIFY_CHANGE_SIZE 0x00000008 #define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010 @@ -1002,7 +1003,10 @@ struct smb2_set_info_rsp { #define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400 #define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800 -/* SMB2 Notify Action Flags */ +/* + * SMB2 Notify Action Flags + * See MS-FSCC 2.7.1 + */ #define FILE_ACTION_ADDED 0x00000001 #define FILE_ACTION_REMOVED 0x00000002 #define FILE_ACTION_MODIFIED 0x00000003 @@ -1012,7 +1016,10 @@ struct smb2_set_info_rsp { #define FILE_ACTION_REMOVED_STREAM 0x00000007 #define FILE_ACTION_MODIFIED_STREAM 0x00000008 #define FILE_ACTION_REMOVED_BY_DELETE 0x00000009 +#define FILE_ACTION_ID_NOT_TUNNELLED 0x0000000A +#define FILE_ACTION_TUNNELLED_ID_COLLISION 0x0000000B +/* See MS-SMB2 2.2.35 */ struct smb2_change_notify_req { struct smb2_hdr hdr; __le16 StructureSize; @@ -1024,6 +1031,7 @@ struct smb2_change_notify_req { __u32 Reserved; } __packed; +/* See MS-SMB2 2.2.36 */ struct smb2_change_notify_rsp { struct smb2_hdr hdr; __le16 StructureSize; /* Must be 9 */ @@ -1064,41 +1072,6 @@ struct smb2_server_client_notification { #define IL_IMPERSONATION cpu_to_le32(0x00000002) #define IL_DELEGATE cpu_to_le32(0x00000003) -/* File Attributes */ -#define FILE_ATTRIBUTE_READONLY 0x00000001 -#define FILE_ATTRIBUTE_HIDDEN 0x00000002 -#define FILE_ATTRIBUTE_SYSTEM 0x00000004 -#define FILE_ATTRIBUTE_DIRECTORY 0x00000010 -#define FILE_ATTRIBUTE_ARCHIVE 0x00000020 -#define FILE_ATTRIBUTE_NORMAL 0x00000080 -#define FILE_ATTRIBUTE_TEMPORARY 0x00000100 -#define FILE_ATTRIBUTE_SPARSE_FILE 0x00000200 -#define FILE_ATTRIBUTE_REPARSE_POINT 0x00000400 -#define FILE_ATTRIBUTE_COMPRESSED 0x00000800 -#define FILE_ATTRIBUTE_OFFLINE 0x00001000 -#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x00002000 -#define FILE_ATTRIBUTE_ENCRYPTED 0x00004000 -#define FILE_ATTRIBUTE_INTEGRITY_STREAM 0x00008000 -#define FILE_ATTRIBUTE_NO_SCRUB_DATA 0x00020000 -#define FILE_ATTRIBUTE__MASK 0x00007FB7 - -#define FILE_ATTRIBUTE_READONLY_LE cpu_to_le32(0x00000001) -#define FILE_ATTRIBUTE_HIDDEN_LE cpu_to_le32(0x00000002) -#define FILE_ATTRIBUTE_SYSTEM_LE cpu_to_le32(0x00000004) -#define FILE_ATTRIBUTE_DIRECTORY_LE cpu_to_le32(0x00000010) -#define FILE_ATTRIBUTE_ARCHIVE_LE cpu_to_le32(0x00000020) -#define FILE_ATTRIBUTE_NORMAL_LE cpu_to_le32(0x00000080) -#define FILE_ATTRIBUTE_TEMPORARY_LE cpu_to_le32(0x00000100) -#define FILE_ATTRIBUTE_SPARSE_FILE_LE cpu_to_le32(0x00000200) -#define FILE_ATTRIBUTE_REPARSE_POINT_LE cpu_to_le32(0x00000400) -#define FILE_ATTRIBUTE_COMPRESSED_LE cpu_to_le32(0x00000800) -#define FILE_ATTRIBUTE_OFFLINE_LE cpu_to_le32(0x00001000) -#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED_LE cpu_to_le32(0x00002000) -#define FILE_ATTRIBUTE_ENCRYPTED_LE cpu_to_le32(0x00004000) -#define FILE_ATTRIBUTE_INTEGRITY_STREAM_LE cpu_to_le32(0x00008000) -#define FILE_ATTRIBUTE_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000) -#define FILE_ATTRIBUTE_MASK_LE cpu_to_le32(0x00007FB7) - /* Desired Access Flags */ #define FILE_READ_DATA_LE cpu_to_le32(0x00000001) #define FILE_LIST_DIRECTORY_LE cpu_to_le32(0x00000001) @@ -1537,9 +1510,10 @@ struct duplicate_extents_to_file { __le64 ByteCount; /* Bytes to be copied */ } __packed; -/* See MS-FSCC 2.3.8 */ +/* See MS-FSCC 2.3.9 */ #define DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC 0x00000001 struct 
duplicate_extents_to_file_ex { + __le64 StructureSize; /* MUST be set to 0x30 */ __u64 PersistentFileHandle; /* source file handle, opaque endianness */ __u64 VolatileFileHandle; __le64 SourceFileOffset; diff --git a/fs/smb/server/nterr.h b/fs/smb/server/nterr.h deleted file mode 100644 index 2f358f88a01883..00000000000000 --- a/fs/smb/server/nterr.h +++ /dev/null @@ -1,543 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Unix SMB/Netbios implementation. - * Version 1.9. - * NT error code constants - * Copyright (C) Andrew Tridgell 1992-2000 - * Copyright (C) John H Terpstra 1996-2000 - * Copyright (C) Luke Kenneth Casson Leighton 1996-2000 - * Copyright (C) Paul Ashton 1998-2000 - */ - -#ifndef _NTERR_H -#define _NTERR_H - -/* Win32 Status codes. */ -#define NT_STATUS_MORE_ENTRIES 0x0105 -#define NT_ERROR_INVALID_PARAMETER 0x0057 -#define NT_ERROR_INSUFFICIENT_BUFFER 0x007a -#define NT_STATUS_1804 0x070c -#define NT_STATUS_NOTIFY_ENUM_DIR 0x010c -#define NT_STATUS_INVALID_LOCK_RANGE (0xC0000000 | 0x01a1) -/* - * Win32 Error codes extracted using a loop in smbclient then printing a netmon - * sniff to a file. - */ - -#define NT_STATUS_OK 0x0000 -#define NT_STATUS_SOME_UNMAPPED 0x0107 -#define NT_STATUS_BUFFER_OVERFLOW 0x80000005 -#define NT_STATUS_NO_MORE_ENTRIES 0x8000001a -#define NT_STATUS_MEDIA_CHANGED 0x8000001c -#define NT_STATUS_END_OF_MEDIA 0x8000001e -#define NT_STATUS_MEDIA_CHECK 0x80000020 -#define NT_STATUS_NO_DATA_DETECTED 0x8000001c -#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d -#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288 -#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288 -#define NT_STATUS_UNSUCCESSFUL (0xC0000000 | 0x0001) -#define NT_STATUS_NOT_IMPLEMENTED (0xC0000000 | 0x0002) -#define NT_STATUS_INVALID_INFO_CLASS (0xC0000000 | 0x0003) -#define NT_STATUS_INFO_LENGTH_MISMATCH (0xC0000000 | 0x0004) -#define NT_STATUS_ACCESS_VIOLATION (0xC0000000 | 0x0005) -#define NT_STATUS_IN_PAGE_ERROR (0xC0000000 | 0x0006) -#define NT_STATUS_PAGEFILE_QUOTA (0xC0000000 | 0x0007) -#define NT_STATUS_INVALID_HANDLE (0xC0000000 | 0x0008) -#define NT_STATUS_BAD_INITIAL_STACK (0xC0000000 | 0x0009) -#define NT_STATUS_BAD_INITIAL_PC (0xC0000000 | 0x000a) -#define NT_STATUS_INVALID_CID (0xC0000000 | 0x000b) -#define NT_STATUS_TIMER_NOT_CANCELED (0xC0000000 | 0x000c) -#define NT_STATUS_INVALID_PARAMETER (0xC0000000 | 0x000d) -#define NT_STATUS_NO_SUCH_DEVICE (0xC0000000 | 0x000e) -#define NT_STATUS_NO_SUCH_FILE (0xC0000000 | 0x000f) -#define NT_STATUS_INVALID_DEVICE_REQUEST (0xC0000000 | 0x0010) -#define NT_STATUS_END_OF_FILE (0xC0000000 | 0x0011) -#define NT_STATUS_WRONG_VOLUME (0xC0000000 | 0x0012) -#define NT_STATUS_NO_MEDIA_IN_DEVICE (0xC0000000 | 0x0013) -#define NT_STATUS_UNRECOGNIZED_MEDIA (0xC0000000 | 0x0014) -#define NT_STATUS_NONEXISTENT_SECTOR (0xC0000000 | 0x0015) -#define NT_STATUS_MORE_PROCESSING_REQUIRED (0xC0000000 | 0x0016) -#define NT_STATUS_NO_MEMORY (0xC0000000 | 0x0017) -#define NT_STATUS_CONFLICTING_ADDRESSES (0xC0000000 | 0x0018) -#define NT_STATUS_NOT_MAPPED_VIEW (0xC0000000 | 0x0019) -#define NT_STATUS_UNABLE_TO_FREE_VM (0x80000000 | 0x001a) -#define NT_STATUS_UNABLE_TO_DELETE_SECTION (0xC0000000 | 0x001b) -#define NT_STATUS_INVALID_SYSTEM_SERVICE (0xC0000000 | 0x001c) -#define NT_STATUS_ILLEGAL_INSTRUCTION (0xC0000000 | 0x001d) -#define NT_STATUS_INVALID_LOCK_SEQUENCE (0xC0000000 | 0x001e) -#define NT_STATUS_INVALID_VIEW_SIZE (0xC0000000 | 0x001f) -#define NT_STATUS_INVALID_FILE_FOR_SECTION (0xC0000000 | 0x0020) -#define 
NT_STATUS_ALREADY_COMMITTED (0xC0000000 | 0x0021) -#define NT_STATUS_ACCESS_DENIED (0xC0000000 | 0x0022) -#define NT_STATUS_BUFFER_TOO_SMALL (0xC0000000 | 0x0023) -#define NT_STATUS_OBJECT_TYPE_MISMATCH (0xC0000000 | 0x0024) -#define NT_STATUS_NONCONTINUABLE_EXCEPTION (0xC0000000 | 0x0025) -#define NT_STATUS_INVALID_DISPOSITION (0xC0000000 | 0x0026) -#define NT_STATUS_UNWIND (0xC0000000 | 0x0027) -#define NT_STATUS_BAD_STACK (0xC0000000 | 0x0028) -#define NT_STATUS_INVALID_UNWIND_TARGET (0xC0000000 | 0x0029) -#define NT_STATUS_NOT_LOCKED (0xC0000000 | 0x002a) -#define NT_STATUS_PARITY_ERROR (0xC0000000 | 0x002b) -#define NT_STATUS_UNABLE_TO_DECOMMIT_VM (0xC0000000 | 0x002c) -#define NT_STATUS_NOT_COMMITTED (0xC0000000 | 0x002d) -#define NT_STATUS_INVALID_PORT_ATTRIBUTES (0xC0000000 | 0x002e) -#define NT_STATUS_PORT_MESSAGE_TOO_LONG (0xC0000000 | 0x002f) -#define NT_STATUS_INVALID_PARAMETER_MIX (0xC0000000 | 0x0030) -#define NT_STATUS_INVALID_QUOTA_LOWER (0xC0000000 | 0x0031) -#define NT_STATUS_DISK_CORRUPT_ERROR (0xC0000000 | 0x0032) -#define NT_STATUS_OBJECT_NAME_INVALID (0xC0000000 | 0x0033) -#define NT_STATUS_OBJECT_NAME_NOT_FOUND (0xC0000000 | 0x0034) -#define NT_STATUS_OBJECT_NAME_COLLISION (0xC0000000 | 0x0035) -#define NT_STATUS_HANDLE_NOT_WAITABLE (0xC0000000 | 0x0036) -#define NT_STATUS_PORT_DISCONNECTED (0xC0000000 | 0x0037) -#define NT_STATUS_DEVICE_ALREADY_ATTACHED (0xC0000000 | 0x0038) -#define NT_STATUS_OBJECT_PATH_INVALID (0xC0000000 | 0x0039) -#define NT_STATUS_OBJECT_PATH_NOT_FOUND (0xC0000000 | 0x003a) -#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD (0xC0000000 | 0x003b) -#define NT_STATUS_DATA_OVERRUN (0xC0000000 | 0x003c) -#define NT_STATUS_DATA_LATE_ERROR (0xC0000000 | 0x003d) -#define NT_STATUS_DATA_ERROR (0xC0000000 | 0x003e) -#define NT_STATUS_CRC_ERROR (0xC0000000 | 0x003f) -#define NT_STATUS_SECTION_TOO_BIG (0xC0000000 | 0x0040) -#define NT_STATUS_PORT_CONNECTION_REFUSED (0xC0000000 | 0x0041) -#define NT_STATUS_INVALID_PORT_HANDLE (0xC0000000 | 0x0042) -#define NT_STATUS_SHARING_VIOLATION (0xC0000000 | 0x0043) -#define NT_STATUS_QUOTA_EXCEEDED (0xC0000000 | 0x0044) -#define NT_STATUS_INVALID_PAGE_PROTECTION (0xC0000000 | 0x0045) -#define NT_STATUS_MUTANT_NOT_OWNED (0xC0000000 | 0x0046) -#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED (0xC0000000 | 0x0047) -#define NT_STATUS_PORT_ALREADY_SET (0xC0000000 | 0x0048) -#define NT_STATUS_SECTION_NOT_IMAGE (0xC0000000 | 0x0049) -#define NT_STATUS_SUSPEND_COUNT_EXCEEDED (0xC0000000 | 0x004a) -#define NT_STATUS_THREAD_IS_TERMINATING (0xC0000000 | 0x004b) -#define NT_STATUS_BAD_WORKING_SET_LIMIT (0xC0000000 | 0x004c) -#define NT_STATUS_INCOMPATIBLE_FILE_MAP (0xC0000000 | 0x004d) -#define NT_STATUS_SECTION_PROTECTION (0xC0000000 | 0x004e) -#define NT_STATUS_EAS_NOT_SUPPORTED (0xC0000000 | 0x004f) -#define NT_STATUS_EA_TOO_LARGE (0xC0000000 | 0x0050) -#define NT_STATUS_NONEXISTENT_EA_ENTRY (0xC0000000 | 0x0051) -#define NT_STATUS_NO_EAS_ON_FILE (0xC0000000 | 0x0052) -#define NT_STATUS_EA_CORRUPT_ERROR (0xC0000000 | 0x0053) -#define NT_STATUS_FILE_LOCK_CONFLICT (0xC0000000 | 0x0054) -#define NT_STATUS_LOCK_NOT_GRANTED (0xC0000000 | 0x0055) -#define NT_STATUS_DELETE_PENDING (0xC0000000 | 0x0056) -#define NT_STATUS_CTL_FILE_NOT_SUPPORTED (0xC0000000 | 0x0057) -#define NT_STATUS_UNKNOWN_REVISION (0xC0000000 | 0x0058) -#define NT_STATUS_REVISION_MISMATCH (0xC0000000 | 0x0059) -#define NT_STATUS_INVALID_OWNER (0xC0000000 | 0x005a) -#define NT_STATUS_INVALID_PRIMARY_GROUP (0xC0000000 | 0x005b) -#define NT_STATUS_NO_IMPERSONATION_TOKEN 
(0xC0000000 | 0x005c) -#define NT_STATUS_CANT_DISABLE_MANDATORY (0xC0000000 | 0x005d) -#define NT_STATUS_NO_LOGON_SERVERS (0xC0000000 | 0x005e) -#define NT_STATUS_NO_SUCH_LOGON_SESSION (0xC0000000 | 0x005f) -#define NT_STATUS_NO_SUCH_PRIVILEGE (0xC0000000 | 0x0060) -#define NT_STATUS_PRIVILEGE_NOT_HELD (0xC0000000 | 0x0061) -#define NT_STATUS_INVALID_ACCOUNT_NAME (0xC0000000 | 0x0062) -#define NT_STATUS_USER_EXISTS (0xC0000000 | 0x0063) -#define NT_STATUS_NO_SUCH_USER (0xC0000000 | 0x0064) -#define NT_STATUS_GROUP_EXISTS (0xC0000000 | 0x0065) -#define NT_STATUS_NO_SUCH_GROUP (0xC0000000 | 0x0066) -#define NT_STATUS_MEMBER_IN_GROUP (0xC0000000 | 0x0067) -#define NT_STATUS_MEMBER_NOT_IN_GROUP (0xC0000000 | 0x0068) -#define NT_STATUS_LAST_ADMIN (0xC0000000 | 0x0069) -#define NT_STATUS_WRONG_PASSWORD (0xC0000000 | 0x006a) -#define NT_STATUS_ILL_FORMED_PASSWORD (0xC0000000 | 0x006b) -#define NT_STATUS_PASSWORD_RESTRICTION (0xC0000000 | 0x006c) -#define NT_STATUS_LOGON_FAILURE (0xC0000000 | 0x006d) -#define NT_STATUS_ACCOUNT_RESTRICTION (0xC0000000 | 0x006e) -#define NT_STATUS_INVALID_LOGON_HOURS (0xC0000000 | 0x006f) -#define NT_STATUS_INVALID_WORKSTATION (0xC0000000 | 0x0070) -#define NT_STATUS_PASSWORD_EXPIRED (0xC0000000 | 0x0071) -#define NT_STATUS_ACCOUNT_DISABLED (0xC0000000 | 0x0072) -#define NT_STATUS_NONE_MAPPED (0xC0000000 | 0x0073) -#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED (0xC0000000 | 0x0074) -#define NT_STATUS_LUIDS_EXHAUSTED (0xC0000000 | 0x0075) -#define NT_STATUS_INVALID_SUB_AUTHORITY (0xC0000000 | 0x0076) -#define NT_STATUS_INVALID_ACL (0xC0000000 | 0x0077) -#define NT_STATUS_INVALID_SID (0xC0000000 | 0x0078) -#define NT_STATUS_INVALID_SECURITY_DESCR (0xC0000000 | 0x0079) -#define NT_STATUS_PROCEDURE_NOT_FOUND (0xC0000000 | 0x007a) -#define NT_STATUS_INVALID_IMAGE_FORMAT (0xC0000000 | 0x007b) -#define NT_STATUS_NO_TOKEN (0xC0000000 | 0x007c) -#define NT_STATUS_BAD_INHERITANCE_ACL (0xC0000000 | 0x007d) -#define NT_STATUS_RANGE_NOT_LOCKED (0xC0000000 | 0x007e) -#define NT_STATUS_DISK_FULL (0xC0000000 | 0x007f) -#define NT_STATUS_SERVER_DISABLED (0xC0000000 | 0x0080) -#define NT_STATUS_SERVER_NOT_DISABLED (0xC0000000 | 0x0081) -#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED (0xC0000000 | 0x0082) -#define NT_STATUS_GUIDS_EXHAUSTED (0xC0000000 | 0x0083) -#define NT_STATUS_INVALID_ID_AUTHORITY (0xC0000000 | 0x0084) -#define NT_STATUS_AGENTS_EXHAUSTED (0xC0000000 | 0x0085) -#define NT_STATUS_INVALID_VOLUME_LABEL (0xC0000000 | 0x0086) -#define NT_STATUS_SECTION_NOT_EXTENDED (0xC0000000 | 0x0087) -#define NT_STATUS_NOT_MAPPED_DATA (0xC0000000 | 0x0088) -#define NT_STATUS_RESOURCE_DATA_NOT_FOUND (0xC0000000 | 0x0089) -#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND (0xC0000000 | 0x008a) -#define NT_STATUS_RESOURCE_NAME_NOT_FOUND (0xC0000000 | 0x008b) -#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED (0xC0000000 | 0x008c) -#define NT_STATUS_FLOAT_DENORMAL_OPERAND (0xC0000000 | 0x008d) -#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO (0xC0000000 | 0x008e) -#define NT_STATUS_FLOAT_INEXACT_RESULT (0xC0000000 | 0x008f) -#define NT_STATUS_FLOAT_INVALID_OPERATION (0xC0000000 | 0x0090) -#define NT_STATUS_FLOAT_OVERFLOW (0xC0000000 | 0x0091) -#define NT_STATUS_FLOAT_STACK_CHECK (0xC0000000 | 0x0092) -#define NT_STATUS_FLOAT_UNDERFLOW (0xC0000000 | 0x0093) -#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO (0xC0000000 | 0x0094) -#define NT_STATUS_INTEGER_OVERFLOW (0xC0000000 | 0x0095) -#define NT_STATUS_PRIVILEGED_INSTRUCTION (0xC0000000 | 0x0096) -#define NT_STATUS_TOO_MANY_PAGING_FILES (0xC0000000 | 0x0097) -#define 
NT_STATUS_FILE_INVALID (0xC0000000 | 0x0098) -#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED (0xC0000000 | 0x0099) -#define NT_STATUS_INSUFFICIENT_RESOURCES (0xC0000000 | 0x009a) -#define NT_STATUS_DFS_EXIT_PATH_FOUND (0xC0000000 | 0x009b) -#define NT_STATUS_DEVICE_DATA_ERROR (0xC0000000 | 0x009c) -#define NT_STATUS_DEVICE_NOT_CONNECTED (0xC0000000 | 0x009d) -#define NT_STATUS_DEVICE_POWER_FAILURE (0xC0000000 | 0x009e) -#define NT_STATUS_FREE_VM_NOT_AT_BASE (0xC0000000 | 0x009f) -#define NT_STATUS_MEMORY_NOT_ALLOCATED (0xC0000000 | 0x00a0) -#define NT_STATUS_WORKING_SET_QUOTA (0xC0000000 | 0x00a1) -#define NT_STATUS_MEDIA_WRITE_PROTECTED (0xC0000000 | 0x00a2) -#define NT_STATUS_DEVICE_NOT_READY (0xC0000000 | 0x00a3) -#define NT_STATUS_INVALID_GROUP_ATTRIBUTES (0xC0000000 | 0x00a4) -#define NT_STATUS_BAD_IMPERSONATION_LEVEL (0xC0000000 | 0x00a5) -#define NT_STATUS_CANT_OPEN_ANONYMOUS (0xC0000000 | 0x00a6) -#define NT_STATUS_BAD_VALIDATION_CLASS (0xC0000000 | 0x00a7) -#define NT_STATUS_BAD_TOKEN_TYPE (0xC0000000 | 0x00a8) -#define NT_STATUS_BAD_MASTER_BOOT_RECORD (0xC0000000 | 0x00a9) -#define NT_STATUS_INSTRUCTION_MISALIGNMENT (0xC0000000 | 0x00aa) -#define NT_STATUS_INSTANCE_NOT_AVAILABLE (0xC0000000 | 0x00ab) -#define NT_STATUS_PIPE_NOT_AVAILABLE (0xC0000000 | 0x00ac) -#define NT_STATUS_INVALID_PIPE_STATE (0xC0000000 | 0x00ad) -#define NT_STATUS_PIPE_BUSY (0xC0000000 | 0x00ae) -#define NT_STATUS_ILLEGAL_FUNCTION (0xC0000000 | 0x00af) -#define NT_STATUS_PIPE_DISCONNECTED (0xC0000000 | 0x00b0) -#define NT_STATUS_PIPE_CLOSING (0xC0000000 | 0x00b1) -#define NT_STATUS_PIPE_CONNECTED (0xC0000000 | 0x00b2) -#define NT_STATUS_PIPE_LISTENING (0xC0000000 | 0x00b3) -#define NT_STATUS_INVALID_READ_MODE (0xC0000000 | 0x00b4) -#define NT_STATUS_IO_TIMEOUT (0xC0000000 | 0x00b5) -#define NT_STATUS_FILE_FORCED_CLOSED (0xC0000000 | 0x00b6) -#define NT_STATUS_PROFILING_NOT_STARTED (0xC0000000 | 0x00b7) -#define NT_STATUS_PROFILING_NOT_STOPPED (0xC0000000 | 0x00b8) -#define NT_STATUS_COULD_NOT_INTERPRET (0xC0000000 | 0x00b9) -#define NT_STATUS_FILE_IS_A_DIRECTORY (0xC0000000 | 0x00ba) -#define NT_STATUS_NOT_SUPPORTED (0xC0000000 | 0x00bb) -#define NT_STATUS_REMOTE_NOT_LISTENING (0xC0000000 | 0x00bc) -#define NT_STATUS_DUPLICATE_NAME (0xC0000000 | 0x00bd) -#define NT_STATUS_BAD_NETWORK_PATH (0xC0000000 | 0x00be) -#define NT_STATUS_NETWORK_BUSY (0xC0000000 | 0x00bf) -#define NT_STATUS_DEVICE_DOES_NOT_EXIST (0xC0000000 | 0x00c0) -#define NT_STATUS_TOO_MANY_COMMANDS (0xC0000000 | 0x00c1) -#define NT_STATUS_ADAPTER_HARDWARE_ERROR (0xC0000000 | 0x00c2) -#define NT_STATUS_INVALID_NETWORK_RESPONSE (0xC0000000 | 0x00c3) -#define NT_STATUS_UNEXPECTED_NETWORK_ERROR (0xC0000000 | 0x00c4) -#define NT_STATUS_BAD_REMOTE_ADAPTER (0xC0000000 | 0x00c5) -#define NT_STATUS_PRINT_QUEUE_FULL (0xC0000000 | 0x00c6) -#define NT_STATUS_NO_SPOOL_SPACE (0xC0000000 | 0x00c7) -#define NT_STATUS_PRINT_CANCELLED (0xC0000000 | 0x00c8) -#define NT_STATUS_NETWORK_NAME_DELETED (0xC0000000 | 0x00c9) -#define NT_STATUS_NETWORK_ACCESS_DENIED (0xC0000000 | 0x00ca) -#define NT_STATUS_BAD_DEVICE_TYPE (0xC0000000 | 0x00cb) -#define NT_STATUS_BAD_NETWORK_NAME (0xC0000000 | 0x00cc) -#define NT_STATUS_TOO_MANY_NAMES (0xC0000000 | 0x00cd) -#define NT_STATUS_TOO_MANY_SESSIONS (0xC0000000 | 0x00ce) -#define NT_STATUS_SHARING_PAUSED (0xC0000000 | 0x00cf) -#define NT_STATUS_REQUEST_NOT_ACCEPTED (0xC0000000 | 0x00d0) -#define NT_STATUS_REDIRECTOR_PAUSED (0xC0000000 | 0x00d1) -#define NT_STATUS_NET_WRITE_FAULT (0xC0000000 | 0x00d2) -#define 
NT_STATUS_PROFILING_AT_LIMIT (0xC0000000 | 0x00d3) -#define NT_STATUS_NOT_SAME_DEVICE (0xC0000000 | 0x00d4) -#define NT_STATUS_FILE_RENAMED (0xC0000000 | 0x00d5) -#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED (0xC0000000 | 0x00d6) -#define NT_STATUS_NO_SECURITY_ON_OBJECT (0xC0000000 | 0x00d7) -#define NT_STATUS_CANT_WAIT (0xC0000000 | 0x00d8) -#define NT_STATUS_PIPE_EMPTY (0xC0000000 | 0x00d9) -#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO (0xC0000000 | 0x00da) -#define NT_STATUS_CANT_TERMINATE_SELF (0xC0000000 | 0x00db) -#define NT_STATUS_INVALID_SERVER_STATE (0xC0000000 | 0x00dc) -#define NT_STATUS_INVALID_DOMAIN_STATE (0xC0000000 | 0x00dd) -#define NT_STATUS_INVALID_DOMAIN_ROLE (0xC0000000 | 0x00de) -#define NT_STATUS_NO_SUCH_DOMAIN (0xC0000000 | 0x00df) -#define NT_STATUS_DOMAIN_EXISTS (0xC0000000 | 0x00e0) -#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED (0xC0000000 | 0x00e1) -#define NT_STATUS_OPLOCK_NOT_GRANTED (0xC0000000 | 0x00e2) -#define NT_STATUS_INVALID_OPLOCK_PROTOCOL (0xC0000000 | 0x00e3) -#define NT_STATUS_INTERNAL_DB_CORRUPTION (0xC0000000 | 0x00e4) -#define NT_STATUS_INTERNAL_ERROR (0xC0000000 | 0x00e5) -#define NT_STATUS_GENERIC_NOT_MAPPED (0xC0000000 | 0x00e6) -#define NT_STATUS_BAD_DESCRIPTOR_FORMAT (0xC0000000 | 0x00e7) -#define NT_STATUS_INVALID_USER_BUFFER (0xC0000000 | 0x00e8) -#define NT_STATUS_UNEXPECTED_IO_ERROR (0xC0000000 | 0x00e9) -#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR (0xC0000000 | 0x00ea) -#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR (0xC0000000 | 0x00eb) -#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR (0xC0000000 | 0x00ec) -#define NT_STATUS_NOT_LOGON_PROCESS (0xC0000000 | 0x00ed) -#define NT_STATUS_LOGON_SESSION_EXISTS (0xC0000000 | 0x00ee) -#define NT_STATUS_INVALID_PARAMETER_1 (0xC0000000 | 0x00ef) -#define NT_STATUS_INVALID_PARAMETER_2 (0xC0000000 | 0x00f0) -#define NT_STATUS_INVALID_PARAMETER_3 (0xC0000000 | 0x00f1) -#define NT_STATUS_INVALID_PARAMETER_4 (0xC0000000 | 0x00f2) -#define NT_STATUS_INVALID_PARAMETER_5 (0xC0000000 | 0x00f3) -#define NT_STATUS_INVALID_PARAMETER_6 (0xC0000000 | 0x00f4) -#define NT_STATUS_INVALID_PARAMETER_7 (0xC0000000 | 0x00f5) -#define NT_STATUS_INVALID_PARAMETER_8 (0xC0000000 | 0x00f6) -#define NT_STATUS_INVALID_PARAMETER_9 (0xC0000000 | 0x00f7) -#define NT_STATUS_INVALID_PARAMETER_10 (0xC0000000 | 0x00f8) -#define NT_STATUS_INVALID_PARAMETER_11 (0xC0000000 | 0x00f9) -#define NT_STATUS_INVALID_PARAMETER_12 (0xC0000000 | 0x00fa) -#define NT_STATUS_REDIRECTOR_NOT_STARTED (0xC0000000 | 0x00fb) -#define NT_STATUS_REDIRECTOR_STARTED (0xC0000000 | 0x00fc) -#define NT_STATUS_STACK_OVERFLOW (0xC0000000 | 0x00fd) -#define NT_STATUS_NO_SUCH_PACKAGE (0xC0000000 | 0x00fe) -#define NT_STATUS_BAD_FUNCTION_TABLE (0xC0000000 | 0x00ff) -#define NT_STATUS_DIRECTORY_NOT_EMPTY (0xC0000000 | 0x0101) -#define NT_STATUS_FILE_CORRUPT_ERROR (0xC0000000 | 0x0102) -#define NT_STATUS_NOT_A_DIRECTORY (0xC0000000 | 0x0103) -#define NT_STATUS_BAD_LOGON_SESSION_STATE (0xC0000000 | 0x0104) -#define NT_STATUS_LOGON_SESSION_COLLISION (0xC0000000 | 0x0105) -#define NT_STATUS_NAME_TOO_LONG (0xC0000000 | 0x0106) -#define NT_STATUS_FILES_OPEN (0xC0000000 | 0x0107) -#define NT_STATUS_CONNECTION_IN_USE (0xC0000000 | 0x0108) -#define NT_STATUS_MESSAGE_NOT_FOUND (0xC0000000 | 0x0109) -#define NT_STATUS_PROCESS_IS_TERMINATING (0xC0000000 | 0x010a) -#define NT_STATUS_INVALID_LOGON_TYPE (0xC0000000 | 0x010b) -#define NT_STATUS_NO_GUID_TRANSLATION (0xC0000000 | 0x010c) -#define NT_STATUS_CANNOT_IMPERSONATE (0xC0000000 | 0x010d) -#define NT_STATUS_IMAGE_ALREADY_LOADED (0xC0000000 | 
0x010e) -#define NT_STATUS_ABIOS_NOT_PRESENT (0xC0000000 | 0x010f) -#define NT_STATUS_ABIOS_LID_NOT_EXIST (0xC0000000 | 0x0110) -#define NT_STATUS_ABIOS_LID_ALREADY_OWNED (0xC0000000 | 0x0111) -#define NT_STATUS_ABIOS_NOT_LID_OWNER (0xC0000000 | 0x0112) -#define NT_STATUS_ABIOS_INVALID_COMMAND (0xC0000000 | 0x0113) -#define NT_STATUS_ABIOS_INVALID_LID (0xC0000000 | 0x0114) -#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE (0xC0000000 | 0x0115) -#define NT_STATUS_ABIOS_INVALID_SELECTOR (0xC0000000 | 0x0116) -#define NT_STATUS_NO_LDT (0xC0000000 | 0x0117) -#define NT_STATUS_INVALID_LDT_SIZE (0xC0000000 | 0x0118) -#define NT_STATUS_INVALID_LDT_OFFSET (0xC0000000 | 0x0119) -#define NT_STATUS_INVALID_LDT_DESCRIPTOR (0xC0000000 | 0x011a) -#define NT_STATUS_INVALID_IMAGE_NE_FORMAT (0xC0000000 | 0x011b) -#define NT_STATUS_RXACT_INVALID_STATE (0xC0000000 | 0x011c) -#define NT_STATUS_RXACT_COMMIT_FAILURE (0xC0000000 | 0x011d) -#define NT_STATUS_MAPPED_FILE_SIZE_ZERO (0xC0000000 | 0x011e) -#define NT_STATUS_TOO_MANY_OPENED_FILES (0xC0000000 | 0x011f) -#define NT_STATUS_CANCELLED (0xC0000000 | 0x0120) -#define NT_STATUS_CANNOT_DELETE (0xC0000000 | 0x0121) -#define NT_STATUS_INVALID_COMPUTER_NAME (0xC0000000 | 0x0122) -#define NT_STATUS_FILE_DELETED (0xC0000000 | 0x0123) -#define NT_STATUS_SPECIAL_ACCOUNT (0xC0000000 | 0x0124) -#define NT_STATUS_SPECIAL_GROUP (0xC0000000 | 0x0125) -#define NT_STATUS_SPECIAL_USER (0xC0000000 | 0x0126) -#define NT_STATUS_MEMBERS_PRIMARY_GROUP (0xC0000000 | 0x0127) -#define NT_STATUS_FILE_CLOSED (0xC0000000 | 0x0128) -#define NT_STATUS_TOO_MANY_THREADS (0xC0000000 | 0x0129) -#define NT_STATUS_THREAD_NOT_IN_PROCESS (0xC0000000 | 0x012a) -#define NT_STATUS_TOKEN_ALREADY_IN_USE (0xC0000000 | 0x012b) -#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED (0xC0000000 | 0x012c) -#define NT_STATUS_COMMITMENT_LIMIT (0xC0000000 | 0x012d) -#define NT_STATUS_INVALID_IMAGE_LE_FORMAT (0xC0000000 | 0x012e) -#define NT_STATUS_INVALID_IMAGE_NOT_MZ (0xC0000000 | 0x012f) -#define NT_STATUS_INVALID_IMAGE_PROTECT (0xC0000000 | 0x0130) -#define NT_STATUS_INVALID_IMAGE_WIN_16 (0xC0000000 | 0x0131) -#define NT_STATUS_LOGON_SERVER_CONFLICT (0xC0000000 | 0x0132) -#define NT_STATUS_TIME_DIFFERENCE_AT_DC (0xC0000000 | 0x0133) -#define NT_STATUS_SYNCHRONIZATION_REQUIRED (0xC0000000 | 0x0134) -#define NT_STATUS_DLL_NOT_FOUND (0xC0000000 | 0x0135) -#define NT_STATUS_OPEN_FAILED (0xC0000000 | 0x0136) -#define NT_STATUS_IO_PRIVILEGE_FAILED (0xC0000000 | 0x0137) -#define NT_STATUS_ORDINAL_NOT_FOUND (0xC0000000 | 0x0138) -#define NT_STATUS_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0139) -#define NT_STATUS_CONTROL_C_EXIT (0xC0000000 | 0x013a) -#define NT_STATUS_LOCAL_DISCONNECT (0xC0000000 | 0x013b) -#define NT_STATUS_REMOTE_DISCONNECT (0xC0000000 | 0x013c) -#define NT_STATUS_REMOTE_RESOURCES (0xC0000000 | 0x013d) -#define NT_STATUS_LINK_FAILED (0xC0000000 | 0x013e) -#define NT_STATUS_LINK_TIMEOUT (0xC0000000 | 0x013f) -#define NT_STATUS_INVALID_CONNECTION (0xC0000000 | 0x0140) -#define NT_STATUS_INVALID_ADDRESS (0xC0000000 | 0x0141) -#define NT_STATUS_DLL_INIT_FAILED (0xC0000000 | 0x0142) -#define NT_STATUS_MISSING_SYSTEMFILE (0xC0000000 | 0x0143) -#define NT_STATUS_UNHANDLED_EXCEPTION (0xC0000000 | 0x0144) -#define NT_STATUS_APP_INIT_FAILURE (0xC0000000 | 0x0145) -#define NT_STATUS_PAGEFILE_CREATE_FAILED (0xC0000000 | 0x0146) -#define NT_STATUS_NO_PAGEFILE (0xC0000000 | 0x0147) -#define NT_STATUS_INVALID_LEVEL (0xC0000000 | 0x0148) -#define NT_STATUS_WRONG_PASSWORD_CORE (0xC0000000 | 0x0149) -#define 
NT_STATUS_ILLEGAL_FLOAT_CONTEXT (0xC0000000 | 0x014a) -#define NT_STATUS_PIPE_BROKEN (0xC0000000 | 0x014b) -#define NT_STATUS_REGISTRY_CORRUPT (0xC0000000 | 0x014c) -#define NT_STATUS_REGISTRY_IO_FAILED (0xC0000000 | 0x014d) -#define NT_STATUS_NO_EVENT_PAIR (0xC0000000 | 0x014e) -#define NT_STATUS_UNRECOGNIZED_VOLUME (0xC0000000 | 0x014f) -#define NT_STATUS_SERIAL_NO_DEVICE_INITED (0xC0000000 | 0x0150) -#define NT_STATUS_NO_SUCH_ALIAS (0xC0000000 | 0x0151) -#define NT_STATUS_MEMBER_NOT_IN_ALIAS (0xC0000000 | 0x0152) -#define NT_STATUS_MEMBER_IN_ALIAS (0xC0000000 | 0x0153) -#define NT_STATUS_ALIAS_EXISTS (0xC0000000 | 0x0154) -#define NT_STATUS_LOGON_NOT_GRANTED (0xC0000000 | 0x0155) -#define NT_STATUS_TOO_MANY_SECRETS (0xC0000000 | 0x0156) -#define NT_STATUS_SECRET_TOO_LONG (0xC0000000 | 0x0157) -#define NT_STATUS_INTERNAL_DB_ERROR (0xC0000000 | 0x0158) -#define NT_STATUS_FULLSCREEN_MODE (0xC0000000 | 0x0159) -#define NT_STATUS_TOO_MANY_CONTEXT_IDS (0xC0000000 | 0x015a) -#define NT_STATUS_LOGON_TYPE_NOT_GRANTED (0xC0000000 | 0x015b) -#define NT_STATUS_NOT_REGISTRY_FILE (0xC0000000 | 0x015c) -#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x015d) -#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR (0xC0000000 | 0x015e) -#define NT_STATUS_FT_MISSING_MEMBER (0xC0000000 | 0x015f) -#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY (0xC0000000 | 0x0160) -#define NT_STATUS_ILLEGAL_CHARACTER (0xC0000000 | 0x0161) -#define NT_STATUS_UNMAPPABLE_CHARACTER (0xC0000000 | 0x0162) -#define NT_STATUS_UNDEFINED_CHARACTER (0xC0000000 | 0x0163) -#define NT_STATUS_FLOPPY_VOLUME (0xC0000000 | 0x0164) -#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND (0xC0000000 | 0x0165) -#define NT_STATUS_FLOPPY_WRONG_CYLINDER (0xC0000000 | 0x0166) -#define NT_STATUS_FLOPPY_UNKNOWN_ERROR (0xC0000000 | 0x0167) -#define NT_STATUS_FLOPPY_BAD_REGISTERS (0xC0000000 | 0x0168) -#define NT_STATUS_DISK_RECALIBRATE_FAILED (0xC0000000 | 0x0169) -#define NT_STATUS_DISK_OPERATION_FAILED (0xC0000000 | 0x016a) -#define NT_STATUS_DISK_RESET_FAILED (0xC0000000 | 0x016b) -#define NT_STATUS_SHARED_IRQ_BUSY (0xC0000000 | 0x016c) -#define NT_STATUS_FT_ORPHANING (0xC0000000 | 0x016d) -#define NT_STATUS_PARTITION_FAILURE (0xC0000000 | 0x0172) -#define NT_STATUS_INVALID_BLOCK_LENGTH (0xC0000000 | 0x0173) -#define NT_STATUS_DEVICE_NOT_PARTITIONED (0xC0000000 | 0x0174) -#define NT_STATUS_UNABLE_TO_LOCK_MEDIA (0xC0000000 | 0x0175) -#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA (0xC0000000 | 0x0176) -#define NT_STATUS_EOM_OVERFLOW (0xC0000000 | 0x0177) -#define NT_STATUS_NO_MEDIA (0xC0000000 | 0x0178) -#define NT_STATUS_NO_SUCH_MEMBER (0xC0000000 | 0x017a) -#define NT_STATUS_INVALID_MEMBER (0xC0000000 | 0x017b) -#define NT_STATUS_KEY_DELETED (0xC0000000 | 0x017c) -#define NT_STATUS_NO_LOG_SPACE (0xC0000000 | 0x017d) -#define NT_STATUS_TOO_MANY_SIDS (0xC0000000 | 0x017e) -#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x017f) -#define NT_STATUS_KEY_HAS_CHILDREN (0xC0000000 | 0x0180) -#define NT_STATUS_CHILD_MUST_BE_VOLATILE (0xC0000000 | 0x0181) -#define NT_STATUS_DEVICE_CONFIGURATION_ERROR (0xC0000000 | 0x0182) -#define NT_STATUS_DRIVER_INTERNAL_ERROR (0xC0000000 | 0x0183) -#define NT_STATUS_INVALID_DEVICE_STATE (0xC0000000 | 0x0184) -#define NT_STATUS_IO_DEVICE_ERROR (0xC0000000 | 0x0185) -#define NT_STATUS_DEVICE_PROTOCOL_ERROR (0xC0000000 | 0x0186) -#define NT_STATUS_BACKUP_CONTROLLER (0xC0000000 | 0x0187) -#define NT_STATUS_LOG_FILE_FULL (0xC0000000 | 0x0188) -#define NT_STATUS_TOO_LATE (0xC0000000 | 0x0189) -#define NT_STATUS_NO_TRUST_LSA_SECRET 
(0xC0000000 | 0x018a) -#define NT_STATUS_NO_TRUST_SAM_ACCOUNT (0xC0000000 | 0x018b) -#define NT_STATUS_TRUSTED_DOMAIN_FAILURE (0xC0000000 | 0x018c) -#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE (0xC0000000 | 0x018d) -#define NT_STATUS_EVENTLOG_FILE_CORRUPT (0xC0000000 | 0x018e) -#define NT_STATUS_EVENTLOG_CANT_START (0xC0000000 | 0x018f) -#define NT_STATUS_TRUST_FAILURE (0xC0000000 | 0x0190) -#define NT_STATUS_MUTANT_LIMIT_EXCEEDED (0xC0000000 | 0x0191) -#define NT_STATUS_NETLOGON_NOT_STARTED (0xC0000000 | 0x0192) -#define NT_STATUS_ACCOUNT_EXPIRED (0xC0000000 | 0x0193) -#define NT_STATUS_POSSIBLE_DEADLOCK (0xC0000000 | 0x0194) -#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT (0xC0000000 | 0x0195) -#define NT_STATUS_REMOTE_SESSION_LIMIT (0xC0000000 | 0x0196) -#define NT_STATUS_EVENTLOG_FILE_CHANGED (0xC0000000 | 0x0197) -#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT (0xC0000000 | 0x0198) -#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT (0xC0000000 | 0x0199) -#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT (0xC0000000 | 0x019a) -#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT (0xC0000000 | 0x019b) -#define NT_STATUS_FS_DRIVER_REQUIRED (0xC0000000 | 0x019c) -#define NT_STATUS_NO_USER_SESSION_KEY (0xC0000000 | 0x0202) -#define NT_STATUS_USER_SESSION_DELETED (0xC0000000 | 0x0203) -#define NT_STATUS_RESOURCE_LANG_NOT_FOUND (0xC0000000 | 0x0204) -#define NT_STATUS_INSUFF_SERVER_RESOURCES (0xC0000000 | 0x0205) -#define NT_STATUS_INVALID_BUFFER_SIZE (0xC0000000 | 0x0206) -#define NT_STATUS_INVALID_ADDRESS_COMPONENT (0xC0000000 | 0x0207) -#define NT_STATUS_INVALID_ADDRESS_WILDCARD (0xC0000000 | 0x0208) -#define NT_STATUS_TOO_MANY_ADDRESSES (0xC0000000 | 0x0209) -#define NT_STATUS_ADDRESS_ALREADY_EXISTS (0xC0000000 | 0x020a) -#define NT_STATUS_ADDRESS_CLOSED (0xC0000000 | 0x020b) -#define NT_STATUS_CONNECTION_DISCONNECTED (0xC0000000 | 0x020c) -#define NT_STATUS_CONNECTION_RESET (0xC0000000 | 0x020d) -#define NT_STATUS_TOO_MANY_NODES (0xC0000000 | 0x020e) -#define NT_STATUS_TRANSACTION_ABORTED (0xC0000000 | 0x020f) -#define NT_STATUS_TRANSACTION_TIMED_OUT (0xC0000000 | 0x0210) -#define NT_STATUS_TRANSACTION_NO_RELEASE (0xC0000000 | 0x0211) -#define NT_STATUS_TRANSACTION_NO_MATCH (0xC0000000 | 0x0212) -#define NT_STATUS_TRANSACTION_RESPONDED (0xC0000000 | 0x0213) -#define NT_STATUS_TRANSACTION_INVALID_ID (0xC0000000 | 0x0214) -#define NT_STATUS_TRANSACTION_INVALID_TYPE (0xC0000000 | 0x0215) -#define NT_STATUS_NOT_SERVER_SESSION (0xC0000000 | 0x0216) -#define NT_STATUS_NOT_CLIENT_SESSION (0xC0000000 | 0x0217) -#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE (0xC0000000 | 0x0218) -#define NT_STATUS_DEBUG_ATTACH_FAILED (0xC0000000 | 0x0219) -#define NT_STATUS_SYSTEM_PROCESS_TERMINATED (0xC0000000 | 0x021a) -#define NT_STATUS_DATA_NOT_ACCEPTED (0xC0000000 | 0x021b) -#define NT_STATUS_NO_BROWSER_SERVERS_FOUND (0xC0000000 | 0x021c) -#define NT_STATUS_VDM_HARD_ERROR (0xC0000000 | 0x021d) -#define NT_STATUS_DRIVER_CANCEL_TIMEOUT (0xC0000000 | 0x021e) -#define NT_STATUS_REPLY_MESSAGE_MISMATCH (0xC0000000 | 0x021f) -#define NT_STATUS_MAPPED_ALIGNMENT (0xC0000000 | 0x0220) -#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH (0xC0000000 | 0x0221) -#define NT_STATUS_LOST_WRITEBEHIND_DATA (0xC0000000 | 0x0222) -#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID (0xC0000000 | 0x0223) -#define NT_STATUS_PASSWORD_MUST_CHANGE (0xC0000000 | 0x0224) -#define NT_STATUS_NOT_FOUND (0xC0000000 | 0x0225) -#define NT_STATUS_NOT_TINY_STREAM (0xC0000000 | 0x0226) -#define NT_STATUS_RECOVERY_FAILURE (0xC0000000 | 0x0227) -#define 
NT_STATUS_STACK_OVERFLOW_READ (0xC0000000 | 0x0228) -#define NT_STATUS_FAIL_CHECK (0xC0000000 | 0x0229) -#define NT_STATUS_DUPLICATE_OBJECTID (0xC0000000 | 0x022a) -#define NT_STATUS_OBJECTID_EXISTS (0xC0000000 | 0x022b) -#define NT_STATUS_CONVERT_TO_LARGE (0xC0000000 | 0x022c) -#define NT_STATUS_RETRY (0xC0000000 | 0x022d) -#define NT_STATUS_FOUND_OUT_OF_SCOPE (0xC0000000 | 0x022e) -#define NT_STATUS_ALLOCATE_BUCKET (0xC0000000 | 0x022f) -#define NT_STATUS_PROPSET_NOT_FOUND (0xC0000000 | 0x0230) -#define NT_STATUS_MARSHALL_OVERFLOW (0xC0000000 | 0x0231) -#define NT_STATUS_INVALID_VARIANT (0xC0000000 | 0x0232) -#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND (0xC0000000 | 0x0233) -#define NT_STATUS_ACCOUNT_LOCKED_OUT (0xC0000000 | 0x0234) -#define NT_STATUS_HANDLE_NOT_CLOSABLE (0xC0000000 | 0x0235) -#define NT_STATUS_CONNECTION_REFUSED (0xC0000000 | 0x0236) -#define NT_STATUS_GRACEFUL_DISCONNECT (0xC0000000 | 0x0237) -#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED (0xC0000000 | 0x0238) -#define NT_STATUS_ADDRESS_NOT_ASSOCIATED (0xC0000000 | 0x0239) -#define NT_STATUS_CONNECTION_INVALID (0xC0000000 | 0x023a) -#define NT_STATUS_CONNECTION_ACTIVE (0xC0000000 | 0x023b) -#define NT_STATUS_NETWORK_UNREACHABLE (0xC0000000 | 0x023c) -#define NT_STATUS_HOST_UNREACHABLE (0xC0000000 | 0x023d) -#define NT_STATUS_PROTOCOL_UNREACHABLE (0xC0000000 | 0x023e) -#define NT_STATUS_PORT_UNREACHABLE (0xC0000000 | 0x023f) -#define NT_STATUS_REQUEST_ABORTED (0xC0000000 | 0x0240) -#define NT_STATUS_CONNECTION_ABORTED (0xC0000000 | 0x0241) -#define NT_STATUS_BAD_COMPRESSION_BUFFER (0xC0000000 | 0x0242) -#define NT_STATUS_USER_MAPPED_FILE (0xC0000000 | 0x0243) -#define NT_STATUS_AUDIT_FAILED (0xC0000000 | 0x0244) -#define NT_STATUS_TIMER_RESOLUTION_NOT_SET (0xC0000000 | 0x0245) -#define NT_STATUS_CONNECTION_COUNT_LIMIT (0xC0000000 | 0x0246) -#define NT_STATUS_LOGIN_TIME_RESTRICTION (0xC0000000 | 0x0247) -#define NT_STATUS_LOGIN_WKSTA_RESTRICTION (0xC0000000 | 0x0248) -#define NT_STATUS_IMAGE_MP_UP_MISMATCH (0xC0000000 | 0x0249) -#define NT_STATUS_INSUFFICIENT_LOGON_INFO (0xC0000000 | 0x0250) -#define NT_STATUS_BAD_DLL_ENTRYPOINT (0xC0000000 | 0x0251) -#define NT_STATUS_BAD_SERVICE_ENTRYPOINT (0xC0000000 | 0x0252) -#define NT_STATUS_LPC_REPLY_LOST (0xC0000000 | 0x0253) -#define NT_STATUS_IP_ADDRESS_CONFLICT1 (0xC0000000 | 0x0254) -#define NT_STATUS_IP_ADDRESS_CONFLICT2 (0xC0000000 | 0x0255) -#define NT_STATUS_REGISTRY_QUOTA_LIMIT (0xC0000000 | 0x0256) -#define NT_STATUS_PATH_NOT_COVERED (0xC0000000 | 0x0257) -#define NT_STATUS_NO_CALLBACK_ACTIVE (0xC0000000 | 0x0258) -#define NT_STATUS_LICENSE_QUOTA_EXCEEDED (0xC0000000 | 0x0259) -#define NT_STATUS_PWD_TOO_SHORT (0xC0000000 | 0x025a) -#define NT_STATUS_PWD_TOO_RECENT (0xC0000000 | 0x025b) -#define NT_STATUS_PWD_HISTORY_CONFLICT (0xC0000000 | 0x025c) -#define NT_STATUS_PLUGPLAY_NO_DEVICE (0xC0000000 | 0x025e) -#define NT_STATUS_UNSUPPORTED_COMPRESSION (0xC0000000 | 0x025f) -#define NT_STATUS_INVALID_HW_PROFILE (0xC0000000 | 0x0260) -#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH (0xC0000000 | 0x0261) -#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND (0xC0000000 | 0x0262) -#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0263) -#define NT_STATUS_RESOURCE_NOT_OWNED (0xC0000000 | 0x0264) -#define NT_STATUS_TOO_MANY_LINKS (0xC0000000 | 0x0265) -#define NT_STATUS_QUOTA_LIST_INCONSISTENT (0xC0000000 | 0x0266) -#define NT_STATUS_FILE_IS_OFFLINE (0xC0000000 | 0x0267) -#define NT_STATUS_NETWORK_SESSION_EXPIRED (0xC0000000 | 0x035c) -#define NT_STATUS_NO_SUCH_JOB (0xC0000000 
| 0xEDE) /* scheduler */ -#define NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP (0xC0000000 | 0x5D0000) -#define NT_STATUS_PENDING 0x00000103 -#endif /* _NTERR_H */ diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c index 67a2d7a793f6ed..a1ddca21c47bf9 100644 --- a/fs/smb/server/smb2misc.c +++ b/fs/smb/server/smb2misc.c @@ -5,7 +5,6 @@ */ #include "glob.h" -#include "nterr.h" #include "smb_common.h" #include "../common/smb2status.h" #include "mgmt/user_session.h" diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index f3184b21757577..27f87a13f20a75 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -5497,6 +5497,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work, info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS); info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen); + /* + * Some applications (e.g. portable apps) cannot run on a ksmbd share + * because only NTFS handles security settings on Windows. + * So although the local fs (EXT4, F2FS, etc.) is not NTFS, + * ksmbd should report the share as NTFS. Later, if needed, we can + * add a fs type parameter to change the reported fs type. + */ len = smbConvertToUTF16((__le16 *)info->FileSystemName, "NTFS", PATH_MAX, conn->local_nls, 0); len = len * 2; diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h index 2baf4aa330ebbe..067b45048c732b 100644 --- a/fs/smb/server/smb_common.h +++ b/fs/smb/server/smb_common.h @@ -3,13 +3,12 @@ * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ -#ifndef __SMB_COMMON_H__ -#define __SMB_COMMON_H__ +#ifndef __SMB_SERVER_COMMON_H__ +#define __SMB_SERVER_COMMON_H__ #include <linux/kernel.h> #include "glob.h" -#include "nterr.h" #include "../common/smbglob.h" #include "../common/smb2pdu.h" #include "../common/fscc.h" @@ -203,4 +202,4 @@ unsigned int ksmbd_server_side_copy_max_chunk_size(void); unsigned int ksmbd_server_side_copy_max_total_size(void); bool is_asterisk(char *p); __le32 smb_map_generic_desired_access(__le32 daccess); -#endif /* __SMB_COMMON_H__ */ +#endif /* __SMB_SERVER_COMMON_H__ */ diff --git a/include/linux/args.h b/include/linux/args.h index 2e8e65d975c778..0562dc51435e2d 100644 --- a/include/linux/args.h +++ b/include/linux/args.h @@ -6,9 +6,9 @@ /* * How do these macros work? * - * In __COUNT_ARGS() _0 to _12 are just placeholders from the start + * In __COUNT_ARGS() _0 to _15 are just placeholders from the start * in order to make sure _n is positioned over the correct number - * from 12 to 0 (depending on X, which is a variadic argument list). + * from 15 to 0 (depending on X, which is a variadic argument list). * They serve no purpose other than occupying a position. Since each * macro parameter must have a distinct identifier, those identifiers * are as good as any. diff --git a/include/linux/file.h b/include/linux/file.h index cf389fde9bc28c..27484b444d3155 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -161,12 +161,10 @@ typedef struct fd_prepare class_fd_prepare_t; /* Do not use directly. */ static inline void class_fd_prepare_destructor(const struct fd_prepare *fdf) { - if (unlikely(fdf->err)) { - if (likely(fdf->__fd >= 0)) - put_unused_fd(fdf->__fd); - if (unlikely(!IS_ERR_OR_NULL(fdf->__file))) - fput(fdf->__file); - } + if (unlikely(fdf->__fd >= 0)) + put_unused_fd(fdf->__fd); + if (unlikely(!IS_ERR_OR_NULL(fdf->__file))) + fput(fdf->__file); } /* Do not use directly.
*/ @@ -230,7 +228,8 @@ static inline int class_fd_prepare_lock_err(const struct fd_prepare *fdf) VFS_WARN_ON_ONCE(fdp->__fd < 0); \ VFS_WARN_ON_ONCE(IS_ERR_OR_NULL(fdp->__file)); \ fd_install(fdp->__fd, fdp->__file); \ - fdp->__fd; \ + retain_and_null_ptr(fdp->__file); \ + take_fd(fdp->__fd); \ }) /* Do not use directly. */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ae7f21aad0ac35..a4d9f964dfdea9 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -369,14 +369,13 @@ enum split_type { SPLIT_TYPE_NON_UNIFORM, }; -bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins); int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsigned int new_order); int folio_split_unmapped(struct folio *folio, unsigned int new_order); -int min_order_for_split(struct folio *folio); +unsigned int min_order_for_split(struct folio *folio); int split_folio_to_list(struct folio *folio, struct list_head *list); -bool folio_split_supported(struct folio *folio, unsigned int new_order, - enum split_type split_type, bool warns); +int folio_check_splittable(struct folio *folio, unsigned int new_order, + enum split_type split_type); int folio_split(struct folio *folio, unsigned int new_order, struct page *page, struct list_head *list); @@ -407,7 +406,7 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_o static inline int try_folio_split_to_order(struct folio *folio, struct page *page, unsigned int new_order) { - if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false)) + if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM)) return split_huge_page_to_order(&folio->page, new_order); return folio_split(folio, new_order, page, NULL); } @@ -631,10 +630,10 @@ static inline int split_huge_page(struct page *page) return -EINVAL; } -static inline int min_order_for_split(struct folio *folio) +static inline unsigned int min_order_for_split(struct folio *folio) { VM_WARN_ON_ONCE_FOLIO(1, folio); - return -EINVAL; + return 0; } static inline int split_folio_to_list(struct folio *folio, struct list_head *list) diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 2fd850f4678b22..58d01ed4cce759 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -417,12 +417,8 @@ struct i3c_bus { * all CCC commands are supported. * @send_ccc_cmd: send a CCC command * This method is mandatory. - * @priv_xfers: do one or several private I3C SDR transfers - * This method is mandatory when i3c_xfers is not implemented. It - * is deprecated. - * @i3c_xfers: do one or several I3C SDR or HDR transfers - * This method is mandatory when priv_xfers is not implemented but - * should be implemented instead of priv_xfers. + * @i3c_xfers: do one or several I3C SDR or HDR transfers. + * This method is mandatory. * @attach_i2c_dev: called every time an I2C device is attached to the bus. * This is a good place to attach master controller specific * data to I2C devices. 
@@ -478,10 +474,6 @@ struct i3c_master_controller_ops { const struct i3c_ccc_cmd *cmd); int (*send_ccc_cmd)(struct i3c_master_controller *master, struct i3c_ccc_cmd *cmd); - /* Deprecated, please use i3c_xfers() */ - int (*priv_xfers)(struct i3c_dev_desc *dev, - struct i3c_priv_xfer *xfers, - int nxfers); int (*i3c_xfers)(struct i3c_dev_desc *dev, struct i3c_xfer *xfers, int nxfers, enum i3c_xfer_mode mode); diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 952d3c8dd6b7a2..62f81bbeb4906b 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -730,22 +730,6 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig } #endif -static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, - const struct irq_domain_ops *ops, - void *host_data) -{ - struct irq_domain_info info = { - .fwnode = of_fwnode_handle(of_node), - .hwirq_max = ~0U, - .ops = ops, - .host_data = host_data, - }; - struct irq_domain *d; - - d = irq_domain_instantiate(&info); - return IS_ERR(d) ? NULL : d; -} - static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node, unsigned int size, const struct irq_domain_ops *ops, diff --git a/include/linux/mm.h b/include/linux/mm.h index 7a1819c20643bb..15076261d0c2eb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -438,7 +438,7 @@ enum { #define VM_NOHUGEPAGE INIT_VM_FLAG(NOHUGEPAGE) #define VM_MERGEABLE INIT_VM_FLAG(MERGEABLE) #define VM_STACK INIT_VM_FLAG(STACK) -#ifdef CONFIG_STACK_GROWS_UP +#ifdef CONFIG_STACK_GROWSUP #define VM_STACK_EARLY INIT_VM_FLAG(STACK_EARLY) #else #define VM_STACK_EARLY VM_NONE diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 4398e027f450ec..75ef7c9f9307ff 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -2289,7 +2289,7 @@ void sparse_init(void); #else #define sparse_init() do {} while (0) #define sparse_index_init(_sec, _nid) do {} while (0) -#define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0) +#define sparse_vmemmap_init_nid_early(_nid) do {} while (0) #define sparse_vmemmap_init_nid_late(_nid) do {} while (0) #define pfn_in_present_section pfn_valid #define subsection_map_init(_pfn, _nr_pages) do {} while (0) diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c585939b6cd60f..a6624edb7226a1 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -344,6 +344,7 @@ struct nfs4_copy_state { #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ #define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */ +#define NFS_INO_REQ_DIR_DELEG (13) /* Request a directory delegation */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index d30c0245031c07..c58b870f31eeaf 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -172,6 +172,11 @@ struct nfs_server { #define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000 #define NFS_MOUNT_NETUNREACH_FATAL 0x40000000 + unsigned int automount_inherit; /* Properties inherited by automount */ +#define NFS_AUTOMOUNT_INHERIT_BSIZE 0x0001 +#define NFS_AUTOMOUNT_INHERIT_RSIZE 0x0002 +#define NFS_AUTOMOUNT_INHERIT_WSIZE 0x0004 + unsigned int caps; /* server capabilities */ __u64 fattr_valid; /* Valid attributes */ unsigned int rsize; /* read size */ @@ -305,6 +310,7 @@ struct nfs_server { #define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8) #define NFS_CAP_OFFLOAD_STATUS (1U << 9) 
#define NFS_CAP_ZERO_RANGE (1U << 10) +#define NFS_CAP_DIR_DELEG (1U << 11) #define NFS_CAP_OPEN_XOR (1U << 12) #define NFS_CAP_DELEGTIME (1U << 13) #define NFS_CAP_POSIX_LOCK (1U << 14) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 31463286402fd2..79fe2dfb470fc6 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1092,12 +1092,19 @@ struct nfs4_getattr_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; + bool get_dir_deleg; +}; + +struct nfs4_gdd_res { + u32 status; + nfs4_stateid deleg; }; struct nfs4_getattr_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; + struct nfs4_gdd_res * gdd_res; }; struct nfs4_link_arg { @@ -1801,7 +1808,8 @@ struct nfs_rpc_ops { int (*unlink_done) (struct rpc_task *, struct inode *); void (*rename_setup) (struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry); + struct dentry *new_dentry, + struct inode *same_parent); void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); diff --git a/include/linux/of.h b/include/linux/of.h index 01bb3affcd4977..9bbdcf25a2b448 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -316,6 +316,9 @@ extern struct property *of_find_property(const struct device_node *np, extern bool of_property_read_bool(const struct device_node *np, const char *propname); extern int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size); +extern int of_property_read_u8_index(const struct device_node *np, + const char *propname, + u32 index, u8 *out_value); extern int of_property_read_u16_index(const struct device_node *np, const char *propname, u32 index, u16 *out_value); @@ -648,6 +651,12 @@ static inline int of_property_count_elems_of_size(const struct device_node *np, return -ENOSYS; } +static inline int of_property_read_u8_index(const struct device_node *np, + const char *propname, u32 index, u8 *out_value) +{ + return -ENOSYS; +} + static inline int of_property_read_u16_index(const struct device_node *np, const char *propname, u32 index, u16 *out_value) { diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h index c92167ff8a7fbd..a36b472627dec8 100644 --- a/include/linux/rseq_entry.h +++ b/include/linux/rseq_entry.h @@ -596,7 +596,7 @@ static __always_inline void rseq_exit_to_user_mode_legacy(void) void __rseq_debug_syscall_return(struct pt_regs *regs); -static inline void rseq_debug_syscall_return(struct pt_regs *regs) +static __always_inline void rseq_debug_syscall_return(struct pt_regs *regs) { if (static_branch_unlikely(&rseq_debug_enabled)) __rseq_debug_syscall_return(regs); diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h index 01da4582db6d08..8ec0ebfaef04fa 100644 --- a/include/linux/rtc/ds1685.h +++ b/include/linux/rtc/ds1685.h @@ -324,7 +324,6 @@ struct ds1685_rtc_platform_data { #define RTC_SQW_2HZ 0x0f /* 0 1 1 1 1 */ #define RTC_SQW_0HZ 0x00 /* 0 0 0 0 0 */ #define RTC_SQW_32768HZ 32768 /* 1 - - - - */ -#define RTC_MAX_USER_FREQ 8192 /* diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index f22bf915dcf6e6..98939cb664cfa8 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -25,12 +25,14 @@ void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task, 
void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); +void xprt_enqueue_bc_request(struct rpc_rqst *req); /* Socket backchannel transport methods */ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); void xprt_free_bc_rqst(struct rpc_rqst *req); unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt); +void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv); /* * Determine if a shared backchannel is in use @@ -68,5 +70,10 @@ static inline void set_bc_enabled(struct svc_serv *serv) static inline void xprt_free_bc_request(struct rpc_rqst *req) { } + +static inline void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv) +{ + svc_destroy(serv); +} #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #endif /* _LINUX_SUNRPC_BC_XPRT_H */
diff --git a/include/sound/hda-sdw-bpt.h b/include/sound/hda-sdw-bpt.h index f649549b75d52d..9b654c31829ad6 100644 --- a/include/sound/hda-sdw-bpt.h +++ b/include/sound/hda-sdw-bpt.h @@ -30,6 +30,8 @@ int hda_sdw_bpt_wait(struct device *dev, struct hdac_ext_stream *bpt_tx_stream, int hda_sdw_bpt_close(struct device *dev, struct hdac_ext_stream *bpt_tx_stream, struct snd_dma_buffer *dmab_tx_bdl, struct hdac_ext_stream *bpt_rx_stream, struct snd_dma_buffer *dmab_rx_bdl); + +unsigned int hda_sdw_bpt_get_buf_size_alignment(unsigned int dma_bandwidth); #else static inline int hda_sdw_bpt_open(struct device *dev, int link_id, struct hdac_ext_stream **bpt_tx_stream, @@ -64,6 +66,11 @@ static inline int hda_sdw_bpt_close(struct device *dev, struct hdac_ext_stream * WARN_ONCE(1, "SoundWire BPT is disabled"); return -EOPNOTSUPP; } + +static inline unsigned int hda_sdw_bpt_get_buf_size_alignment(unsigned int dma_bandwidth) +{ + return 0; +} #endif #endif /* __HDA_SDW_BPT_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 4e0c1d8af09f76..f11bfc6b9f428a 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -380,6 +380,9 @@ struct hdac_bus { /* factor used to derive STRIPE control value */ unsigned int sdo_limit; + + /* address offset between host and hdac */ + dma_addr_t addr_offset; }; int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
diff --git a/include/trace/events/ceph.h b/include/trace/events/ceph.h new file mode 100644 index 00000000000000..08cb0659fbfc78 --- /dev/null +++ b/include/trace/events/ceph.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Ceph filesystem support module tracepoints + * + * Copyright (C) 2025 IONOS SE. All Rights Reserved. + * Written by Max Kellermann (max.kellermann@ionos.com) + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ceph + +#if !defined(_TRACE_CEPH_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CEPH_H + +#include <linux/tracepoint.h> + +#define ceph_mdsc_suspend_reasons \ + EM(ceph_mdsc_suspend_reason_no_mdsmap, "no-mdsmap") \ + EM(ceph_mdsc_suspend_reason_no_active_mds, "no-active-mds") \ + EM(ceph_mdsc_suspend_reason_rejected, "rejected") \ + E_(ceph_mdsc_suspend_reason_session, "session") + +#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY + +#undef EM +#undef E_ +#define EM(a, b) a, +#define E_(a, b) a + +enum ceph_mdsc_suspend_reason { ceph_mdsc_suspend_reasons } __mode(byte); + +#endif + +/* + * Export enum symbols via userspace.
+ */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +ceph_mdsc_suspend_reasons; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. + */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + +TRACE_EVENT(ceph_mdsc_submit_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req), + + TP_ARGS(mdsc, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(u64, ino) + __field(u64, snap) + ), + + TP_fast_assign( + struct inode *inode; + + __entry->tid = req->r_tid; + __entry->op = req->r_op; + + inode = req->r_inode; + if (inode == NULL && req->r_dentry) + inode = d_inode(req->r_dentry); + + if (inode) { + __entry->ino = ceph_ino(inode); + __entry->snap = ceph_snap(inode); + } else { + __entry->ino = __entry->snap = 0; + } + ), + + TP_printk("R=%llu op=%s ino=%llx,%llx", + __entry->tid, + ceph_mds_op_name(__entry->op), + __entry->ino, __entry->snap) +); + +TRACE_EVENT(ceph_mdsc_suspend_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + struct ceph_mds_request *req, + enum ceph_mdsc_suspend_reason reason), + + TP_ARGS(mdsc, session, req, reason), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(int, mds) + __field(enum ceph_mdsc_suspend_reason, reason) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + __entry->mds = session ? session->s_mds : -1; + __entry->reason = reason; + ), + + TP_printk("R=%llu op=%s reason=%s", + __entry->tid, + ceph_mds_op_name(__entry->op), + __print_symbolic(__entry->reason, ceph_mdsc_suspend_reasons)) +); + +TRACE_EVENT(ceph_mdsc_resume_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req), + + TP_ARGS(mdsc, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + ), + + TP_printk("R=%llu op=%s", + __entry->tid, + ceph_mds_op_name(__entry->op)) +); + +TRACE_EVENT(ceph_mdsc_send_request, + TP_PROTO(struct ceph_mds_session *session, + struct ceph_mds_request *req), + + TP_ARGS(session, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(int, mds) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + __entry->mds = session->s_mds; + ), + + TP_printk("R=%llu op=%s mds=%d", + __entry->tid, + ceph_mds_op_name(__entry->op), + __entry->mds) +); + +TRACE_EVENT(ceph_mdsc_complete_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req), + + TP_ARGS(mdsc, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(int, err) + __field(unsigned long, latency_ns) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + __entry->err = req->r_err; + __entry->latency_ns = req->r_end_latency - req->r_start_latency; + ), + + TP_printk("R=%llu op=%s err=%d latency_ns=%lu", + __entry->tid, + ceph_mds_op_name(__entry->op), + __entry->err, + __entry->latency_ns) +); + +TRACE_EVENT(ceph_handle_caps, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + int op, + const struct ceph_vino *vino, + struct ceph_inode_info *inode, + u32 seq, u32 mseq, u32 issue_seq), + + TP_ARGS(mdsc, session, op, vino, inode, seq, mseq, issue_seq), + + TP_STRUCT__entry( + __field(int, mds) + __field(int, op) + __field(u64, ino) + __field(u64, snap) + __field(u32, seq) + 
__field(u32, mseq) + __field(u32, issue_seq) + ), + + TP_fast_assign( + __entry->mds = session->s_mds; + __entry->op = op; + __entry->ino = vino->ino; + __entry->snap = vino->snap; + __entry->seq = seq; + __entry->mseq = mseq; + __entry->issue_seq = issue_seq; + ), + + TP_printk("mds=%d op=%s vino=%llx.%llx seq=%u iseq=%u mseq=%u", + __entry->mds, + ceph_cap_op_name(__entry->op), + __entry->ino, + __entry->snap, + __entry->seq, + __entry->issue_seq, + __entry->mseq) +); + +#undef EM +#undef E_ +#endif /* _TRACE_CEPH_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 5a049eeaeccea5..d3ce75ba938a84 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -60,7 +60,7 @@ struct snd_cea_861_aud_if { unsigned char db2_sf_ss; /* sample frequency and size */ unsigned char db3; /* not used, all zeros */ unsigned char db4_ca; /* channel allocation code */ - unsigned char db5_dminh_lsv; /* downmix inhibit & level-shit values */ + unsigned char db5_dminh_lsv; /* downmix inhibit & level-shift values */ }; /**************************************************************************** diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 5d130c57843532..6cb24cdf8e6846 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2536,6 +2536,9 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer) goto out_wake; } + /* any generated CQE posted past this time should wake us up */ + iowq->cq_tail = iowq->cq_min_tail; + hrtimer_update_function(&iowq->t, io_cqring_timer_wakeup); hrtimer_set_expires(timer, iowq->timeout); return HRTIMER_RESTART; diff --git a/kernel/cpu.c b/kernel/cpu.c index b674fdf96208be..8df2d773fe3b52 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -249,6 +249,14 @@ err: return ret; } +/* + * The former STARTING/DYING states, ran with IRQs disabled and must not fail. + */ +static bool cpuhp_is_atomic_state(enum cpuhp_state state) +{ + return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; +} + #ifdef CONFIG_SMP static bool cpuhp_is_ap_state(enum cpuhp_state state) { @@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup) complete(done); } -/* - * The former STARTING/DYING states, ran with IRQs disabled and must not fail. - */ -static bool cpuhp_is_atomic_state(enum cpuhp_state state) -{ - return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; -} - /* Synchronization state management */ enum cpuhp_sync_state { SYNC_STATE_DEAD, @@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, else ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); #else - ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); + if (cpuhp_is_atomic_state(state)) { + guard(irqsave)(); + ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); + /* STARTING/DYING must not fail! 
*/ + WARN_ON_ONCE(ret); + } else { + ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); + } #endif BUG_ON(ret && !bringup); return ret; diff --git a/kernel/events/core.c b/kernel/events/core.c index ece716879cbc24..dad0d3d2e85f7d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2317,8 +2317,6 @@ out: perf_event__header_size(leader); } -static void sync_child_event(struct perf_event *child_event); - static void perf_child_detach(struct perf_event *event) { struct perf_event *parent_event = event->parent; @@ -2337,7 +2335,6 @@ static void perf_child_detach(struct perf_event *event) lockdep_assert_held(&parent_event->child_mutex); */ - sync_child_event(event); list_del_init(&event->child_list); } @@ -4588,6 +4585,7 @@ out: static void perf_remove_from_owner(struct perf_event *event); static void perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx, + struct task_struct *task, bool revoke); /* @@ -4615,7 +4613,7 @@ static void perf_event_remove_on_exec(struct perf_event_context *ctx) modified = true; - perf_event_exit_event(event, ctx, false); + perf_event_exit_event(event, ctx, ctx->task, false); } raw_spin_lock_irqsave(&ctx->lock, flags); @@ -12518,7 +12516,7 @@ static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event, /* * De-schedule the event and mark it REVOKED. */ - perf_event_exit_event(event, ctx, true); + perf_event_exit_event(event, ctx, ctx->task, true); /* * All _free_event() bits that rely on event->pmu: @@ -14075,14 +14073,13 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) } EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); -static void sync_child_event(struct perf_event *child_event) +static void sync_child_event(struct perf_event *child_event, + struct task_struct *task) { struct perf_event *parent_event = child_event->parent; u64 child_val; if (child_event->attr.inherit_stat) { - struct task_struct *task = child_event->ctx->task; - if (task && task != TASK_TOMBSTONE) perf_event_read_event(child_event, task); } @@ -14101,7 +14098,9 @@ static void sync_child_event(struct perf_event *child_event) static void perf_event_exit_event(struct perf_event *event, - struct perf_event_context *ctx, bool revoke) + struct perf_event_context *ctx, + struct task_struct *task, + bool revoke) { struct perf_event *parent_event = event->parent; unsigned long detach_flags = DETACH_EXIT; @@ -14124,6 +14123,9 @@ perf_event_exit_event(struct perf_event *event, mutex_lock(&parent_event->child_mutex); /* PERF_ATTACH_ITRACE might be set concurrently */ attach_state = READ_ONCE(event->attach_state); + + if (attach_state & PERF_ATTACH_CHILD) + sync_child_event(event, task); } if (revoke) @@ -14215,7 +14217,7 @@ static void perf_event_exit_task_context(struct task_struct *task, bool exit) perf_event_task(task, ctx, 0); list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry) - perf_event_exit_event(child_event, ctx, false); + perf_event_exit_event(child_event, ctx, exit ? task : NULL, false); mutex_unlock(&ctx->mutex); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index f11ceb8be8c419..d546d32390a81d 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -79,7 +79,7 @@ struct uprobe { * The generic code assumes that it has two members of unknown type * owned by the arch-specific code: * - * insn - copy_insn() saves the original instruction here for + * insn - copy_insn() saves the original instruction here for * arch_uprobe_analyze_insn(). 
* * ixol - potentially modified instruction to execute out of @@ -107,8 +107,8 @@ static LIST_HEAD(delayed_uprobe_list); * allocated. */ struct xol_area { - wait_queue_head_t wq; /* if all slots are busy */ - unsigned long *bitmap; /* 0 = free slot */ + wait_queue_head_t wq; /* if all slots are busy */ + unsigned long *bitmap; /* 0 = free slot */ struct page *page; /* @@ -116,7 +116,7 @@ struct xol_area { * itself. The probed process or a naughty kernel module could make * the vma go away, and we must handle that reasonably gracefully. */ - unsigned long vaddr; /* Page(s) of instruction slots */ + unsigned long vaddr; /* Page(s) of instruction slots */ }; static void uprobe_warn(struct task_struct *t, const char *msg) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0bb29316b4362c..8b1b4c8a4f54c5 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -2470,6 +2470,9 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act) if (retval < 0) return retval; + if (!act->affinity) + act->affinity = cpu_online_mask; + retval = __setup_irq(irq, desc, act); if (retval) diff --git a/kernel/liveupdate/Kconfig b/kernel/liveupdate/Kconfig index 9b2515f31afb4b..d2aeaf13c3acb8 100644 --- a/kernel/liveupdate/Kconfig +++ b/kernel/liveupdate/Kconfig @@ -54,6 +54,7 @@ config KEXEC_HANDOVER_ENABLE_DEFAULT config LIVEUPDATE bool "Live Update Orchestrator" depends on KEXEC_HANDOVER + depends on SHMEM help Enable the Live Update Orchestrator. Live Update is a mechanism, typically based on kexec, that allows the kernel to be updated diff --git a/kernel/liveupdate/luo_core.c b/kernel/liveupdate/luo_core.c index f7ecaf7740d19d..944663d99dd9f3 100644 --- a/kernel/liveupdate/luo_core.c +++ b/kernel/liveupdate/luo_core.c @@ -399,10 +399,8 @@ static long luo_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) int err; nr = _IOC_NR(cmd); - if (nr < LIVEUPDATE_CMD_BASE || - (nr - LIVEUPDATE_CMD_BASE) >= ARRAY_SIZE(luo_ioctl_ops)) { + if (nr - LIVEUPDATE_CMD_BASE >= ARRAY_SIZE(luo_ioctl_ops)) return -EINVAL; - } ucmd.ubuffer = (void __user *)arg; err = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer); diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c index ddff87917b217c..a32a777f6df8f3 100644 --- a/kernel/liveupdate/luo_file.c +++ b/kernel/liveupdate/luo_file.c @@ -554,17 +554,20 @@ int luo_retrieve_file(struct luo_file_set *file_set, u64 token, { struct liveupdate_file_op_args args = {0}; struct luo_file *luo_file; + bool found = false; int err; if (list_empty(&file_set->files_list)) return -ENOENT; list_for_each_entry(luo_file, &file_set->files_list, list) { - if (luo_file->token == token) + if (luo_file->token == token) { + found = true; break; + } } - if (luo_file->token != token) + if (!found) return -ENOENT; guard(mutex)(&luo_file->mutex); diff --git a/lib/bug.c b/lib/bug.c index edd9041f89f3aa..623c467a8b76c7 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -173,6 +173,9 @@ struct bug_entry *find_bug(unsigned long bugaddr) return module_find_bug(bugaddr); } +__diag_push(); +__diag_ignore(GCC, all, "-Wsuggest-attribute=format", + "Not a valid __printf() conversion candidate."); static void __warn_printf(const char *fmt, struct pt_regs *regs) { if (!fmt) @@ -192,6 +195,7 @@ static void __warn_printf(const char *fmt, struct pt_regs *regs) printk("%s", fmt); } +__diag_pop(); static enum bug_trap_type __report_bug(struct bug_entry *bug, unsigned long bugaddr, struct pt_regs *regs) { @@ -262,7 +266,7 @@ enum bug_trap_type report_bug_entry(struct bug_entry *bug, 
struct pt_regs *regs) bool rcu = false; rcu = warn_rcu_enter(); - ret = __report_bug(bug, 0, regs); + ret = __report_bug(bug, bug_addr(bug), regs); warn_rcu_exit(rcu); return ret; diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index a3647352bff600..6871a41e5069f7 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -61,7 +61,8 @@ config CRYPTO_LIB_CHACHA_ARCH default y if ARM64 && KERNEL_MODE_NEON default y if MIPS && CPU_MIPS32_R2 default y if PPC64 && CPU_LITTLE_ENDIAN && VSX - default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \ + RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS default y if S390 default y if X86_64 @@ -184,7 +185,8 @@ config CRYPTO_LIB_SHA256_ARCH default y if ARM64 default y if MIPS && CPU_CAVIUM_OCTEON default y if PPC && SPE - default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \ + RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS default y if S390 default y if SPARC64 default y if X86_64 @@ -202,7 +204,8 @@ config CRYPTO_LIB_SHA512_ARCH default y if ARM && !CPU_V7M default y if ARM64 default y if MIPS && CPU_CAVIUM_OCTEON - default y if RISCV && 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO + default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \ + RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS default y if S390 default y if SPARC64 default y if X86_64 diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index b5346cebbb5519..330ab65b29c409 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o obj-$(CONFIG_CRYPTO_LIB_BLAKE2B) += libblake2b.o libblake2b-y := blake2b.o -CFLAGS_blake2b.o := -Wframe-larger-than=4096 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105930 ifeq ($(CONFIG_CRYPTO_LIB_BLAKE2B_ARCH),y) CFLAGS_blake2b.o += -I$(src)/$(SRCARCH) libblake2b-$(CONFIG_ARM) += arm/blake2b-neon-core.o diff --git a/lib/crypto/blake2b.c b/lib/crypto/blake2b.c index 09c6d65d8a6e63..581b7f8486fae8 100644 --- a/lib/crypto/blake2b.c +++ b/lib/crypto/blake2b.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> +#include <linux/unroll.h> #include <linux/types.h> static const u8 blake2b_sigma[12][16] = { @@ -73,31 +74,26 @@ blake2b_compress_generic(struct blake2b_ctx *ctx, b = ror64(b ^ c, 63); \ } while (0) -#define ROUND(r) do { \ - G(r, 0, v[0], v[ 4], v[ 8], v[12]); \ - G(r, 1, v[1], v[ 5], v[ 9], v[13]); \ - G(r, 2, v[2], v[ 6], v[10], v[14]); \ - G(r, 3, v[3], v[ 7], v[11], v[15]); \ - G(r, 4, v[0], v[ 5], v[10], v[15]); \ - G(r, 5, v[1], v[ 6], v[11], v[12]); \ - G(r, 6, v[2], v[ 7], v[ 8], v[13]); \ - G(r, 7, v[3], v[ 4], v[ 9], v[14]); \ -} while (0) - ROUND(0); - ROUND(1); - ROUND(2); - ROUND(3); - ROUND(4); - ROUND(5); - ROUND(6); - ROUND(7); - ROUND(8); - ROUND(9); - ROUND(10); - ROUND(11); - +#ifdef CONFIG_64BIT + /* + * Unroll the rounds loop to enable constant-folding of the + * blake2b_sigma values. Seems worthwhile on 64-bit kernels. + * Not worthwhile on 32-bit kernels because the code size is + * already so large there due to BLAKE2b using 64-bit words. 
+ */ + unrolled_full +#endif + for (int r = 0; r < 12; r++) { + G(r, 0, v[0], v[4], v[8], v[12]); + G(r, 1, v[1], v[5], v[9], v[13]); + G(r, 2, v[2], v[6], v[10], v[14]); + G(r, 3, v[3], v[7], v[11], v[15]); + G(r, 4, v[0], v[5], v[10], v[15]); + G(r, 5, v[1], v[6], v[11], v[12]); + G(r, 6, v[2], v[7], v[8], v[13]); + G(r, 7, v[3], v[4], v[9], v[14]); + } #undef G -#undef ROUND for (i = 0; i < 8; ++i) ctx->h[i] ^= v[i] ^ v[i + 8]; diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 6182c21ed943d8..71578a08474233 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> +#include <linux/unroll.h> #include <linux/types.h> static const u8 blake2s_sigma[10][16] = { @@ -71,29 +72,22 @@ blake2s_compress_generic(struct blake2s_ctx *ctx, b = ror32(b ^ c, 7); \ } while (0) -#define ROUND(r) do { \ - G(r, 0, v[0], v[ 4], v[ 8], v[12]); \ - G(r, 1, v[1], v[ 5], v[ 9], v[13]); \ - G(r, 2, v[2], v[ 6], v[10], v[14]); \ - G(r, 3, v[3], v[ 7], v[11], v[15]); \ - G(r, 4, v[0], v[ 5], v[10], v[15]); \ - G(r, 5, v[1], v[ 6], v[11], v[12]); \ - G(r, 6, v[2], v[ 7], v[ 8], v[13]); \ - G(r, 7, v[3], v[ 4], v[ 9], v[14]); \ -} while (0) - ROUND(0); - ROUND(1); - ROUND(2); - ROUND(3); - ROUND(4); - ROUND(5); - ROUND(6); - ROUND(7); - ROUND(8); - ROUND(9); - + /* + * Unroll the rounds loop to enable constant-folding of the + * blake2s_sigma values. + */ + unrolled_full + for (int r = 0; r < 10; r++) { + G(r, 0, v[0], v[4], v[8], v[12]); + G(r, 1, v[1], v[5], v[9], v[13]); + G(r, 2, v[2], v[6], v[10], v[14]); + G(r, 3, v[3], v[7], v[11], v[15]); + G(r, 4, v[0], v[5], v[10], v[15]); + G(r, 5, v[1], v[6], v[11], v[12]); + G(r, 6, v[2], v[7], v[8], v[13]); + G(r, 7, v[3], v[4], v[9], v[14]); + } #undef G -#undef ROUND for (i = 0; i < 8; ++i) ctx->h[i] ^= v[i] ^ v[i + 8]; diff --git a/lib/crypto/riscv/chacha-riscv64-zvkb.S b/lib/crypto/riscv/chacha-riscv64-zvkb.S index b777d0b4e37969..3d183ec818f52a 100644 --- a/lib/crypto/riscv/chacha-riscv64-zvkb.S +++ b/lib/crypto/riscv/chacha-riscv64-zvkb.S @@ -60,7 +60,8 @@ #define VL t2 #define STRIDE t3 #define ROUND_CTR t4 -#define KEY0 s0 +#define KEY0 t5 +// Avoid s0/fp to allow for unwinding #define KEY1 s1 #define KEY2 s2 #define KEY3 s3 @@ -143,7 +144,6 @@ // The updated 32-bit counter is written back to state->x[12] before returning. 
SYM_FUNC_START(chacha_zvkb) addi sp, sp, -96 - sd s0, 0(sp) sd s1, 8(sp) sd s2, 16(sp) sd s3, 24(sp) @@ -280,7 +280,6 @@ SYM_FUNC_START(chacha_zvkb) bnez NBLOCKS, .Lblock_loop sw COUNTER, 48(STATEP) - ld s0, 0(sp) ld s1, 8(sp) ld s2, 16(sp) ld s3, 24(sp) diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h index a1eff023e928a0..8cb369b63e08ed 100644 --- a/mm/damon/tests/core-kunit.h +++ b/mm/damon/tests/core-kunit.h @@ -924,7 +924,7 @@ static void damos_test_commit_for(struct kunit *test, struct damos *dst, } } -static void damos_test_commit(struct kunit *test) +static void damos_test_commit_pageout(struct kunit *test) { damos_test_commit_for(test, &(struct damos){ @@ -945,6 +945,10 @@ static void damos_test_commit(struct kunit *test) DAMOS_WMARK_FREE_MEM_RATE, 800, 50, 30}, }); +} + +static void damos_test_commit_migrate_hot(struct kunit *test) +{ damos_test_commit_for(test, &(struct damos){ .pattern = (struct damos_access_pattern){ @@ -1230,7 +1234,8 @@ static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damos_test_commit_quota), KUNIT_CASE(damos_test_commit_dests), KUNIT_CASE(damos_test_commit_filter), - KUNIT_CASE(damos_test_commit), + KUNIT_CASE(damos_test_commit_pageout), + KUNIT_CASE(damos_test_commit_migrate_hot), KUNIT_CASE(damon_test_commit_target_regions), KUNIT_CASE(damos_test_filter_out), KUNIT_CASE(damon_test_feed_loop_next_input), diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f7c565f11a985a..40cf59301c21aa 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3464,23 +3464,6 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio, } } -/* Racy check whether the huge page can be split */ -bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) -{ - int extra_pins; - - /* Additional pins from page cache */ - if (folio_test_anon(folio)) - extra_pins = folio_test_swapcache(folio) ? - folio_nr_pages(folio) : 0; - else - extra_pins = folio_nr_pages(folio); - if (pextra_pins) - *pextra_pins = extra_pins; - return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - - caller_pins; -} - static bool page_range_has_hwpoisoned(struct page *page, long nr_pages) { for (; nr_pages; page++, nr_pages--) @@ -3697,15 +3680,40 @@ static int __split_unmapped_folio(struct folio *folio, int new_order, return 0; } -bool folio_split_supported(struct folio *folio, unsigned int new_order, - enum split_type split_type, bool warns) +/** + * folio_check_splittable() - check if a folio can be split to a given order + * @folio: folio to be split + * @new_order: the smallest order of the after split folios (since buddy + * allocator like split generates folios with orders from @folio's + * order - 1 to new_order). + * @split_type: uniform or non-uniform split + * + * folio_check_splittable() checks if @folio can be split to @new_order using + * @split_type method. The truncated folio check must come first. + * + * Context: folio must be locked. + * + * Return: 0 - @folio can be split to @new_order, otherwise an error number is + * returned. + */ +int folio_check_splittable(struct folio *folio, unsigned int new_order, + enum split_type split_type) { + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + /* + * Folios that just got truncated cannot get split. Signal to the + * caller that there was a race. + * + * TODO: this will also currently refuse folios without a mapping in the + * swapcache (shmem or to-be-anon folios). 
+ */ + if (!folio->mapping && !folio_test_anon(folio)) + return -EBUSY; + if (folio_test_anon(folio)) { /* order-1 is not supported for anonymous THP. */ - VM_WARN_ONCE(warns && new_order == 1, - "Cannot split to order-1 folio"); if (new_order == 1) - return false; + return -EINVAL; } else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) { if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !mapping_large_folio_support(folio->mapping)) { @@ -3726,9 +3734,7 @@ bool folio_split_supported(struct folio *folio, unsigned int new_order, * case, the mapping does not actually support large * folios properly. */ - VM_WARN_ONCE(warns, - "Cannot split file folio to non-0 order"); - return false; + return -EINVAL; } } @@ -3741,19 +3747,31 @@ bool folio_split_supported(struct folio *folio, unsigned int new_order, * here. */ if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) { - VM_WARN_ONCE(warns, - "Cannot split swapcache folio to non-0 order"); - return false; + return -EINVAL; } - return true; + if (is_huge_zero_folio(folio)) + return -EINVAL; + + if (folio_test_writeback(folio)) + return -EBUSY; + + return 0; +} + +/* Number of folio references from the pagecache or the swapcache. */ +static unsigned int folio_cache_ref_count(const struct folio *folio) +{ + if (folio_test_anon(folio) && !folio_test_swapcache(folio)) + return 0; + return folio_nr_pages(folio); } static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order, struct page *split_at, struct xa_state *xas, struct address_space *mapping, bool do_lru, struct list_head *list, enum split_type split_type, - pgoff_t end, int *nr_shmem_dropped, int extra_pins) + pgoff_t end, int *nr_shmem_dropped) { struct folio *end_folio = folio_next(folio); struct folio *new_folio, *next; @@ -3764,10 +3782,9 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n VM_WARN_ON_ONCE(!mapping && end); /* Prevent deferred_split_scan() touching ->_refcount */ ds_queue = folio_split_queue_lock(folio); - if (folio_ref_freeze(folio, 1 + extra_pins)) { + if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) { struct swap_cluster_info *ci = NULL; struct lruvec *lruvec; - int expected_refs; if (old_order > 1) { if (!list_empty(&folio->_deferred_list)) { @@ -3835,8 +3852,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n zone_device_private_split_cb(folio, new_folio); - expected_refs = folio_expected_ref_count(new_folio) + 1; - folio_ref_unfreeze(new_folio, expected_refs); + folio_ref_unfreeze(new_folio, + folio_cache_ref_count(new_folio) + 1); if (do_lru) lru_add_split_folio(folio, new_folio, lruvec, list); @@ -3879,8 +3896,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n * Otherwise, a parallel folio_try_get() can grab @folio * and its caller can see stale page cache entries. 
*/ - expected_refs = folio_expected_ref_count(folio) + 1; - folio_ref_unfreeze(folio, expected_refs); + folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1); if (do_lru) unlock_page_lruvec(lruvec); @@ -3929,40 +3945,27 @@ static int __folio_split(struct folio *folio, unsigned int new_order, struct folio *new_folio, *next; int nr_shmem_dropped = 0; int remap_flags = 0; - int extra_pins, ret; + int ret; pgoff_t end = 0; - bool is_hzp; VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio); VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio); - if (folio != page_folio(split_at) || folio != page_folio(lock_at)) - return -EINVAL; - - /* - * Folios that just got truncated cannot get split. Signal to the - * caller that there was a race. - * - * TODO: this will also currently refuse shmem folios that are in the - * swapcache. - */ - if (!is_anon && !folio->mapping) - return -EBUSY; - - if (new_order >= old_order) - return -EINVAL; - - if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true)) - return -EINVAL; + if (folio != page_folio(split_at) || folio != page_folio(lock_at)) { + ret = -EINVAL; + goto out; + } - is_hzp = is_huge_zero_folio(folio); - if (is_hzp) { - pr_warn_ratelimited("Called split_huge_page for huge zero page\n"); - return -EBUSY; + if (new_order >= old_order) { + ret = -EINVAL; + goto out; } - if (folio_test_writeback(folio)) - return -EBUSY; + ret = folio_check_splittable(folio, new_order, split_type); + if (ret) { + VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio"); + goto out; + } if (is_anon) { /* @@ -4027,7 +4030,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order, * Racy check if we can split the page, before unmap_folio() will * split PMDs */ - if (!can_split_folio(folio, 1, &extra_pins)) { + if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) { ret = -EAGAIN; goto out_unlock; } @@ -4050,8 +4053,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order, } ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping, - true, list, split_type, end, &nr_shmem_dropped, - extra_pins); + true, list, split_type, end, &nr_shmem_dropped); fail: if (mapping) xas_unlock(&xas); @@ -4125,20 +4127,20 @@ out: */ int folio_split_unmapped(struct folio *folio, unsigned int new_order) { - int extra_pins, ret = 0; + int ret = 0; VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio); VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio); VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio); VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio); - if (!can_split_folio(folio, 1, &extra_pins)) + if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) return -EAGAIN; local_irq_disable(); ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL, NULL, false, NULL, SPLIT_TYPE_UNIFORM, - 0, NULL, extra_pins); + 0, NULL); local_irq_enable(); return ret; } @@ -4230,16 +4232,29 @@ int folio_split(struct folio *folio, unsigned int new_order, SPLIT_TYPE_NON_UNIFORM); } -int min_order_for_split(struct folio *folio) +/** + * min_order_for_split() - get the minimum order @folio can be split to + * @folio: folio to split + * + * min_order_for_split() tells the minimum order @folio can be split to. + * If a file-backed folio is truncated, 0 will be returned. Any subsequent + * split attempt should get -EBUSY from split checking code. 
+ * + * Return: @folio's minimum order for split + */ +unsigned int min_order_for_split(struct folio *folio) { if (folio_test_anon(folio)) return 0; - if (!folio->mapping) { - if (folio_test_pmd_mappable(folio)) - count_vm_event(THP_SPLIT_PAGE_FAILED); - return -EBUSY; - } + /* + * If the folio got truncated, we don't know the previous mapping and + * consequently the old min order. But it doesn't matter, as any split + * attempt will immediately fail with -EBUSY as the folio cannot get + * split until freed. + */ + if (!folio->mapping) + return 0; return mapping_min_folio_order(folio->mapping); } @@ -4631,7 +4646,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, * can be split or not. So skip the check here. */ if (!folio_test_private(folio) && - !can_split_folio(folio, 0, NULL)) + folio_expected_ref_count(folio) != folio_ref_count(folio)) goto next; if (!folio_trylock(folio)) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 9e7815b4f0583f..51273baec9e5dc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6579,6 +6579,7 @@ long hugetlb_reserve_pages(struct inode *inode, struct resv_map *resv_map; struct hugetlb_cgroup *h_cg = NULL; long gbl_reserve, regions_needed = 0; + int err; /* This should never happen */ if (from > to) { @@ -6612,8 +6613,10 @@ long hugetlb_reserve_pages(struct inode *inode, } else { /* Private mapping. */ resv_map = resv_map_alloc(); - if (!resv_map) + if (!resv_map) { + err = -ENOMEM; goto out_err; + } chg = to - from; @@ -6621,11 +6624,15 @@ long hugetlb_reserve_pages(struct inode *inode, set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER); } - if (chg < 0) + if (chg < 0) { + /* region_chg() above can return -ENOMEM */ + err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL; goto out_err; + } - if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), - chg * pages_per_huge_page(h), &h_cg) < 0) + err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), + chg * pages_per_huge_page(h), &h_cg); + if (err < 0) goto out_err; if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) { @@ -6641,14 +6648,17 @@ long hugetlb_reserve_pages(struct inode *inode, * reservations already in place (gbl_reserve). */ gbl_reserve = hugepage_subpool_get_pages(spool, chg); - if (gbl_reserve < 0) + if (gbl_reserve < 0) { + err = gbl_reserve; goto out_uncharge_cgroup; + } /* * Check enough hugepages are available for the reservation. * Hand the pages back to the subpool if there are not */ - if (hugetlb_acct_memory(h, gbl_reserve) < 0) + err = hugetlb_acct_memory(h, gbl_reserve); + if (err < 0) goto out_put_pages; /* @@ -6667,6 +6677,7 @@ long hugetlb_reserve_pages(struct inode *inode, if (unlikely(add < 0)) { hugetlb_acct_memory(h, -gbl_reserve); + err = add; goto out_put_pages; } else if (unlikely(chg > add)) { /* @@ -6726,7 +6737,7 @@ out_err: kref_put(&resv_map->refs, resv_map_release); set_vma_desc_resv_map(desc, NULL); } - return chg < 0 ? chg : add < 0 ? 
add : -EINVAL; + return err; } long hugetlb_unreserve_pages(struct inode *inode, long start, long end, diff --git a/mm/shmem.c b/mm/shmem.c index 3f194c9842a8ca..b329b5302c4885 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -5794,8 +5794,15 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range); #define shmem_vm_ops generic_file_vm_ops #define shmem_anon_vm_ops generic_file_vm_ops #define shmem_file_operations ramfs_file_operations -#define shmem_acct_size(flags, size) 0 -#define shmem_unacct_size(flags, size) do {} while (0) + +static inline int shmem_acct_size(unsigned long flags, loff_t size) +{ + return 0; +} + +static inline void shmem_unacct_size(unsigned long flags, loff_t size) +{ +} static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir, diff --git a/mm/vmscan.c b/mm/vmscan.c index 900c74b6aa62a5..670fe9fae5baa2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1284,7 +1284,8 @@ retry: goto keep_locked; if (folio_test_large(folio)) { /* cannot split folio, skip it */ - if (!can_split_folio(folio, 1, NULL)) + if (folio_expected_ref_count(folio) != + folio_ref_count(folio) - 1) goto activate_locked; /* * Split partially mapped folios right away. @@ -4540,7 +4541,8 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec, int scanned = 0; int isolated = 0; int skipped = 0; - int remaining = min(nr_to_scan, MAX_LRU_BATCH); + int scan_batch = min(nr_to_scan, MAX_LRU_BATCH); + int remaining = scan_batch; struct lru_gen_folio *lrugen = &lruvec->lrugen; struct mem_cgroup *memcg = lruvec_memcg(lruvec); @@ -4600,7 +4602,7 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec, count_memcg_events(memcg, item, isolated); count_memcg_events(memcg, PGREFILL, sorted); __count_vm_events(PGSCAN_ANON + type, isolated); - trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, + trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch, scanned, skipped, isolated, type ? 
LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); if (type == LRU_GEN_FILE) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 6664ea73ccf81b..3667319b949ddc 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1280,8 +1280,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) static struct ceph_osd *get_osd(struct ceph_osd *osd) { if (refcount_inc_not_zero(&osd->o_ref)) { - dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1, - refcount_read(&osd->o_ref)); + dout("get_osd %p -> %d\n", osd, refcount_read(&osd->o_ref)); return osd; } else { dout("get_osd %p FAIL\n", osd); @@ -1291,8 +1290,7 @@ static struct ceph_osd *get_osd(struct ceph_osd *osd) static void put_osd(struct ceph_osd *osd) { - dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref), - refcount_read(&osd->o_ref) - 1); + dout("put_osd %p -> %d\n", osd, refcount_read(&osd->o_ref) - 1); if (refcount_dec_and_test(&osd->o_ref)) { osd_cleanup(osd); kfree(osd); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index d245fa508e1cc9..34b3ab59602f73 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -806,51 +806,49 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) ceph_decode_need(p, end, len, bad); pool_end = *p + len; + ceph_decode_need(p, end, 4 + 4 + 4, bad); pi->type = ceph_decode_8(p); pi->size = ceph_decode_8(p); pi->crush_ruleset = ceph_decode_8(p); pi->object_hash = ceph_decode_8(p); - pi->pg_num = ceph_decode_32(p); pi->pgp_num = ceph_decode_32(p); - *p += 4 + 4; /* skip lpg* */ - *p += 4; /* skip last_change */ - *p += 8 + 4; /* skip snap_seq, snap_epoch */ + /* lpg*, last_change, snap_seq, snap_epoch */ + ceph_decode_skip_n(p, end, 8 + 4 + 8 + 4, bad); /* skip snaps */ - num = ceph_decode_32(p); + ceph_decode_32_safe(p, end, num, bad); while (num--) { - *p += 8; /* snapid key */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* snapid key, pool snap (with versions) */ + ceph_decode_skip_n(p, end, 8 + 2, bad); + ceph_decode_skip_string(p, end, bad); } - /* skip removed_snaps */ - num = ceph_decode_32(p); - *p += num * (8 + 8); + /* removed_snaps */ + ceph_decode_skip_map(p, end, 64, 64, bad); + ceph_decode_need(p, end, 8 + 8 + 4, bad); *p += 8; /* skip auid */ pi->flags = ceph_decode_64(p); *p += 4; /* skip crash_replay_interval */ if (ev >= 7) - pi->min_size = ceph_decode_8(p); + ceph_decode_8_safe(p, end, pi->min_size, bad); else pi->min_size = pi->size - pi->size / 2; if (ev >= 8) - *p += 8 + 8; /* skip quota_max_* */ + /* quota_max_* */ + ceph_decode_skip_n(p, end, 8 + 8, bad); if (ev >= 9) { - /* skip tiers */ - num = ceph_decode_32(p); - *p += num * 8; + /* tiers */ + ceph_decode_skip_set(p, end, 64, bad); + ceph_decode_need(p, end, 8 + 1 + 8 + 8, bad); *p += 8; /* skip tier_of */ *p += 1; /* skip cache_mode */ - pi->read_tier = ceph_decode_64(p); pi->write_tier = ceph_decode_64(p); } else { @@ -858,86 +856,76 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) pi->write_tier = -1; } - if (ev >= 10) { - /* skip properties */ - num = ceph_decode_32(p); - while (num--) { - len = ceph_decode_32(p); - *p += len; /* key */ - len = ceph_decode_32(p); - *p += len; /* val */ - } - } + if (ev >= 10) + /* properties */ + ceph_decode_skip_map(p, end, string, string, bad); if (ev >= 11) { - /* skip hit_set_params */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* hit_set_params (with versions) */ + ceph_decode_skip_n(p, end, 2, bad); + 
ceph_decode_skip_string(p, end, bad); - *p += 4; /* skip hit_set_period */ - *p += 4; /* skip hit_set_count */ + /* hit_set_period, hit_set_count */ + ceph_decode_skip_n(p, end, 4 + 4, bad); } if (ev >= 12) - *p += 4; /* skip stripe_width */ + /* stripe_width */ + ceph_decode_skip_32(p, end, bad); - if (ev >= 13) { - *p += 8; /* skip target_max_bytes */ - *p += 8; /* skip target_max_objects */ - *p += 4; /* skip cache_target_dirty_ratio_micro */ - *p += 4; /* skip cache_target_full_ratio_micro */ - *p += 4; /* skip cache_min_flush_age */ - *p += 4; /* skip cache_min_evict_age */ - } + if (ev >= 13) + /* target_max_*, cache_target_*, cache_min_* */ + ceph_decode_skip_n(p, end, 16 + 8 + 8, bad); - if (ev >= 14) { - /* skip erasure_code_profile */ - len = ceph_decode_32(p); - *p += len; - } + if (ev >= 14) + /* erasure_code_profile */ + ceph_decode_skip_string(p, end, bad); /* * last_force_op_resend_preluminous, will be overridden if the * map was encoded with RESEND_ON_SPLIT */ if (ev >= 15) - pi->last_force_request_resend = ceph_decode_32(p); + ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad); else pi->last_force_request_resend = 0; if (ev >= 16) - *p += 4; /* skip min_read_recency_for_promote */ + /* min_read_recency_for_promote */ + ceph_decode_skip_32(p, end, bad); if (ev >= 17) - *p += 8; /* skip expected_num_objects */ + /* expected_num_objects */ + ceph_decode_skip_64(p, end, bad); if (ev >= 19) - *p += 4; /* skip cache_target_dirty_high_ratio_micro */ + /* cache_target_dirty_high_ratio_micro */ + ceph_decode_skip_32(p, end, bad); if (ev >= 20) - *p += 4; /* skip min_write_recency_for_promote */ + /* min_write_recency_for_promote */ + ceph_decode_skip_32(p, end, bad); if (ev >= 21) - *p += 1; /* skip use_gmt_hitset */ + /* use_gmt_hitset */ + ceph_decode_skip_8(p, end, bad); if (ev >= 22) - *p += 1; /* skip fast_read */ + /* fast_read */ + ceph_decode_skip_8(p, end, bad); - if (ev >= 23) { - *p += 4; /* skip hit_set_grade_decay_rate */ - *p += 4; /* skip hit_set_search_last_n */ - } + if (ev >= 23) + /* hit_set_grade_decay_rate, hit_set_search_last_n */ + ceph_decode_skip_n(p, end, 4 + 4, bad); if (ev >= 24) { - /* skip opts */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* opts (with versions) */ + ceph_decode_skip_n(p, end, 2, bad); + ceph_decode_skip_string(p, end, bad); } if (ev >= 25) - pi->last_force_request_resend = ceph_decode_32(p); + ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad); /* ignore the rest */ @@ -1438,7 +1426,7 @@ static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end, ceph_decode_32_safe(p, end, len, e_inval); if (len == 0 && incremental) return NULL; /* new_pg_temp: [] to remove */ - if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)) + if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)) return ERR_PTR(-EINVAL); ceph_decode_need(p, end, len * sizeof(u32), e_inval); @@ -1619,7 +1607,7 @@ static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end, u32 len, i; ceph_decode_32_safe(p, end, len, e_inval); - if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32))) + if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32))) return ERR_PTR(-EINVAL); ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval); diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index caa94cf57123f4..68b1fcdea8f001 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -25,6 +25,22 @@ unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt) } /* + * 
Helper function to nullify backchannel server pointer in transport. + * We need to synchronize setting the pointer to NULL (done so after + * the backchannel server is shutdown) with the usage of that pointer + * by the backchannel request processing routines + * xprt_complete_bc_request() and rpcrdma_bc_receive_call(). + */ +void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv) +{ + spin_lock(&xprt->bc_pa_lock); + svc_destroy(serv); + xprt->bc_serv = NULL; + spin_unlock(&xprt->bc_pa_lock); +} +EXPORT_SYMBOL_GPL(xprt_svc_destroy_nullify_bc); + +/* * Helper routines that track the number of preallocation elements * on the transport. */ @@ -354,7 +370,6 @@ found: void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) { struct rpc_xprt *xprt = req->rq_xprt; - struct svc_serv *bc_serv = xprt->bc_serv; spin_lock(&xprt->bc_pa_lock); list_del(&req->rq_bc_pa_list); @@ -365,7 +380,21 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); dprintk("RPC: add callback request to list\n"); + xprt_enqueue_bc_request(req); +} + +void xprt_enqueue_bc_request(struct rpc_rqst *req) +{ + struct rpc_xprt *xprt = req->rq_xprt; + struct svc_serv *bc_serv; + xprt_get(xprt); - lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list); - svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); + spin_lock(&xprt->bc_pa_lock); + bc_serv = xprt->bc_serv; + if (bc_serv) { + lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list); + svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); + } + spin_unlock(&xprt->bc_pa_lock); } +EXPORT_SYMBOL_GPL(xprt_enqueue_bc_request); diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index 8c817e755262df..2f0f9618dd0580 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c @@ -9,6 +9,7 @@ #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/sunrpc/svc_rdma.h> +#include <linux/sunrpc/bc_xprt.h> #include "xprt_rdma.h" #include <trace/events/rpcrdma.h> @@ -220,7 +221,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) { struct rpc_xprt *xprt = &r_xprt->rx_xprt; - struct svc_serv *bc_serv; struct rpcrdma_req *req; struct rpc_rqst *rqst; struct xdr_buf *buf; @@ -261,11 +261,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, trace_xprtrdma_cb_call(r_xprt, rqst); /* Queue rqst for ULP's callback service */ - bc_serv = xprt->bc_serv; - xprt_get(xprt); - lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list); - - svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); + xprt_enqueue_bc_request(rqst); r_xprt->rx_stats.bcall_count++; return; diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index d58ca9655ab7e6..c0250244cf7a3c 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -7732,6 +7732,12 @@ sub process { ERROR("MISSING_SENTINEL", "missing sentinel in ID array\n" . "$here\n$stat\n"); } } + +# check for uninitialized pointers with __free attribute + while ($line =~ /\*\s*($Ident)\s+__free\s*\(\s*$Ident\s*\)\s*[,;]/g) { + ERROR("UNINITIALIZED_PTR_WITH_FREE", + "pointer '$1' with __free attribute should be initialized\n" . 
$herecurr); + } } # If we have no input at all, then there is nothing to report on diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 5f9ccab26e9abb..90cf0e2969df8e 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -934,17 +934,12 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, #endif if (page != dump->page) { const unsigned int offset = pos % PAGE_SIZE; - /* - * Maybe kmap()/kunmap() should be used here. - * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic(). - * So do I. - */ - char *kaddr = kmap_atomic(page); + char *kaddr = kmap_local_page(page); dump->page = page; memcpy(dump->data + offset, kaddr + offset, PAGE_SIZE - offset); - kunmap_atomic(kaddr); + kunmap_local(kaddr); } /* Same with put_arg_page(page) in fs/exec.c */ #ifdef CONFIG_MMU diff --git a/sound/core/Kconfig b/sound/core/Kconfig index 48db44fa56feb1..4e7bc370ffd7f2 100644 --- a/sound/core/Kconfig +++ b/sound/core/Kconfig @@ -155,7 +155,7 @@ config SND_MAX_CARDS config SND_SUPPORT_OLD_API bool "Support old ALSA API" - default y + default n help Say Y here to support the obsolete ALSA PCM API (ver.0.9.0 rc3 or older). diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c index 981c19430cb0fe..89dc436a065299 100644 --- a/sound/firewire/motu/motu-hwdep.c +++ b/sound/firewire/motu/motu-hwdep.c @@ -75,7 +75,7 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, while (consumed < count && snd_motu_register_dsp_message_parser_copy_event(motu, &ev)) { ptr = (u32 __user *)(buf + consumed); - if (put_user(ev, ptr)) + if (consumed + sizeof(ev) > count || put_user(ev, ptr)) return -EFAULT; consumed += sizeof(ev); } @@ -83,10 +83,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, event.motu_register_dsp_change.type = SNDRV_FIREWIRE_EVENT_MOTU_REGISTER_DSP_CHANGE; event.motu_register_dsp_change.count = (consumed - sizeof(event.motu_register_dsp_change)) / 4; - if (copy_to_user(buf, &event, sizeof(event.motu_register_dsp_change))) + if (copy_to_user(buf, &event, + min_t(long, count, sizeof(event.motu_register_dsp_change)))) return -EFAULT; - count = consumed; + count = min_t(long, count, consumed); } else { spin_unlock_irq(&motu->lock); diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index d90a6c01f63be4..171a71457ec3b9 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -6770,6 +6770,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8e60, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e61, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8e62, "HP Trekker ", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e8a, "HP NexusX", ALC245_FIXUP_HP_TAS2781_I2C_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8e9d, "HP 17 Turbine OmniBook X UMA", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8e9e, "HP 17 Turbine OmniBook X UMA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8eb6, "HP Abe A6U", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_GPIO), SND_PCI_QUIRK(0x103c, 0x8eb7, "HP Abe A6U", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_GPIO), SND_PCI_QUIRK(0x103c, 0x8eb8, "HP Abe A6U", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_GPIO), @@ -6842,6 +6845,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", 
		      ALC287_FIXUP_CS35L41_I2C_2),
 	SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301VV/VQ/VU/VJ/VA/VC/VE/VVC/VQC/VUC/VJC/VEC/VCC", ALC285_FIXUP_ASUS_HEADSET_MIC),
+	SND_PCI_QUIRK(0x1043, 0x1584, "ASUS UM3406GA ", ALC287_FIXUP_CS35L41_I2C_2),
 	SND_PCI_QUIRK(0x1043, 0x1652, "ASUS ROG Zephyrus Do 15 SE", ALC289_FIXUP_ASUS_ZEPHYRUS_DUAL_SPK),
 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
 	SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZI/ZJ/ZQ/ZU/ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
diff --git a/sound/hda/codecs/side-codecs/cs35l41_hda.c b/sound/hda/codecs/side-codecs/cs35l41_hda.c
index c0f2a3ff77a1b6..21e00055c0c443 100644
--- a/sound/hda/codecs/side-codecs/cs35l41_hda.c
+++ b/sound/hda/codecs/side-codecs/cs35l41_hda.c
@@ -1901,6 +1901,8 @@ static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, i
 	cs35l41->dacpi = adev;
 	physdev = get_device(acpi_get_first_physical_node(adev));
+	if (!physdev)
+		return -ENODEV;
 	sub = acpi_get_subsystem_id(ACPI_HANDLE(physdev));
 	if (IS_ERR(sub))
diff --git a/sound/hda/controllers/Kconfig b/sound/hda/controllers/Kconfig
index 34721f50b055af..72855f2df45148 100644
--- a/sound/hda/controllers/Kconfig
+++ b/sound/hda/controllers/Kconfig
@@ -30,6 +30,20 @@ config SND_HDA_TEGRA
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-hda-tegra.
+config SND_HDA_CIX_IPBLOQ
+	tristate "CIX IPBLOQ HD Audio"
+	depends on ARCH_CIX || COMPILE_TEST
+	select SND_HDA
+	select SND_HDA_ALIGNED_MMIO
+	help
+	  Say Y here to support the HDA controller present in CIX SoCs.
+
+	  This option enables support for the HD Audio controller
+	  present in some CIX SoCs.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called snd-hda-cix-ipbloq.
+
 config SND_HDA_ACPI
 	tristate "HD Audio ACPI"
 	depends on ACPI
diff --git a/sound/hda/controllers/Makefile b/sound/hda/controllers/Makefile
index a4bcd055e9aef9..8967b6771d904d 100644
--- a/sound/hda/controllers/Makefile
+++ b/sound/hda/controllers/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 snd-hda-intel-y := intel.o
 snd-hda-tegra-y := tegra.o
+snd-hda-cix-ipbloq-y := cix-ipbloq.o
 snd-hda-acpi-y := acpi.o
 subdir-ccflags-y += -I$(src)/../common
@@ -10,4 +11,5 @@ CFLAGS_intel.o := -I$(src)
 obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-intel.o
 obj-$(CONFIG_SND_HDA_TEGRA) += snd-hda-tegra.o
+obj-$(CONFIG_SND_HDA_CIX_IPBLOQ) += snd-hda-cix-ipbloq.o
 obj-$(CONFIG_SND_HDA_ACPI) += snd-hda-acpi.o
diff --git a/sound/hda/controllers/cix-ipbloq.c b/sound/hda/controllers/cix-ipbloq.c
new file mode 100644
index 00000000000000..99f9f48e91d4b4
--- /dev/null
+++ b/sound/hda/controllers/cix-ipbloq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2025 Cix Technology Group Co., Ltd.
+ +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_reserved_mem.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/string.h> + +#include <sound/hda_codec.h> +#include "hda_controller.h" + +#define CIX_IPBLOQ_JACKPOLL_DEFAULT_TIME_MS 1000 +#define CIX_IPBLOQ_POWER_SAVE_DEFAULT_TIME_MS 100 + +#define CIX_IPBLOQ_SKY1_ADDR_HOST_TO_HDAC_OFFSET (-0x90000000ULL) + +struct cix_ipbloq_hda { + struct azx chip; + struct device *dev; + void __iomem *regs; + + struct reset_control *reset; + struct clk_bulk_data clocks[2]; + unsigned int nclocks; +}; + +static const struct hda_controller_ops cix_ipbloq_hda_ops; + +static int cix_ipbloq_hda_dev_disconnect(struct snd_device *device) +{ + struct azx *chip = device->device_data; + + chip->bus.shutdown = 1; + + return 0; +} + +static int cix_ipbloq_hda_dev_free(struct snd_device *device) +{ + struct azx *chip = device->device_data; + + if (azx_bus(chip)->chip_init) { + azx_stop_all_streams(chip); + azx_stop_chip(chip); + } + + azx_free_stream_pages(chip); + azx_free_streams(chip); + snd_hdac_bus_exit(azx_bus(chip)); + + return 0; +} + +static int cix_ipbloq_hda_probe_codec(struct cix_ipbloq_hda *hda) +{ + struct azx *chip = &hda->chip; + struct hdac_bus *bus = azx_bus(chip); + int err; + + to_hda_bus(bus)->bus_probing = 1; + + /* create codec instances */ + err = azx_probe_codecs(chip, 8); + if (err < 0) { + dev_err(hda->dev, "probe codecs failed: %d\n", err); + return err; + } + + err = azx_codec_configure(chip); + if (err < 0) { + dev_err(hda->dev, "codec configure failed: %d\n", err); + return err; + } + + err = snd_card_register(chip->card); + if (err < 0) { + dev_err(hda->dev, "card register failed: %d\n", err); + return err; + } + + chip->running = 1; + + to_hda_bus(bus)->bus_probing = 0; + + snd_hda_set_power_save(&chip->bus, CIX_IPBLOQ_POWER_SAVE_DEFAULT_TIME_MS); + + return 0; +} + +static int cix_ipbloq_hda_init(struct cix_ipbloq_hda *hda, + struct azx *chip, + struct platform_device *pdev) +{ + const char *sname = NULL, *drv_name = "cix-ipbloq-hda"; + struct hdac_bus *bus = azx_bus(chip); + struct snd_card *card = chip->card; + struct resource *res; + unsigned short gcap; + int irq_id, err; + + hda->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(hda->regs)) { + dev_err(hda->dev, "failed to get and ioremap resource\n"); + return PTR_ERR(hda->regs); + } + bus->remap_addr = hda->regs; + bus->addr = res->start; + + irq_id = platform_get_irq(pdev, 0); + if (irq_id < 0) { + dev_err(hda->dev, "failed to get the irq, err = %d\n", irq_id); + return irq_id; + } + + err = devm_request_irq(hda->dev, irq_id, azx_interrupt, + 0, KBUILD_MODNAME, chip); + if (err < 0) + return dev_err_probe(hda->dev, err, + "unable to request IRQ %d : err = %d\n", irq_id, err); + bus->irq = irq_id; + card->sync_irq = bus->irq; + + gcap = azx_readw(chip, GCAP); + chip->capture_streams = (gcap >> 8) & 0x0f; + chip->playback_streams = (gcap >> 12) & 0x0f; + chip->capture_index_offset = 0; + chip->playback_index_offset = chip->capture_streams; + chip->num_streams = chip->playback_streams + chip->capture_streams; + + /* initialize streams */ + err = azx_init_streams(chip); + if (err < 0) { + dev_err(hda->dev, "failed to initialize streams: %d\n", err); + return err; + } + + err = azx_alloc_stream_pages(chip); + if (err < 0) { + dev_err(hda->dev, 
"failed to allocate stream pages: %d\n", err); + return err; + } + + /* initialize chip */ + azx_init_chip(chip, 1); + + /* codec detection */ + if (!bus->codec_mask) { + dev_err(hda->dev, "no codecs found\n"); + return -ENODEV; + } + dev_dbg(card->dev, "codec detection mask = 0x%lx\n", bus->codec_mask); + + /* driver name */ + strscpy(card->driver, drv_name, sizeof(card->driver)); + + /* shortname for card */ + sname = of_get_property(pdev->dev.of_node, "model", NULL); + if (!sname) + sname = drv_name; + if (strlen(sname) > sizeof(card->shortname)) + dev_dbg(card->dev, "truncating shortname for card\n"); + strscpy(card->shortname, sname, sizeof(card->shortname)); + + /* longname for card */ + snprintf(card->longname, sizeof(card->longname), + "%s at 0x%lx irq %i", + card->shortname, bus->addr, bus->irq); + + return 0; +} + +static int cix_ipbloq_hda_create(struct cix_ipbloq_hda *hda, + struct snd_card *card, + unsigned int driver_caps) +{ + static const struct snd_device_ops ops = { + .dev_disconnect = cix_ipbloq_hda_dev_disconnect, + .dev_free = cix_ipbloq_hda_dev_free, + }; + struct azx *chip; + int err; + + chip = &hda->chip; + chip->card = card; + chip->ops = &cix_ipbloq_hda_ops; + chip->driver_caps = driver_caps; + chip->driver_type = driver_caps & 0xff; + chip->dev_index = 0; + chip->single_cmd = 0; + chip->codec_probe_mask = -1; + chip->align_buffer_size = 1; + chip->jackpoll_interval = msecs_to_jiffies(CIX_IPBLOQ_JACKPOLL_DEFAULT_TIME_MS); + mutex_init(&chip->open_mutex); + INIT_LIST_HEAD(&chip->pcm_list); + + /* + * HD-audio controllers appear pretty inaccurate about the update-IRQ timing. + * The IRQ is issued before actually the data is processed. So use stream + * link position by default instead of dma position buffer. + */ + chip->get_position[0] = chip->get_position[1] = azx_get_pos_lpib; + + err = azx_bus_init(chip, NULL); + if (err < 0) { + dev_err(hda->dev, "failed to init bus, err = %d\n", err); + return err; + } + + /* RIRBSTS.RINTFL cannot be cleared, cause interrupt storm */ + chip->bus.core.polling_mode = 1; + chip->bus.core.not_use_interrupts = 1; + + chip->bus.core.aligned_mmio = 1; + chip->bus.core.dma_stop_delay = 100; + chip->bus.core.addr_offset = (dma_addr_t)CIX_IPBLOQ_SKY1_ADDR_HOST_TO_HDAC_OFFSET; + + chip->bus.jackpoll_in_suspend = 1; + + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); + if (err < 0) { + dev_err(card->dev, "failed to create device, err = %d\n", err); + return err; + } + + return 0; +} + +static int cix_ipbloq_hda_probe(struct platform_device *pdev) +{ + const unsigned int driver_flags = AZX_DCAPS_PM_RUNTIME; + struct cix_ipbloq_hda *hda; + struct snd_card *card; + struct azx *chip; + int err; + + hda = devm_kzalloc(&pdev->dev, sizeof(*hda), GFP_KERNEL); + if (!hda) + return -ENOMEM; + hda->dev = &pdev->dev; + + hda->reset = devm_reset_control_get(hda->dev, NULL); + if (IS_ERR(hda->reset)) + return dev_err_probe(hda->dev, PTR_ERR(hda->reset), + "failed to get reset, err = %ld\n", PTR_ERR(hda->reset)); + + hda->clocks[hda->nclocks++].id = "ipg"; + hda->clocks[hda->nclocks++].id = "per"; + err = devm_clk_bulk_get(hda->dev, hda->nclocks, hda->clocks); + if (err < 0) + return dev_err_probe(hda->dev, err, "failed to get clk, err = %d\n", err); + + dma_set_mask_and_coherent(hda->dev, DMA_BIT_MASK(32)); + + err = of_reserved_mem_device_init(hda->dev); + if (err < 0 && err != -ENODEV) { + dev_err(hda->dev, + "failed to init reserved mem for DMA, err = %d\n", err); + return err; + } + + err = snd_card_new(hda->dev, SNDRV_DEFAULT_IDX1, 
SNDRV_DEFAULT_STR1,
+			   THIS_MODULE, 0, &card);
+	if (err < 0)
+		return dev_err_probe(hda->dev, err, "failed to create card, err = %d\n", err);
+
+	err = cix_ipbloq_hda_create(hda, card, driver_flags);
+	if (err < 0)
+		goto out_free_card;
+
+	chip = &hda->chip;
+	card->private_data = chip;
+	dev_set_drvdata(hda->dev, card);
+
+	pm_runtime_enable(hda->dev);
+	if (!azx_has_pm_runtime(chip))
+		pm_runtime_forbid(hda->dev);
+
+	err = pm_runtime_resume_and_get(hda->dev);
+	if (err < 0) {
+		dev_err(hda->dev, "runtime resume and get failed, err = %d\n", err);
+		goto out_free_device;
+	}
+
+	err = cix_ipbloq_hda_init(hda, chip, pdev);
+	if (err < 0)
+		goto out_free_device;
+
+	err = cix_ipbloq_hda_probe_codec(hda);
+	if (err < 0)
+		goto out_free_device;
+
+	pm_runtime_put(hda->dev);
+
+	return 0;
+
+out_free_device:
+	snd_device_free(card, chip);
+out_free_card:
+	snd_card_free(card);
+
+	return err;
+}
+
+static void cix_ipbloq_hda_remove(struct platform_device *pdev)
+{
+	struct snd_card *card = dev_get_drvdata(&pdev->dev);
+	struct azx *chip = card->private_data;
+
+	snd_device_free(card, chip);
+	snd_card_free(card);
+
+	pm_runtime_disable(&pdev->dev);
+}
+
+static void cix_ipbloq_hda_shutdown(struct platform_device *pdev)
+{
+	struct snd_card *card = dev_get_drvdata(&pdev->dev);
+	struct azx *chip;
+
+	if (!card)
+		return;
+
+	chip = card->private_data;
+	if (chip && chip->running)
+		azx_stop_chip(chip);
+}
+
+static int cix_ipbloq_hda_suspend(struct device *dev)
+{
+	struct snd_card *card = dev_get_drvdata(dev);
+	int rc;
+
+	rc = pm_runtime_force_suspend(dev);
+	if (rc < 0)
+		return rc;
+	snd_power_change_state(card, SNDRV_CTL_POWER_D3cold);
+
+	return 0;
+}
+
+static int cix_ipbloq_hda_resume(struct device *dev)
+{
+	struct snd_card *card = dev_get_drvdata(dev);
+	int rc;
+
+	rc = pm_runtime_force_resume(dev);
+	if (rc < 0)
+		return rc;
+	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+	return 0;
+}
+
+static int cix_ipbloq_hda_runtime_suspend(struct device *dev)
+{
+	struct snd_card *card = dev_get_drvdata(dev);
+	struct azx *chip = card->private_data;
+	struct cix_ipbloq_hda *hda = container_of(chip, struct cix_ipbloq_hda, chip);
+
+	if (chip && chip->running) {
+		azx_stop_chip(chip);
+		azx_enter_link_reset(chip);
+	}
+
+	clk_bulk_disable_unprepare(hda->nclocks, hda->clocks);
+
+	return 0;
+}
+
+static int cix_ipbloq_hda_runtime_resume(struct device *dev)
+{
+	struct snd_card *card = dev_get_drvdata(dev);
+	struct azx *chip = card->private_data;
+	struct cix_ipbloq_hda *hda = container_of(chip, struct cix_ipbloq_hda, chip);
+	int rc;
+
+	rc = clk_bulk_prepare_enable(hda->nclocks, hda->clocks);
+	if (rc) {
+		dev_err(dev, "failed to enable clk bulk, rc: %d\n", rc);
+		return rc;
+	}
+
+	rc = reset_control_assert(hda->reset);
+	if (rc) {
+		dev_err(dev, "failed to assert reset, rc: %d\n", rc);
+		return rc;
+	}
+
+	rc = reset_control_deassert(hda->reset);
+	if (rc) {
+		dev_err(dev, "failed to deassert reset, rc: %d\n", rc);
+		return rc;
+	}
+
+	if (chip && chip->running)
+		azx_init_chip(chip, 1);
+
+	return 0;
+}
+
+static const struct dev_pm_ops cix_ipbloq_hda_pm = {
+	SYSTEM_SLEEP_PM_OPS(cix_ipbloq_hda_suspend,
+			    cix_ipbloq_hda_resume)
+	RUNTIME_PM_OPS(cix_ipbloq_hda_runtime_suspend,
+		       cix_ipbloq_hda_runtime_resume, NULL)
+};
+
+static const struct of_device_id cix_ipbloq_hda_match[] = {
+	{ .compatible = "cix,sky1-ipbloq-hda" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cix_ipbloq_hda_match);
+
+static struct platform_driver cix_ipbloq_hda_driver = {
+	.driver = {
+		.name =
"cix-ipbloq-hda", + .pm = pm_ptr(&cix_ipbloq_hda_pm), + .of_match_table = cix_ipbloq_hda_match, + }, + .probe = cix_ipbloq_hda_probe, + .remove = cix_ipbloq_hda_remove, + .shutdown = cix_ipbloq_hda_shutdown, +}; +module_platform_driver(cix_ipbloq_hda_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CIX IPBLOQ HDA bus driver"); +MODULE_AUTHOR("Joakim Zhang <joakim.zhang@cixtech.com>"); diff --git a/sound/hda/core/bus.c b/sound/hda/core/bus.c index 9b196c915f3783..81498f1e413e2a 100644 --- a/sound/hda/core/bus.c +++ b/sound/hda/core/bus.c @@ -47,6 +47,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev, INIT_LIST_HEAD(&bus->hlink_list); init_waitqueue_head(&bus->rirb_wq); bus->irq = -1; + bus->addr_offset = 0; /* * Default value of '8' is as per the HD audio specification (Rev 1.0a). diff --git a/sound/hda/core/controller.c b/sound/hda/core/controller.c index a7c00ad801170c..69e11d62bbfa7a 100644 --- a/sound/hda/core/controller.c +++ b/sound/hda/core/controller.c @@ -48,8 +48,8 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus) /* CORB set up */ bus->corb.addr = bus->rb.addr; bus->corb.buf = (__le32 *)bus->rb.area; - snd_hdac_chip_writel(bus, CORBLBASE, (u32)bus->corb.addr); - snd_hdac_chip_writel(bus, CORBUBASE, upper_32_bits(bus->corb.addr)); + snd_hdac_chip_writel(bus, CORBLBASE, (u32)(bus->corb.addr + bus->addr_offset)); + snd_hdac_chip_writel(bus, CORBUBASE, upper_32_bits(bus->corb.addr + bus->addr_offset)); /* set the corb size to 256 entries (ULI requires explicitly) */ snd_hdac_chip_writeb(bus, CORBSIZE, 0x02); @@ -70,8 +70,8 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus) bus->rirb.buf = (__le32 *)(bus->rb.area + 2048); bus->rirb.wp = bus->rirb.rp = 0; memset(bus->rirb.cmds, 0, sizeof(bus->rirb.cmds)); - snd_hdac_chip_writel(bus, RIRBLBASE, (u32)bus->rirb.addr); - snd_hdac_chip_writel(bus, RIRBUBASE, upper_32_bits(bus->rirb.addr)); + snd_hdac_chip_writel(bus, RIRBLBASE, (u32)(bus->rirb.addr + bus->addr_offset)); + snd_hdac_chip_writel(bus, RIRBUBASE, upper_32_bits(bus->rirb.addr + bus->addr_offset)); /* set the rirb size to 256 entries (ULI requires explicitly) */ snd_hdac_chip_writeb(bus, RIRBSIZE, 0x02); @@ -625,8 +625,8 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) /* program the position buffer */ if (bus->use_posbuf && bus->posbuf.addr) { - snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr); - snd_hdac_chip_writel(bus, DPUBASE, upper_32_bits(bus->posbuf.addr)); + snd_hdac_chip_writel(bus, DPLBASE, (u32)(bus->posbuf.addr + bus->addr_offset)); + snd_hdac_chip_writel(bus, DPUBASE, upper_32_bits(bus->posbuf.addr + bus->addr_offset)); } bus->chip_init = true; diff --git a/sound/hda/core/intel-dsp-config.c b/sound/hda/core/intel-dsp-config.c index c401c06584213e..0c25e87408de2a 100644 --- a/sound/hda/core/intel-dsp-config.c +++ b/sound/hda/core/intel-dsp-config.c @@ -718,7 +718,8 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci) /* find the configuration for the specific device */ cfg = snd_intel_dsp_find_config(pci, config_table, ARRAY_SIZE(config_table)); if (!cfg) - return SND_INTEL_DSP_DRIVER_ANY; + return IS_ENABLED(CONFIG_SND_HDA_INTEL) ? 
+ SND_INTEL_DSP_DRIVER_LEGACY : SND_INTEL_DSP_DRIVER_ANY; if (cfg->flags & FLAG_SOF) { if (cfg->flags & FLAG_SOF_ONLY_IF_SOUNDWIRE && diff --git a/sound/hda/core/stream.c b/sound/hda/core/stream.c index 579ec544ef4a48..b471a038b3140d 100644 --- a/sound/hda/core/stream.c +++ b/sound/hda/core/stream.c @@ -288,16 +288,16 @@ int snd_hdac_stream_setup(struct hdac_stream *azx_dev, bool code_loading) /* program the BDL address */ /* lower BDL address */ - snd_hdac_stream_writel(azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr); + snd_hdac_stream_writel(azx_dev, SD_BDLPL, (u32)(azx_dev->bdl.addr + bus->addr_offset)); /* upper BDL address */ snd_hdac_stream_writel(azx_dev, SD_BDLPU, - upper_32_bits(azx_dev->bdl.addr)); + upper_32_bits(azx_dev->bdl.addr + bus->addr_offset)); /* enable the position buffer */ if (bus->use_posbuf && bus->posbuf.addr) { if (!(snd_hdac_chip_readl(bus, DPLBASE) & AZX_DPLBASE_ENABLE)) snd_hdac_chip_writel(bus, DPLBASE, - (u32)bus->posbuf.addr | AZX_DPLBASE_ENABLE); + (u32)(bus->posbuf.addr + bus->addr_offset) | AZX_DPLBASE_ENABLE); } /* set the interrupt enable bits in the descriptor control register */ @@ -464,8 +464,8 @@ static int setup_bdle(struct hdac_bus *bus, addr = snd_sgbuf_get_addr(dmab, ofs); /* program the address field of the BDL entry */ - bdl[0] = cpu_to_le32((u32)addr); - bdl[1] = cpu_to_le32(upper_32_bits(addr)); + bdl[0] = cpu_to_le32((u32)(addr + bus->addr_offset)); + bdl[1] = cpu_to_le32(upper_32_bits(addr + bus->addr_offset)); /* program the size field of the BDL entry */ chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size); /* one BDLE cannot cross 4K boundary on CTHDA chips */ diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c index 4ba0a66981ea9d..283a674c7e2c32 100644 --- a/sound/soc/amd/acp/acp-i2s.c +++ b/sound/soc/amd/acp/acp-i2s.c @@ -157,6 +157,8 @@ static int acp_i2s_set_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mas spin_lock_irq(&chip->acp_lock); list_for_each_entry(stream, &chip->stream_list, list) { + if (dai->id != stream->dai_id) + continue; switch (chip->acp_rev) { case ACP_RN_PCI_ID: case ACP_RMB_PCI_ID: diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c index 3078f459e0050b..4e477c48d4bdd1 100644 --- a/sound/soc/amd/acp/acp-legacy-common.c +++ b/sound/soc/amd/acp/acp-legacy-common.c @@ -219,7 +219,10 @@ static int set_acp_i2s_dma_fifo(struct snd_pcm_substream *substream, SP_PB_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_I2S_TX_FIFOADDR(chip); reg_fifo_size = ACP_I2S_TX_FIFOSIZE(chip); - phy_addr = I2S_SP_TX_MEM_WINDOW_START + stream->reg_offset; + if (chip->acp_rev >= ACP70_PCI_ID) + phy_addr = ACP7x_I2S_SP_TX_MEM_WINDOW_START; + else + phy_addr = I2S_SP_TX_MEM_WINDOW_START + stream->reg_offset; writel(phy_addr, chip->base + ACP_I2S_TX_RINGBUFADDR(chip)); } else { reg_dma_size = ACP_I2S_RX_DMA_SIZE(chip); @@ -227,7 +230,10 @@ static int set_acp_i2s_dma_fifo(struct snd_pcm_substream *substream, SP_CAPT_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_I2S_RX_FIFOADDR(chip); reg_fifo_size = ACP_I2S_RX_FIFOSIZE(chip); - phy_addr = I2S_SP_RX_MEM_WINDOW_START + stream->reg_offset; + if (chip->acp_rev >= ACP70_PCI_ID) + phy_addr = ACP7x_I2S_SP_RX_MEM_WINDOW_START; + else + phy_addr = I2S_SP_RX_MEM_WINDOW_START + stream->reg_offset; writel(phy_addr, chip->base + ACP_I2S_RX_RINGBUFADDR(chip)); } break; @@ -238,7 +244,10 @@ static int set_acp_i2s_dma_fifo(struct snd_pcm_substream *substream, BT_PB_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_BT_TX_FIFOADDR(chip); reg_fifo_size = ACP_BT_TX_FIFOSIZE(chip); - 
phy_addr = I2S_BT_TX_MEM_WINDOW_START + stream->reg_offset; + if (chip->acp_rev >= ACP70_PCI_ID) + phy_addr = ACP7x_I2S_BT_TX_MEM_WINDOW_START; + else + phy_addr = I2S_BT_TX_MEM_WINDOW_START + stream->reg_offset; writel(phy_addr, chip->base + ACP_BT_TX_RINGBUFADDR(chip)); } else { reg_dma_size = ACP_BT_RX_DMA_SIZE(chip); @@ -246,7 +255,10 @@ static int set_acp_i2s_dma_fifo(struct snd_pcm_substream *substream, BT_CAPT_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_BT_RX_FIFOADDR(chip); reg_fifo_size = ACP_BT_RX_FIFOSIZE(chip); - phy_addr = I2S_BT_TX_MEM_WINDOW_START + stream->reg_offset; + if (chip->acp_rev >= ACP70_PCI_ID) + phy_addr = ACP7x_I2S_BT_RX_MEM_WINDOW_START; + else + phy_addr = I2S_BT_RX_MEM_WINDOW_START + stream->reg_offset; writel(phy_addr, chip->base + ACP_BT_RX_RINGBUFADDR(chip)); } break; @@ -257,7 +269,10 @@ static int set_acp_i2s_dma_fifo(struct snd_pcm_substream *substream, HS_PB_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_HS_TX_FIFOADDR; reg_fifo_size = ACP_HS_TX_FIFOSIZE; - phy_addr = I2S_HS_TX_MEM_WINDOW_START + stream->reg_offset; + if (chip->acp_rev >= ACP70_PCI_ID) + phy_addr = ACP7x_I2S_HS_TX_MEM_WINDOW_START; + else + phy_addr = I2S_HS_TX_MEM_WINDOW_START + stream->reg_offset; writel(phy_addr, chip->base + ACP_HS_TX_RINGBUFADDR); } else { reg_dma_size = ACP_HS_RX_DMA_SIZE; @@ -265,7 +280,10 @@ static int set_acp_i2s_dma_fifo(struct snd_pcm_substream *substream, HS_CAPT_FIFO_ADDR_OFFSET; reg_fifo_addr = ACP_HS_RX_FIFOADDR; reg_fifo_size = ACP_HS_RX_FIFOSIZE; - phy_addr = I2S_HS_RX_MEM_WINDOW_START + stream->reg_offset; + if (chip->acp_rev >= ACP70_PCI_ID) + phy_addr = ACP7x_I2S_HS_RX_MEM_WINDOW_START; + else + phy_addr = I2S_HS_RX_MEM_WINDOW_START + stream->reg_offset; writel(phy_addr, chip->base + ACP_HS_RX_RINGBUFADDR); } break; diff --git a/sound/soc/bcm/bcm63xx-pcm-whistler.c b/sound/soc/bcm/bcm63xx-pcm-whistler.c index e3a4fcc63a56dc..efeb06ddabeb3b 100644 --- a/sound/soc/bcm/bcm63xx-pcm-whistler.c +++ b/sound/soc/bcm/bcm63xx-pcm-whistler.c @@ -358,7 +358,9 @@ static int bcm63xx_soc_pcm_new(struct snd_soc_component *component, i2s_priv = dev_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0)->dev); - of_dma_configure(pcm->card->dev, pcm->card->dev->of_node, 1); + ret = of_dma_configure(pcm->card->dev, pcm->card->dev->of_node, 1); + if (ret) + return ret; ret = dma_coerce_mask_and_coherent(pcm->card->dev, DMA_BIT_MASK(32)); if (ret) diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 6087ebde9523eb..061791e6190742 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -777,7 +777,6 @@ config SND_SOC_CQ0093VC config SND_SOC_CROS_EC_CODEC tristate "codec driver for ChromeOS EC" depends on CROS_EC - select CRYPTO select CRYPTO_LIB_SHA256 help If you say yes here you will get support for the @@ -918,7 +917,7 @@ config SND_SOC_CS35L56_CAL_DEBUGFS config SND_SOC_CS35L56_CAL_SET_CTRL bool "CS35L56 ALSA control to restore factory calibration" default N - select SND_SOC_CS35L56_CAL_SYSFS_COMMON + select SND_SOC_CS35L56_CAL_DEBUGFS_COMMON help Allow restoring factory calibration data through an ALSA control. 
This is only needed on platforms without UEFI or diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c index f0b465f9ded530..783d2ef21c11c1 100644 --- a/sound/soc/codecs/ak4458.c +++ b/sound/soc/codecs/ak4458.c @@ -671,7 +671,15 @@ static int ak4458_runtime_resume(struct device *dev) regcache_cache_only(ak4458->regmap, false); regcache_mark_dirty(ak4458->regmap); - return regcache_sync(ak4458->regmap); + ret = regcache_sync(ak4458->regmap); + if (ret) + goto err; + + return 0; +err: + regcache_cache_only(ak4458->regmap, true); + regulator_bulk_disable(ARRAY_SIZE(ak4458->supplies), ak4458->supplies); + return ret; } static const struct snd_soc_component_driver soc_codec_dev_ak4458 = { diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c index 683f3e472f5000..73684fc5beb1a7 100644 --- a/sound/soc/codecs/ak5558.c +++ b/sound/soc/codecs/ak5558.c @@ -372,7 +372,15 @@ static int ak5558_runtime_resume(struct device *dev) regcache_cache_only(ak5558->regmap, false); regcache_mark_dirty(ak5558->regmap); - return regcache_sync(ak5558->regmap); + ret = regcache_sync(ak5558->regmap); + if (ret) + goto err; + + return 0; +err: + regcache_cache_only(ak5558->regmap, true); + regulator_bulk_disable(ARRAY_SIZE(ak5558->supplies), ak5558->supplies); + return ret; } static const struct dev_pm_ops ak5558_pm = { diff --git a/sound/soc/codecs/cs-amp-lib.c b/sound/soc/codecs/cs-amp-lib.c index 8c9fd9980a7d60..d8f8b0259cd150 100644 --- a/sound/soc/codecs/cs-amp-lib.c +++ b/sound/soc/codecs/cs-amp-lib.c @@ -7,7 +7,6 @@ #include <asm/byteorder.h> #include <kunit/static_stub.h> -#include <linux/cleanup.h> #include <linux/debugfs.h> #include <linux/dev_printk.h> #include <linux/efi.h> @@ -310,8 +309,9 @@ static struct cirrus_amp_efi_data *cs_amp_get_cal_efi_buffer(struct device *dev, efi_guid_t **guid, u32 *attr) { - struct cirrus_amp_efi_data *efi_data __free(kfree) = NULL; + struct cirrus_amp_efi_data *efi_data; unsigned long data_size = 0; + u8 *data; efi_status_t status; int i, ret; @@ -339,18 +339,19 @@ static struct cirrus_amp_efi_data *cs_amp_get_cal_efi_buffer(struct device *dev, } /* Get variable contents into buffer */ - efi_data = kmalloc(data_size, GFP_KERNEL); - if (!efi_data) + data = kmalloc(data_size, GFP_KERNEL); + if (!data) return ERR_PTR(-ENOMEM); status = cs_amp_get_efi_variable(cs_amp_lib_cal_efivars[i].name, cs_amp_lib_cal_efivars[i].guid, - attr, &data_size, efi_data); + attr, &data_size, data); if (status != EFI_SUCCESS) { ret = -EINVAL; goto err; } + efi_data = (struct cirrus_amp_efi_data *)data; dev_dbg(dev, "Calibration: Size=%d, Amp Count=%d\n", efi_data->size, efi_data->count); if ((efi_data->count > 128) || @@ -364,9 +365,10 @@ static struct cirrus_amp_efi_data *cs_amp_get_cal_efi_buffer(struct device *dev, if (efi_data->size == 0) efi_data->size = data_size; - return_ptr(efi_data); + return efi_data; err: + kfree(data); dev_err(dev, "Failed to read calibration data from EFI: %d\n", ret); return ERR_PTR(ret); @@ -389,9 +391,9 @@ static int cs_amp_set_cal_efi_buffer(struct device *dev, static int _cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid, int amp_index, struct cirrus_amp_cal_data *out_data) { - struct cirrus_amp_efi_data *efi_data __free(kfree) = NULL; + struct cirrus_amp_efi_data *efi_data; struct cirrus_amp_cal_data *cal = NULL; - int i; + int i, ret; efi_data = cs_amp_get_cal_efi_buffer(dev, NULL, NULL, NULL); if (IS_ERR(efi_data)) @@ -432,14 +434,17 @@ static int _cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid, 
dev_warn(dev, "Calibration entry %d does not match silicon ID", amp_index); } - if (!cal) { + if (cal) { + memcpy(out_data, cal, sizeof(*out_data)); + ret = 0; + } else { dev_warn(dev, "No calibration for silicon ID %#llx\n", target_uid); - return -ENOENT; + ret = -ENOENT; } - memcpy(out_data, cal, sizeof(*out_data)); + kfree(efi_data); - return 0; + return ret; } static int _cs_amp_set_efi_calibration_data(struct device *dev, int amp_index, int num_amps, diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c index 3a8a8dd065b772..ee56dfceedeb0a 100644 --- a/sound/soc/codecs/cs35l41.c +++ b/sound/soc/codecs/cs35l41.c @@ -1188,13 +1188,14 @@ static int cs35l41_get_system_name(struct cs35l41_private *cs35l41) } } -err: if (sub) { cs35l41->dsp.system_name = sub; dev_info(cs35l41->dev, "Subsystem ID: %s\n", cs35l41->dsp.system_name); - } else - dev_warn(cs35l41->dev, "Subsystem ID not found\n"); + return 0; + } +err: + dev_warn(cs35l41->dev, "Subsystem ID not found\n"); return ret; } diff --git a/sound/soc/codecs/nau8325.c b/sound/soc/codecs/nau8325.c index 3bfdb448f8bd72..e651263a981217 100644 --- a/sound/soc/codecs/nau8325.c +++ b/sound/soc/codecs/nau8325.c @@ -386,7 +386,8 @@ static int nau8325_clksrc_choose(struct nau8325 *nau8325, const struct nau8325_srate_attr **srate_table, int *n1_sel, int *mult_sel, int *n2_sel) { - int i, j, mclk, mclk_max, ratio, ratio_sel, n2_max; + int i, j, mclk, ratio; + int mclk_max = 0, ratio_sel = 0, n2_max = 0; if (!nau8325->mclk || !nau8325->fs) goto proc_err; @@ -408,7 +409,6 @@ static int nau8325_clksrc_choose(struct nau8325 *nau8325, } /* Get MCLK_SRC through 1/N, Multiplier, and then 1/N2. */ - mclk_max = 0; for (i = 0; i < ARRAY_SIZE(mclk_n1_div); i++) { for (j = 0; j < ARRAY_SIZE(mclk_n3_mult); j++) { mclk = nau8325->mclk << mclk_n3_mult[j].param; diff --git a/sound/soc/codecs/wcd937x.c b/sound/soc/codecs/wcd937x.c index f1dced57a59b25..f4dbcf04be4922 100644 --- a/sound/soc/codecs/wcd937x.c +++ b/sound/soc/codecs/wcd937x.c @@ -2866,7 +2866,7 @@ static int wcd937x_add_slave_components(struct wcd937x_priv *wcd937x, dev_err(dev, "Couldn't parse phandle to qcom,rx-device!\n"); return -ENODEV; } - of_node_get(wcd937x->rxnode); + component_match_add_release(dev, matchptr, component_release_of, component_compare_of, wcd937x->rxnode); @@ -2875,7 +2875,7 @@ static int wcd937x_add_slave_components(struct wcd937x_priv *wcd937x, dev_err(dev, "Couldn't parse phandle to qcom,tx-device\n"); return -ENODEV; } - of_node_get(wcd937x->txnode); + component_match_add_release(dev, matchptr, component_release_of, component_compare_of, wcd937x->txnode); diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c index f5b7de2bc896da..cb0a0bfdb6e322 100644 --- a/sound/soc/codecs/wcd938x.c +++ b/sound/soc/codecs/wcd938x.c @@ -3464,7 +3464,6 @@ static int wcd938x_add_slave_components(struct wcd938x_priv *wcd938x, return -ENODEV; } - of_node_get(wcd938x->rxnode); component_match_add_release(dev, matchptr, component_release_of, component_compare_of, wcd938x->rxnode); @@ -3473,7 +3472,7 @@ static int wcd938x_add_slave_components(struct wcd938x_priv *wcd938x, dev_err(dev, "%s: Tx-device node not defined\n", __func__); return -ENODEV; } - of_node_get(wcd938x->txnode); + component_match_add_release(dev, matchptr, component_release_of, component_compare_of, wcd938x->txnode); return 0; diff --git a/sound/soc/codecs/wcd939x.c b/sound/soc/codecs/wcd939x.c index 7c5dd048438420..01f1a08f48e65d 100644 --- a/sound/soc/codecs/wcd939x.c +++ b/sound/soc/codecs/wcd939x.c 
@@ -3526,7 +3526,6 @@ static int wcd939x_add_slave_components(struct wcd939x_priv *wcd939x, return -ENODEV; } - of_node_get(wcd939x->rxnode); component_match_add_release(dev, matchptr, component_release_of, component_compare_of, wcd939x->rxnode); @@ -3535,7 +3534,7 @@ static int wcd939x_add_slave_components(struct wcd939x_priv *wcd939x, dev_err(dev, "%s: Tx-device node not defined\n", __func__); return -ENODEV; } - of_node_get(wcd939x->txnode); + component_match_add_release(dev, matchptr, component_release_of, component_compare_of, wcd939x->txnode); return 0; diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c index 980851a1297608..0b01fc9e13a727 100644 --- a/sound/soc/qcom/qdsp6/q6afe.c +++ b/sound/soc/qcom/qdsp6/q6afe.c @@ -947,7 +947,7 @@ static struct q6afe_port *q6afe_find_port(struct q6afe *afe, int token) struct q6afe_port *p; struct q6afe_port *ret = NULL; - guard(spinlock)(&afe->port_list_lock); + guard(spinlock_irqsave)(&afe->port_list_lock); list_for_each_entry(p, &afe->port_list, node) if (p->token == token) { ret = p; @@ -1807,7 +1807,7 @@ struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id) port->cfg_type = cfg_type; kref_init(&port->refcount); - guard(spinlock)(&afe->port_list_lock); + guard(spinlock_irqsave)(&afe->port_list_lock); list_add_tail(&port->node, &afe->port_list); return port; diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index c1ee470ec6079d..c69cdd6f24994c 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -580,7 +580,7 @@ static int rockchip_pdm_probe(struct platform_device *pdev) if (!pdm) return -ENOMEM; - pdm->version = (enum rk_pdm_version)device_get_match_data(&pdev->dev); + pdm->version = (unsigned long)device_get_match_data(&pdev->dev); if (pdm->version == RK_PDM_RK3308) { pdm->reset = devm_reset_control_get(&pdev->dev, "pdm-m"); if (IS_ERR(pdm->reset)) diff --git a/sound/soc/sof/intel/hda-sdw-bpt.c b/sound/soc/sof/intel/hda-sdw-bpt.c index ff5abccf0d88b6..e45dd051ab8c26 100644 --- a/sound/soc/sof/intel/hda-sdw-bpt.c +++ b/sound/soc/sof/intel/hda-sdw-bpt.c @@ -10,6 +10,7 @@ * Hardware interface for SoundWire BPT support with HDA DMA */ +#include <linux/lcm.h> #include <sound/hdaudio_ext.h> #include <sound/hda-mlink.h> #include <sound/hda-sdw-bpt.h> @@ -236,6 +237,18 @@ static int hda_sdw_bpt_dma_disable(struct device *dev, struct hdac_ext_stream *s return ret; } +#define FIFO_ALIGNMENT 64 + +unsigned int hda_sdw_bpt_get_buf_size_alignment(unsigned int dma_bandwidth) +{ + unsigned int num_channels = DIV_ROUND_UP(dma_bandwidth, BPT_FREQUENCY * 32); + unsigned int data_block = num_channels * 4; + unsigned int alignment = lcm(data_block, FIFO_ALIGNMENT); + + return alignment; +} +EXPORT_SYMBOL_NS(hda_sdw_bpt_get_buf_size_alignment, "SND_SOC_SOF_INTEL_HDA_SDW_BPT"); + int hda_sdw_bpt_open(struct device *dev, int link_id, struct hdac_ext_stream **bpt_tx_stream, struct snd_dma_buffer *dmab_tx_bdl, u32 bpt_tx_num_bytes, u32 tx_dma_bandwidth, struct hdac_ext_stream **bpt_rx_stream, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index f7189990dd6afd..f8eccb1c58da12 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -2545,6 +2545,7 @@ static int snd_rme_get_status1(struct snd_kcontrol *kcontrol, struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol); struct snd_usb_audio *chip = list->mixer->chip; + *status1 = 0; CLASS(snd_usb_lock, pm)(chip); if (pm.err < 0) return pm.err; |
