diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index cf33de56da27dc6bb1ac1e6a1929867584945926..e830370945b6a1c3cdb85f297d5dcb8ff1af4688 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -540,6 +540,27 @@ if leaking kernel pointer values to unprivileged users is a concern.
 When ``kptr_restrict`` is set to 2, kernel pointers printed using
 %pK will be replaced with 0s regardless of privileges.
 
+machine_check_safe (arm64 only)
+================================
+
+Controls the kernel's behaviour when a hardware memory error is
+encountered in the following scenarios:
+
+= ===================
+1 cow
+2 copy_mc_to_kernel
+3 copy_from_user
+4 copy_to_user
+5 get_user
+6 put_user
+= ===================
+
+Correspondence between sysctl value and behavior:
+
+= =======================
+0 Kernel panic
+1 Kill related processes
+= =======================
 modprobe
 ========
 
diff --git a/Documentation/devicetree/bindings/dma/phytium,gdma.yaml b/Documentation/devicetree/bindings/dma/phytium,gdma.yaml
index b5c70dba5609178ac5b121fcabfae0e47b3a07cd..0aefb13731a45bf8cdc556c922dbed7c6b32304a 100644
--- a/Documentation/devicetree/bindings/dma/phytium,gdma.yaml
+++ b/Documentation/devicetree/bindings/dma/phytium,gdma.yaml
@@ -37,7 +37,7 @@ properties:
   max-outstanding:
     minimum: 1
     maximum: 64
-    description: set interrupmax-outstandingts according to the programming manual
+    description: set max-outstanding according to the programming manual
 
 required:
   - compatible
diff --git a/Documentation/devicetree/bindings/i2c/phytium,i2c-2.0.yaml b/Documentation/devicetree/bindings/i2c/phytium,i2c-2.0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e02036e33768a378efb93c191dfd9c11a802faca
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/phytium,i2c-2.0.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+title: 
Phytium I2C Controller + +maintainers: + - WuJinyong + +properties: + compatible: + - description: Phytium I2C controller + const: phytium,i2c-2.0 + + reg: + minItems: 1 + items: + - description: Offset and length of the memory mapped registers + - description: Offset and length of the memory mapped share memory address + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + items: + - description: I2C controller reference clock source + + clock-frequency: + description: Desired I2C bus clock frequency in Hz + enum: [100000, 400000, 1000000, 3400000] + default: 400000 + + i2c-sda-hold-time-ns: + description: | + The property should contain the SDA hold time in nanoseconds. This option + is only supported in hardware blocks version 1.11a or newer or on + Microsemi SoCs. + + i2c-scl-falling-time-ns: + description: | + The property should contain the SCL falling time in nanoseconds. + This value is used to compute the tLOW period. + default: 300 + + i2c-sda-falling-time-ns: + description: | + The property should contain the SDA falling time in nanoseconds. + This value is used to compute the tHIGH period. + default: 300 + +unevaluatedProperties: false + +required: + - compatible + - reg + - interrupts + +examples: + - | + i2c@0x2701a000 { + compatible = "phytium,i2c-2.0"; + reg = <0x0 0x2701a000 0x0 0x1000 0x0 0x2701e000 0x0 0x200>; + interrupts = ; + clock-frequency = <400000>; + }; + - | + i2c@2701a000 { + compatible = "phytium,i2c-2.0"; + reg = <0x0 0x2701a000 0x0 0x1000 0x0 0x2701e000 0x0 0x200>; + interrupts = ; + clock-frequency = <400000>; + i2c-sda-hold-time-ns = <300>; + i2c-sda-falling-time-ns = <300>; + i2c-scl-falling-time-ns = <300>; + }; + - | + i2c@2701a000 { + compatible = "phytium,i2c-2.0"; + reg = <0x0 0x2701a000 0x0 0x1000 0x0 0x2701e000 0x0 0x200>; + #address-cells = <1>; + #size-cells = <0>; + clock-frequency = <400000>; + clocks = <&i2cclk>; + interrupts = ; + + eeprom@64 { + compatible = "atmel,24c02"; + reg = <0x64>; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/mmc/phytium,mci_v2.yaml b/Documentation/devicetree/bindings/mmc/phytium,mci_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..013e91c65ed5355e165170157a466d33639d5e38 --- /dev/null +++ b/Documentation/devicetree/bindings/mmc/phytium,mci_v2.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mmc/phytium,mci_v2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium Multimedia Card Interface controller + +description: | + The highspeed MMC host V2 controller on Phytium SoCs provides an interface + for MMC, SD and SDIO types of memory cards. + +maintainers: + - Lai Xueyu + +allOf: + - $ref: "mmc-controller.yaml" + +properties: + compatible: + const: phytium,mci_2.0 + + reg: + maxItems: 2 + description: mmc controller regfile base registers. + description: mmc controller share memory base registers. + + interrupts: + maxItems: 1 + description: mmc controller v2 interrupt. + + clocks: + maxItems: 1 + description: phandles to input clocks. 
+
+  clock-names:
+    items:
+      - const: phytium_mci_clk
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+
+examples:
+  - |
+    mmc0_v2: mmc@27018000 {
+      compatible = "phytium,mci_2.0";
+      reg = <0x0 0x27018000 0x0 0x1000>,
+            <0x0 0x26fea000 0x0 0x1000>;
+      interrupts = ;
+      clocks = <&sysclk_1200mhz>;
+      clock-names = "phytium_mci_clk";
+      status = "disabled";
+    };
+
+    &mmc0 {
+      bus-width = <4>;
+      max-frequency = <50000000>;
+      cap-sdio-irq;
+      cap-sd-highspeed;
+      sd-uhs-sdr12;
+      sd-uhs-sdr25;
+      sd-uhs-sdr50;
+      no-mmc;
+      status = "ok";
+    };
diff --git a/Documentation/devicetree/bindings/mtd/phytium,localbus.yaml b/Documentation/devicetree/bindings/mtd/phytium,localbus.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2bb792965d29423477cb3307ae35f8ea9d609f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/phytium,localbus.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/phytium,localbus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Localbus controller
+
+maintainers:
+  - Wang Hanmo
+
+properties:
+  compatible:
+    const: phytium,localbus
+
+  reg:
+    minItems: 2
+    description: first contains the register location and length,
+      second contains the memory mapping address and length.
+
+  reg-names:
+    minItems: 2
+    description: localbus,configuration registers address space,
+      lbc_mm, memory mapped address space.
+ +required: + - compatible + - reg + - reg-names + +examples: + - | + lbc: localbus@28003000 { + compatible = "phytium,localbus"; + #address-cells = <2>; + #size-cells = <1>; + reg = <0x00 0x28003000 0x00 0x1000 0x01 0x10000000 0x0 0x10000000>; + reg-names = "localbus", "lbc_mm"; + status = "disabled"; + norflash@0 { + device-type = <1>; + device-time-seq = <0x0003 0x043043 0x3f1f01 0x3fc40f>; + device-size = <0x10000000>; + bank-width = <2>; + }; + }; diff --git a/Documentation/devicetree/bindings/reset/phytium,reset.yaml b/Documentation/devicetree/bindings/reset/phytium,reset.yaml new file mode 100644 index 0000000000000000000000000000000000000000..82171da5f5e9a636d477a611eed6f01c0635be25 --- /dev/null +++ b/Documentation/devicetree/bindings/reset/phytium,reset.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +title: Phytium Reset Controller +====================================== + +This binding describes a reset-controller for Phytium SoCs. + +Required properties: +- compatible: + Usage: required + Value type: + Definition: must be: + "phytium,reset" + +- reg: + Usage: required + Value type: + Definition: must specify the base address and size of the register + space. + +- #reset-cells: + Usage: required + Value type: + Definition: must be 1; cell entry represents the reset index. + +Example: + +reset: reset@2807e000 { + compatible = "phytium,reset"; + reg = <0x0 0x2807e000 0x0 0x10>; + #reset-cells = <1>; +}; + +Specifying reset lines connected to IP modules +============================================== + +Device nodes that need access to reset lines should +specify them as a reset phandle in their corresponding node. + +Example: + +i2c0: i2c@28011000 { + ... + + resets = <&reset 0>; + + ... 
+};
diff --git a/Documentation/devicetree/bindings/serial/phytium,uart-2.0.yaml b/Documentation/devicetree/bindings/serial/phytium,uart-2.0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..df2c8bca6a6778db6a1f2dcba60c899a966b07e3
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/phytium,uart-2.0.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/phytium,uart-2.0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium serial UART v2
+
+maintainers:
+  - Hengyu Lan
+
+allOf:
+  - $ref: serial.yaml#
+
+# Need a custom select here or 'arm,primecell' will match on lots of nodes
+select:
+  properties:
+    compatible:
+      contains:
+        enum:
+          - phytium,uart-2.0
+  required:
+    - compatible
+
+properties:
+  compatible:
+    items:
+      - const: phytium,uart-2.0
+
+  reg:
+    maxItems: 2
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    description:
+      When present, the first clock listed must correspond to
+      the clock named UARTCLK on the IP block, i.e. the clock
+      to the external serial line, whereas the second clock
+      must correspond to the PCLK clocking the internal logic
+      of the block. Just listing one clock (the first one) is
+      deprecated.
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: uartclk
+      - const: apb_pclk
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    uart@27011000 {
+      compatible = "phytium,uart-2.0";
+      reg = <0x0 0x27011000 0x0 0x1000>,
+            <0x0 0x26fe4000 0x0 0x1000>;
+      interrupts = ;
+      clocks = <&sysclk_100mhz &sysclk_100mhz>;
+      clock-names = "uartclk", "apb_pclk";
+      status = "disabled";
+    };
+...
diff --git a/Documentation/devicetree/bindings/sound/phytium,codec-2.0.yaml b/Documentation/devicetree/bindings/sound/phytium,codec-2.0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..06b075b21b4629caeb0d09ea860f1a6506775613
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/phytium,codec-2.0.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/phytium,codec-2.0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Codec V2 Controller
+
+maintainers:
+  - Dai Jingtao
+  - Zhou Zheng
+
+properties:
+  compatible:
+    enum:
+      - phytium,codec-2.0
+
+  reg:
+    items:
+      - description: first is for physical base address and length of regfile.
+      - description: second is for sharemem to send command.
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+examples:
+  - |
+    codec_v2: codec@27010800 {
+      compatible = "phytium,codec-2.0";
+      #sound-dai-cells = <0x0>;
+      reg = <0x0 0x27010800 0x0 0x800>,
+            < 0x0 0x26fe3100 0x0 0x100>;
+      interrupts = ;
+      status = "disabled";
+    };
diff --git a/Documentation/devicetree/bindings/sound/phytium,i2s-2.0.yaml b/Documentation/devicetree/bindings/sound/phytium,i2s-2.0.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6dfd5c85d7f82c7f40fbce3c1b2da981c74ccfde
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/phytium,i2s-2.0.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/phytium,i2s-2.0.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium I2S V2 Controller
+
+maintainers:
+  - Dai Jingtao
+
+properties:
+  compatible:
+    enum:
+      - phytium,i2s-2.0
+
+  reg:
+    items:
+      - description: first is for physical base address and length of I2S regfile.
+      - description: second is for sharemem to send command.
+      - description: third is for physical base address and length of DMA_BDL controller.
+ + interrupts: + maxItems: 2 + + clocks: + items: + - description: Bus Clock + + clock-names: + items: + - const: i2s_clk + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + + +examples: + - | + i2s_v2: i2s@27010000 { + compatible = "phytium,i2s-2.0"; + reg = <0x0 0x27010000 0x0 0x800>, + <0x0 0x26fe3000 0x0 0x100>, + <0x0 0x18000000 0x0 0x1000>; + interrupts = , + ; + clocks = <&sysclk_1200mhz>; + clock-names = "i2s_clk"; + }; diff --git a/Documentation/devicetree/bindings/spi/phytium,spi-v2.yaml b/Documentation/devicetree/bindings/spi/phytium,spi-v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9dc1c19bd7a3ea168f021b690a2b14ec7dd3b2ea --- /dev/null +++ b/Documentation/devicetree/bindings/spi/phytium,spi-v2.yaml @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +title: Phytium SPI-V2 controller + +maintainers: + - Peng Min + +allOf: + - $ref: spi-v2-controller.yaml# + +properties: + compatible: + const: phytium,spi-v2 + + reg: + minItems: 1 + description: address and length of the sharememory and regfile + + interrupts: + maxItems: 1 + description: should contain one interrupt + + clocks: + maxItems: 1 + description: spi clock phandle + +required: + - compatible + - "#address-cells" + - "#size-cells" + - reg + - interrupts + - clocks + - num-cs + +examples: + - | + + spi0: spi@2800c000 { + compatible = "phytium,spi-2.0"; + interrupts = ; + reg = <0x0 0x27014000 0x0 0x1000 0x0 0x27007000 0x0 0x1000>; + clocks = <&sysclk_48mhz>; + num-cs = <4>; + }; diff --git a/Documentation/devicetree/bindings/staging/phytium-npu.yaml b/Documentation/devicetree/bindings/staging/phytium-npu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a8aa0fb20da48490b258bf22b5c7254400864862 --- /dev/null +++ b/Documentation/devicetree/bindings/staging/phytium-npu.yaml @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: 
http://devicetree.org/schemas/staging/phytium-npu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# +title: Phytium NPU controller +maintainers: + - Cheng Quan +properties: + compatible: + const: phytium,npu + + reg: + items: 2 + - description: NPU registers and PM registers + + interrupts: + maxItems: 1 + + dma-mask: + maxItems: 1 + - description: NPU dma capability + +required: + - compatible + - reg + - interrupts + +examples: + - | + npu@26d00000 { + compatible = "phytium,npu"; + reg = <0x0 0x26d00000 0x0 0x80000 + 0x0 0x26fcc060 0x0 0x20>; + interrupts = <0 74 4>; + dma-mask=<0xff 0xffffffff>; + }; diff --git a/Documentation/devicetree/bindings/usb/phytium,usb2-2.0.yaml b/Documentation/devicetree/bindings/usb/phytium,usb2-2.0.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81ed29c7d347ef573ac3d84e03af0167abc0948a --- /dev/null +++ b/Documentation/devicetree/bindings/usb/phytium,usb2-2.0.yaml @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/phytium,usb2-2.0.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium USBHS-DRD controller bindings + +maintainers: + - Chen Zhenhua + +properties: + compatible: + const: phytium,usb2-2.0 + + reg: + items: + - description: USB controller registers + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + usb2_0: usb2@26f00000 { + compatible = "phytium,usb2-2.0"; + reg = <0x0 0x26f00000 0x0 0x4000>, + <0x0 0x26f04000 0x0 0x4000>, + <0x0 0x26f08000 0x0 0x18000>; + interrupts = , //host & device interrupt + ; //otg interrupt + }; diff --git a/Documentation/devicetree/bindings/usb/phytium.role-sw.yaml b/Documentation/devicetree/bindings/usb/phytium.role-sw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e42e293dbb1ab65ec7dc728f4e6d81307a5053d2 --- /dev/null +++ 
b/Documentation/devicetree/bindings/usb/phytium.role-sw.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+
+title: Phytium USB Type-C controller
+
+maintainers:
+  - Wu Jinyong
+
+properties:
+  compatible:
+    enum:
+      - phytium,role-sw
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  connector:
+    type: object
+    $ref: ../connector/usb-connector.yaml
+    unevaluatedProperties: false
+
+    description:
+      Properties for usb c connector.
+
+    properties:
+      compatible:
+        const: usb-c-connector
+
+      power-role: true
+
+      data-role: true
+
+      try-power-role: true
+
+    required:
+      - compatible
+
+required:
+  - compatible
+  - reg
+  - connector
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    i2c {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
+      typec@2c {
+        compatible = "phytium,role-sw";
+        reg = <0x2c>;
+        interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+        interrupt-parent = <&gpio0>;
+
+        typec_con: connector {
+          compatible = "usb-c-connector";
+          power-role = "dual";
+          data-role = "dual";
+          try-power-role = "source";
+
+          ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            port@0 {
+              reg = <0>;
+              typec_con_ep: endpoint {
+                remote-endpoint = <&usbotg_hs_ep>;
+              };
+            };
+          };
+        };
+      };
+    };
+...
diff --git a/MAINTAINERS b/MAINTAINERS index f92fd53bf357fd680068cc229d6c6c2a223c690c..3df7cd72785c816f82cc3dd5d900605f648151e0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6151,9 +6151,11 @@ S: Maintained T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/driver-api/dma-buf.rst F: drivers/dma-buf/ +F: drivers/dma-buf/heaps/phytium_npu_heap.c F: include/linux/*fence.h F: include/linux/dma-buf.h F: include/linux/dma-resv.h +F: include/linux/phytium_npu_dma_buf_heap.h K: \bdma_(?:buf|fence|resv)\b DMA GENERIC OFFLOAD ENGINE SUBSYSTEM @@ -17436,6 +17438,7 @@ F: Documentation/devicetree/bindings/gpu/phytium,dc.yaml F: drivers/hwmon/tacho-phytium.c F: Documentation/devicetree/bindings/hwlock/phytium,hwspinlock.yaml F: Documentation/devicetree/bindings/hwmon/phytium,tacho.yaml +F: Documentation/devicetree/bindings/i2c/phytium,i2c-2.0.yaml F: Documentation/devicetree/bindings/i2c/phytium,i2c.yaml F: Documentation/devicetree/bindings/i3c/phytium,i3c-master.yaml F: Documentation/devicetree/bindings/media/phytium,jpeg.yaml @@ -17448,19 +17451,29 @@ F: Documentation/devicetree/bindings/leds/phytnet_led.yaml F: Documentation/devicetree/bindings/mailbox/phytium,mbox.yaml F: Documentation/devicetree/bindings/misc/phytium,snoop-ctrl.yaml F: Documentation/devicetree/bindings/mmc/phytium,mci.yaml +F: Documentation/devicetree/bindings/mmc/phytium,mci_v2.yaml F: Documentation/devicetree/bindings/mmc/phytium,sdci.yaml +F: Documentation/devicetree/bindings/mtd/phytium,localbus.yaml F: Documentation/devicetree/bindings/mtd/phytium,nfc.yaml F: Documentation/devicetree/bindings/net/can/phytium,can.yaml F: Documentation/devicetree/bindings/net/phytmac.yaml F: Documentation/devicetree/bindings/pci/phytium,pd2008-pcie-ep.yaml F: Documentation/devicetree/bindings/pwm/phytium,pwm.yaml +F: Documentation/devicetree/bindings/reset/phytium,reset.yaml F: Documentation/devicetree/bindings/rng/phytium,rng.yaml +F: Documentation/devicetree/bindings/serial/phytium,uart-2.0.yaml +F: 
Documentation/devicetree/bindings/sound/phytium,codec-2.0.yaml F: Documentation/devicetree/bindings/sound/phytium,hda.yaml +F: Documentation/devicetree/bindings/sound/phytium,i2s-2.0.yaml F: Documentation/devicetree/bindings/sound/phytium,i2s.yaml F: Documentation/devicetree/bindings/spi/phytium,qspi-nor.yaml +F: Documentation/devicetree/bindings/spi/phytium,spi-v2.yaml F: Documentation/devicetree/bindings/spi/phytium,spi.yaml +F: Documentation/devicetree/bindings/usb/phytium,usb2-2.0.yaml F: Documentation/devicetree/bindings/usb/phytium,usb2.yaml +F: Documentation/devicetree/bindings/usb/phytium.role-sw.yaml F: Documentation/devicetree/bindings/w1/phytium,w1.yaml +F: Documentation/devicetree/bindings/staging/phytium-npu.yaml F: arch/arm64/boot/dts/phytium/* F: arch/arm64/include/asm/ras.h F: arch/arm64/kernel/ras.c @@ -17478,6 +17491,7 @@ F: drivers/gpio/gpio-phytium-sgpio.c F: drivers/gpu/drm/phytium/* F: drivers/hwspinlock/phytium_hwspinlock.c F: drivers/i2c/busses/i2c-phytium-* +F: drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-* F: drivers/i3c/master/i3c-master-acpi.c F: drivers/i3c/master/i3c-master-acpi.h F: drivers/i3c/master/i3c-master-phytium.c @@ -17491,7 +17505,10 @@ F: drivers/mfd/phytium_px210_i2s_lsd.c F: drivers/mfd/phytium_px210_i2s_mmd.c F: drivers/misc/phytium-snoop-ctrl.c F: drivers/mmc/host/phytium-mci* +F: drivers/mmc/host/phytium-mci-plat-v2.c +F: drivers/mmc/host/phytium-mci-v2* F: drivers/mmc/host/phytium-sdci.* +F: drivers/mtd/maps/phytium_lbc.c F: drivers/mtd/nand/raw/phytium_nand* F: drivers/mtd/parsers/acpipart_core.c F: drivers/net/can/phytium/* @@ -17501,16 +17518,37 @@ F: drivers/net/ethernet/phytium/* F: drivers/net/ethernet/stmicro/stmmac/dwmac-phytium.c F: drivers/pci/controller/pcie-phytium* F: drivers/perf/phytium/* +F: drivers/perf/phytium/phytium_dmu_pmu_pd2408.c F: drivers/perf/phytium/phytium_pcie_pmu.c F: drivers/pwm/pwm-phytium.c +F: drivers/reset/reset-phytium.c F: drivers/spi/spi-phytium* +F: drivers/spi/spi-phytium-common.c 
F: drivers/spi/spi-phytium-dma.c +F: drivers/spi/spi-phytium-plat-v2.c F: drivers/spi/spi-phytium-qspi.c +F: drivers/spi/spi-phytium-v2.c +F: drivers/tty/serial/phytium-uart-v2.c F: drivers/tty/serial/phytium-uart.c F: drivers/usb/phytium/* +F: drivers/usb/phytium/phytium_usb_v2* +F: drivers/usb/typec/role-switch-phytium.c F: drivers/w1/masters/phytium_w1.c +F: drivers/staging/phytium-npu/* F: sound/pci/hda/hda_phytium.* +F: sound/soc/codecs/phytium-codec-v2.* F: sound/soc/phytium/* +F: include/linux/pswiotlb.h +F: include/trace/events/pswiotlb.h +F: kernel/dma/Kconfig +F: kernel/dma/Makefile +F: kernel/dma/phytium/Kconfig +F: kernel/dma/phytium/Makefile +F: kernel/dma/phytium/pswiotlb-direct.c +F: kernel/dma/phytium/pswiotlb-dma.h +F: kernel/dma/phytium/pswiotlb-iommu.c +F: kernel/dma/phytium/pswiotlb-mapping.c +F: kernel/dma/phytium/pswiotlb.c QAT DRIVER M: Giovanni Cabiddu diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 48b99b6fc3d5bc8985943bb1b894a3e8802fcb10..e30dc74b6dcdb6153c67ff577ffed9017f352be7 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -21,6 +21,7 @@ config ARM64 select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CURRENT_STACK_POINTER + select ARCH_HAS_COPY_MC if ACPI_APEI_GHES select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT @@ -31,6 +32,7 @@ config ARM64 select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV select ARCH_HAS_KEEPINITRD + select ARCH_HAS_MC_EXTABLE if ARCH_HAS_COPY_MC select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE @@ -2372,6 +2374,9 @@ config ARCH_HIBERNATION_HEADER def_bool y depends on HIBERNATION +config ARCH_HAS_MC_EXTABLE + def_bool y + config ARCH_SUSPEND_POSSIBLE def_bool y diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 9040497a8f70c1c09a17e724342d72ab9b1072f3..b3be4336813784d38ae0f56e43623e5fc9dd184d 
100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -247,6 +247,7 @@ config ARCH_NPCM config ARCH_PHYTIUM bool "Phytium SoC Family" select ARM_GIC_PHYTIUM_2500 + select ARCH_MIGHT_HAVE_PC_SERIO help This enables support for Phytium ARMv8 SoC family, including: - Phytium Server SoC Family diff --git a/arch/arm64/boot/dts/phytium/pe2202.dtsi b/arch/arm64/boot/dts/phytium/pe2202.dtsi index 4e6446da32901c573073749efa08ffc73c67d046..5d42cccc6cb23e5ebb3e8362570315f50d4d3b02 100644 --- a/arch/arm64/boot/dts/phytium/pe2202.dtsi +++ b/arch/arm64/boot/dts/phytium/pe2202.dtsi @@ -233,3 +233,7 @@ macb3: ethernet@32012000 { status = "disabled"; }; }; + +&i2s_dp1 { + dai-name = "phytium-i2s-dp0"; +}; diff --git a/arch/arm64/configs/cobalt.config b/arch/arm64/configs/cobalt.config index eed0b27a3053b080c523daf4bc34e716205c1267..e60c58ee8cfe5f39a6ca3eac24dbbb57b1dc922d 100644 --- a/arch/arm64/configs/cobalt.config +++ b/arch/arm64/configs/cobalt.config @@ -1,4 +1,4 @@ -CONFIG_LOCALVERSION="-dovetail2-phytium-embeded-v3.1" +CONFIG_LOCALVERSION="-dovetail2-phytium-embeded-v3.2" # CONFIG_DRM_ETNAVIV is not set # CONFIG_DRM_RADEON is not set # CONFIG_DRM_AMDGPU is not set diff --git a/arch/arm64/configs/phytium_defconfig b/arch/arm64/configs/phytium_defconfig index 9797228499f2b8dae92611888c23047280eebb36..21c153d16238d58ba9371fa31f66d9c9020d0b5b 100644 --- a/arch/arm64/configs/phytium_defconfig +++ b/arch/arm64/configs/phytium_defconfig @@ -1,4 +1,4 @@ -CONFIG_LOCALVERSION="-phytium-embedded-v3.1" +CONFIG_LOCALVERSION="-phytium-embedded-v3.2" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -194,6 +194,7 @@ CONFIG_VEXPRESS_CONFIG=y CONFIG_MHI_BUS_PCI_GENERIC=m CONFIG_ARM_SCMI_PROTOCOL=y CONFIG_ARM_SCPI_PROTOCOL=y +CONFIG_ARM_SDE_INTERFACE=y CONFIG_EFI_CAPSULE_LOADER=y CONFIG_GNSS=m CONFIG_GNSS_MTK_SERIAL=m @@ -346,6 +347,8 @@ CONFIG_MWIFIEX_PCIE=m CONFIG_MT7921E=m CONFIG_RTW88=m CONFIG_RTW88_8821CS=m +CONFIG_RTW89=m 
+CONFIG_RTW89_8852BE=m CONFIG_RSI_91X=m CONFIG_WL18XX=m CONFIG_WLCORE_SDIO=m @@ -382,6 +385,7 @@ CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_SERIAL_PHYTIUM_PCI=y +CONFIG_SERIAL_PHYTIUM_UART_V2=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_SERIAL_FSL_LPUART=y @@ -408,6 +412,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_GPIO=m CONFIG_I2C_PHYTIUM_PCI=y CONFIG_I2C_PHYTIUM_PLATFORM=y +CONFIG_I2C_V2_PLATFORM=y CONFIG_I2C_RK3X=y CONFIG_I2C_CROS_EC_TUNNEL=y CONFIG_I3C=y @@ -420,6 +425,7 @@ CONFIG_SPI_DW_MMIO=m CONFIG_SPI_PHYTIUM_PLAT=m CONFIG_SPI_PHYTIUM_PCI=m CONFIG_SPI_PHYTIUM_QSPI=m +CONFIG_SPI_PHYTIUM_PLAT_V2=m CONFIG_SPI_PL022=y CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y @@ -590,6 +596,7 @@ CONFIG_DRM_PANFROST=m CONFIG_DRM_TIDSS=m CONFIG_DRM_PHYTIUM=m CONFIG_FB=y +CONFIG_FB_EFI=y CONFIG_FB_MODE_HELPERS=y CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_LP855X=m @@ -602,6 +609,8 @@ CONFIG_SND_ALOOP=m CONFIG_SND_HDA_PHYTIUM=y CONFIG_SND_HDA_CODEC_REALTEK=y CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_AUDIO_MIDI_V2=y CONFIG_SND_SOC=y CONFIG_SND_SOC_FSL_ASRC=m CONFIG_SND_SOC_FSL_SAI=m @@ -615,6 +624,8 @@ CONFIG_SND_SOC_IMX_AUDMUX=m CONFIG_SND_SOC_PHYTIUM_I2S=y CONFIG_SND_PMDK_ES8388=y CONFIG_SND_PMDK_ES8336=y +CONFIG_SND_SOC_PHYTIUM_I2S_V2=y +CONFIG_SND_SOC_PHYTIUM_MACHINE_V2=y CONFIG_SND_PMDK_DP=y CONFIG_SND_SOC_ADAU7002=m CONFIG_SND_SOC_AK4613=m @@ -631,6 +642,7 @@ CONFIG_SND_SOC_MAX98927=m CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m CONFIG_SND_SOC_PCM3168A_I2C=m +CONFIG_SND_SOC_PHYTIUM_CODEC_V2=y CONFIG_SND_SOC_RK817=m CONFIG_SND_SOC_RT5640=m CONFIG_SND_SOC_RT5659=m @@ -687,6 +699,8 @@ CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y CONFIG_USB_ISP1760=y CONFIG_USB_PHYTIUM=m +CONFIG_USB_PHYTIUM_PCI_V2=y +CONFIG_USB_PHYTIUM_V2=y CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_CP210X=m CONFIG_USB_SERIAL_FTDI_SIO=m @@ -698,7 +712,7 @@ CONFIG_USB_ULPI=y 
CONFIG_USB_GADGET=y CONFIG_USB_SNP_UDC_PLAT=y CONFIG_USB_BDC_UDC=y -CONFIG_USB_CONFIGFS=m +CONFIG_USB_CONFIGFS=y CONFIG_USB_CONFIGFS_SERIAL=y CONFIG_USB_CONFIGFS_ACM=y CONFIG_USB_CONFIGFS_OBEX=y @@ -709,14 +723,16 @@ CONFIG_USB_CONFIGFS_RNDIS=y CONFIG_USB_CONFIGFS_EEM=y CONFIG_USB_CONFIGFS_MASS_STORAGE=y CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_TYPEC=m +CONFIG_TYPEC=y CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m CONFIG_UCSI_CCG=m +CONFIG_UCSI_ACPI=m CONFIG_UCSI_PMIC_GLINK=m CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_PHYTIUM_RS=y CONFIG_TYPEC_HD3SS3220=m CONFIG_TYPEC_MUX_FSA4480=m CONFIG_TYPEC_MUX_GPIO_SBU=m @@ -787,6 +803,8 @@ CONFIG_PHYTIUM_GDMA=y CONFIG_PL330_DMA=y CONFIG_QCOM_HIDMA_MGMT=y CONFIG_QCOM_HIDMA=y +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_HEAPS_NPU_SYSTEM=y CONFIG_UIO=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_DMEM_GENIRQ=m @@ -801,12 +819,16 @@ CONFIG_XEN_GRANT_DEV_ALLOC=y CONFIG_STAGING=y CONFIG_STAGING_MEDIA=y CONFIG_VIDEO_MAX96712=m +CONFIG_PHYTIUM_NPU=y +CONFIG_NPU_PLATFORM=y CONFIG_CHROME_PLATFORMS=y CONFIG_CROS_EC=y CONFIG_CROS_EC_I2C=y CONFIG_CROS_EC_RPMSG=m CONFIG_CROS_EC_SPI=y CONFIG_CROS_EC_CHARDEV=m +CONFIG_CROS_EC_TYPEC=m +CONFIG_CROS_TYPEC_SWITCH=m CONFIG_CLK_VEXPRESS_OSC=y CONFIG_COMMON_CLK_RK808=y CONFIG_COMMON_CLK_SCMI=y @@ -860,6 +882,7 @@ CONFIG_IIO_CROS_EC_SENSORS=m CONFIG_IIO_ST_LSM6DSX=m CONFIG_IIO_CROS_EC_LIGHT_PROX=m CONFIG_SENSORS_ISL29018=m +CONFIG_STK3310=y CONFIG_VCNL4000=m CONFIG_IIO_ST_MAGN_3AXIS=m CONFIG_IIO_CROS_EC_BARO=m diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h index 980d1dd8e1a32bfc249af74a8719bc56dfee4111..84654a1797edf5820adeba5e8e46889d161bef2e 100644 --- a/arch/arm64/include/asm/asm-extable.h +++ b/arch/arm64/include/asm/asm-extable.h @@ -69,6 +69,21 @@ .endif .endm +/* + * Emit an entry into the machine check exception table + */ +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + .macro _asm_mc_extable, from, to + .pushsection __mc_ex_table, "a" + .align 3 + 
.long (\from - .), (\to - .) + .popsection + .endm +#else + .macro _asm_mc_extable, from, to + .endm +#endif + #else /* __ASSEMBLY__ */ #include @@ -82,18 +97,38 @@ ".short (" data ")\n" \ ".popsection\n" +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +#define __ASM_MC_EXTABLE(insn, fixup, type, data) \ + ".pushsection __mc_ex_table, \"a\"\n" \ + ".align 2\n" \ + ".long ((" insn ") - .)\n" \ + ".long ((" fixup ") - .)\n" \ + ".short (" type ")\n" \ + ".short (" data ")\n" \ + ".popsection\n" +#else +#define __ASM_MC_EXTABLE(insn, fixup, type, data) +#endif + #define EX_DATA_REG(reg, gpr) \ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" #define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \ __DEFINE_ASM_GPR_NUMS \ __ASM_EXTABLE_RAW(#insn, #fixup, \ + __stringify(EX_TYPE_UACCESS_ERR_ZERO), \ + "(" \ + EX_DATA_REG(ERR, err) " | " \ + EX_DATA_REG(ZERO, zero) \ + ")") \ + __ASM_MC_EXTABLE(#insn, #fixup, \ __stringify(EX_TYPE_UACCESS_ERR_ZERO), \ "(" \ EX_DATA_REG(ERR, err) " | " \ EX_DATA_REG(ZERO, zero) \ ")") + #define _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, zero) \ __DEFINE_ASM_GPR_NUMS \ __ASM_EXTABLE_RAW(#insn, #fixup, \ diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 5b6efe8abeeb4462ccfb033bdf71663f3aedfb5d..a9b1740cc2bc117380c6eb22e97003cb5760de7d 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -59,7 +59,12 @@ alternative_else_nop_endif #define USER(l, x...) \ 9999: x; \ - _asm_extable_uaccess 9999b, l + _asm_extable_uaccess 9999b, l; \ + _asm_mc_extable 9999b, l + +#define USER_MC(l, x...) \ +9999: x; \ + _asm_mc_extable 9999b, l /* * Generate the assembly for LDTR/STTR with exception table entries. 
@@ -73,6 +78,9 @@ alternative_else_nop_endif _asm_extable_uaccess 8888b, \l; _asm_extable_uaccess 8889b, \l; + + _asm_mc_extable 8888b, \l; + _asm_mc_extable 8889b, \l; .endm .macro user_stp l, reg1, reg2, addr, post_inc @@ -89,5 +97,7 @@ alternative_else_nop_endif add \addr, \addr, \post_inc; _asm_extable_uaccess 8888b, \l; + + _asm_mc_extable 8888b, \l; .endm #endif diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 72b0e71cc3de88bbc3673d19feeb56e924ae2069..fe3104242f7b4efcc4895f1c7389fdcc0a6ae61b 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex, #endif /* !CONFIG_BPF_JIT */ bool fixup_exception(struct pt_regs *regs); +extern int fixup_exception_mc(struct pt_regs *regs); #endif diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index e5bc54522e7112772e294b7cc0e34a01948f741f..035424ca1cff9d1bbc2b64043ad6e8bac78e2db8 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -91,6 +91,8 @@ #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ +extern int sysctl_machine_check_safe; + #ifndef CONFIG_ARM64_FORCE_52BIT #define arch_get_mmap_end(addr, len, flags) \ (((addr) > DEFAULT_MAP_WINDOW) ? 
TASK_SIZE : DEFAULT_MAP_WINDOW) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 34e3179075244932422dc708508cd2d097b498fe..c0b2ccdbcbad626c052c04c1acc19151dac69af1 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -25,7 +25,7 @@ .endm .macro strb1 reg, ptr, val - strb \reg, [\ptr], \val + USER_MC(9998f, strb \reg, [\ptr], \val) .endm .macro ldrh1 reg, ptr, val @@ -33,7 +33,7 @@ .endm .macro strh1 reg, ptr, val - strh \reg, [\ptr], \val + USER_MC(9998f, strh \reg, [\ptr], \val) .endm .macro ldr1 reg, ptr, val @@ -41,7 +41,7 @@ .endm .macro str1 reg, ptr, val - str \reg, [\ptr], \val + USER_MC(9998f, str \reg, [\ptr], \val) .endm .macro ldp1 reg1, reg2, ptr, val @@ -49,7 +49,7 @@ .endm .macro stp1 reg1, reg2, ptr, val - stp \reg1, \reg2, [\ptr], \val + USER_MC(9998f, stp \reg1, \reg2, [\ptr], \val) .endm end .req x5 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 80223177260857c97246774530e5779cf07f6c37..9cb4f9dcf56f5f3aa9a11c53604d03ff9ed2bd56 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -20,7 +20,7 @@ * x0 - bytes not copied */ .macro ldrb1 reg, ptr, val - ldrb \reg, [\ptr], \val + USER_MC(9998f, ldrb \reg, [\ptr], \val) .endm .macro strb1 reg, ptr, val @@ -28,7 +28,7 @@ .endm .macro ldrh1 reg, ptr, val - ldrh \reg, [\ptr], \val + USER_MC(9998f, ldrh \reg, [\ptr], \val) .endm .macro strh1 reg, ptr, val @@ -36,7 +36,7 @@ .endm .macro ldr1 reg, ptr, val - ldr \reg, [\ptr], \val + USER_MC(9998f, ldr \reg, [\ptr], \val) .endm .macro str1 reg, ptr, val @@ -44,7 +44,7 @@ .endm .macro ldp1 reg1, reg2, ptr, val - ldp \reg1, \reg2, [\ptr], \val + USER_MC(9998f, ldp \reg1, \reg2, [\ptr], \val) .endm .macro stp1 reg1, reg2, ptr, val @@ -64,7 +64,7 @@ SYM_FUNC_START(__arch_copy_to_user) 9997: cmp dst, dstin b.ne 9998f // Before being absolutely sure we couldn't copy anything, try harder - ldrb tmp1w, [srcin] +USER_MC(9998f, ldrb tmp1w, 
[srcin]) USER(9998f, sttrb tmp1w, [dst]) add dst, dst, #1 9998: sub x0, end, dst // bytes not copied diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 228d681a871594f05acea36f572b31363f8000c6..4c0e23ddf9f064bccef11bb89e169bfa8e391678 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -76,3 +76,15 @@ bool fixup_exception(struct pt_regs *regs) BUG(); } + +int fixup_exception_mc(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_mc_exception_tables(instruction_pointer(regs)); + if (!fixup) + return 0; + + regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; + return 1; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 510f6cb0b7fb3d2d8d5c7a54b696fde7f6f8f6d9..ff2566fea0c756ab9f81597f99d83493fca84b9c 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -43,6 +43,8 @@ #include #include +int sysctl_machine_check_safe = 1; + struct fault_info { int (*fn)(unsigned long far, unsigned long esr, struct pt_regs *regs); @@ -740,6 +742,34 @@ static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) return 1; /* "fault" */ } +static bool arm64_do_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, int code) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC)) + return false; + + if (!sysctl_machine_check_safe) + return false; + + if (user_mode(regs)) + return false; + + if (apei_claim_sea(regs) < 0) + return false; + + if (!fixup_exception_mc(regs)) + return false; + + if (current->flags & PF_KTHREAD) + return true; + + set_thread_esr(0, esr); + arm64_force_sig_fault(sig, code, addr, + "Uncorrected memory error on access to user memory\n"); + + return true; +} + static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -767,7 +797,8 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) */ siaddr = untagged_addr(far); } - arm64_notify_die(inf->name, 
regs, inf->sig, inf->code, siaddr, esr); + if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code)) + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); out: mark_trap_exit(ARM64_TRAP_SEA, regs); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 8a0f8604348b877e6cccd08a833638c50fa39ae7..1463dc657a98114a22ea8f6493daba1b6af28bfd 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -32,6 +32,10 @@ #include #include +#ifdef CONFIG_PSWIOTLB +#include +#endif + #include #include #include @@ -498,6 +502,13 @@ void __init mem_init(void) swiotlb_init(swiotlb, SWIOTLB_VERBOSE); +#ifdef CONFIG_PSWIOTLB + /* enable pswiotlb default */ + if ((pswiotlb_force_disable != true) && + is_phytium_ps_socs()) + pswiotlb_init(1, PSWIOTLB_VERBOSE); +#endif + /* this will put all unused low memory onto the freelists */ memblock_free_all(); diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index a81bd825087cda44511b73279ccb4edbf2c8a4e4..7767724f92dee9fec5d58a42aca366319ef3a7e0 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -392,6 +392,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n) return n; } +#define copy_mc_to_user copy_mc_to_user #endif extern long __copy_from_user_flushcache(void *dst, const void __user *src, diff --git a/drivers/base/core.c b/drivers/base/core.c index c9fb3243e353eaecafe4890a7f31891be4b3f800..e300539ff4d6ce5c33da32ce0a143bcbd1855b26 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -32,6 +32,10 @@ #include #include /* for dma_default_coherent */ +#ifdef CONFIG_PSWIOTLB +#include +#endif + #include "base.h" #include "physical_location.h" #include "power/power.h" @@ -3134,6 +3138,11 @@ void device_initialize(struct device *dev) dev->dma_coherent = dma_default_coherent; #endif swiotlb_dev_init(dev); +#ifdef CONFIG_PSWIOTLB + if ((pswiotlb_force_disable != true) && + is_phytium_ps_socs()) + 
pswiotlb_dev_init(dev); +#endif } EXPORT_SYMBOL_GPL(device_initialize); diff --git a/drivers/devfreq/phytium_dmu.c b/drivers/devfreq/phytium_dmu.c index 1fc23b486962b3e6c1fe58891958fdc94881c118..46062b2a533695b0d01649166ff816efe4407b16 100644 --- a/drivers/devfreq/phytium_dmu.c +++ b/drivers/devfreq/phytium_dmu.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #define DEBUG @@ -25,6 +27,20 @@ #define UPDATE_INTERVAL_MS 10 +#define DMU_PMU_STRIDE 0x80000 + +#define AXI_MONITOR2_L 0x084 +#define AXI_MONITOR3_L 0x08c +#define AXI_MONITOR_EN 0X01c +#define TIMER_START 0X000 +#define TIMER_STOP 0X004 +#define CLEAR_EVENT 0X008 + +#define MCU_STRIDE 0x00080000 +/* PMU notifier event */ +#define DDR_PMU_NOTICE_START 0x0 +#define DDR_PMU_NOTICE_STOP 0x1 + #define DMUFREQ_DRIVER_VERSION "1.0.0" struct phytium_dmufreq { @@ -36,14 +52,72 @@ struct phytium_dmufreq { unsigned long rate, target_rate; unsigned long bandwidth; + int max_count; + int cnt; + + void __iomem **base; + + unsigned long *read_bw; + unsigned long *write_bw; struct timer_list sampling; struct work_struct work; + struct notifier_block nb; + + /*dmu to pmu operation status identification 0: not operable, 1: operable*/ + bool pmu_active; + + unsigned long last_bust_time; + unsigned int freq_count; unsigned long freq_table[]; }; +struct acpi_result { + int status; + unsigned long long value; +}; + +static inline void dmu_write32(struct phytium_dmufreq *priv, int dmu, + unsigned long offest, unsigned long value) +{ + writel_relaxed(value, priv->base[dmu] + offest); +} + +static inline unsigned long dmu_read32(struct phytium_dmufreq *priv, int dmu, + unsigned long offest) +{ + return readl_relaxed(priv->base[dmu] + offest); +} + +#if IS_ENABLED(CONFIG_PHYT_DMU_PMU_PD2408) +BLOCKING_NOTIFIER_HEAD(dmu_pmu_notifier_chain); +EXPORT_SYMBOL(dmu_pmu_notifier_chain); + +static int dmu_pmu_notifier_call(struct notifier_block *nb, unsigned long event, void *data) +{ + struct phytium_dmufreq *priv = 
container_of(nb, struct phytium_dmufreq, nb); + struct device *dev = priv->dev; + + switch (event) { + case DDR_PMU_NOTICE_START: + priv->pmu_active = false; + dev_dbg(dev, "DDR PMU START: Stopping monitoring\n"); + break; + case DDR_PMU_NOTICE_STOP: + priv->cnt = 0; + priv->pmu_active = true; + dev_dbg(dev, "DDR PMU STOP: Resuming monitoring\n"); + break; + default: + break; + } + + return NOTIFY_OK; +} +#endif + static ktime_t stop; static int phytium_dmu_set_freq(struct device *dev, unsigned long freq) @@ -119,24 +193,104 @@ static int phytium_dmu_get_cur_freq(struct device *dev, unsigned long *freq) return 0; } -static int phytium_read_perf_counter(struct device *dev) +struct acpi_result phytium_current_enabled_channels(struct device *dev) { - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *package, *elements; acpi_handle handle = ACPI_HANDLE(dev); acpi_status status; + unsigned long long enabled_channels; + struct acpi_result result; - status = acpi_evaluate_object(handle, "PDMU", NULL, &buffer); + status = acpi_evaluate_integer(handle, "CHAN", NULL, &enabled_channels); if (ACPI_FAILURE(status)) { - dev_err(dev, "No PDMU method\n"); - return -EIO; + dev_err(dev, "Failed to evaluate CHAN method: ACPI status 0x%x\n", status); + result.status = -EIO; + result.value = 0; + return result; } + dev_dbg(dev, "enabled_channels = %lld\n", enabled_channels); + result.status = 0; + result.value = enabled_channels; + return result; +} - package = buffer.pointer; +struct acpi_result phytium_controller_bit_width(struct device *dev) +{ + acpi_handle handle = ACPI_HANDLE(dev); + acpi_status status; + unsigned long long single_bit_width; + struct acpi_result result; - elements = package->package.elements; + status = acpi_evaluate_integer(handle, "BITW", NULL, &single_bit_width); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Failed to evaluate BITW method: ACPI status 0x%x\n", status); + result.status = -EIO; + result.value = 0; + return result; 
+ } + dev_dbg(dev, "single_bit_width = %lld(MB/s)\n", single_bit_width); + result.status = 0; + result.value = single_bit_width; + return result; +} - return elements[0].integer.value + elements[1].integer.value; +struct acpi_result phytium_dmufreq_state(struct device *dev) +{ + struct acpi_result result; + acpi_handle handle = ACPI_HANDLE(dev); + acpi_status status; + unsigned long long dmufreq_state; + + status = acpi_evaluate_integer(handle, "STAT", NULL, &dmufreq_state); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Failed to evaluate STAT method: ACPI status 0x%x\n", status); + result.status = -EIO; + result.value = 0; + return result; + } + dev_dbg(dev, "dmufreq_state = %lld\n", dmufreq_state); + result.status = 0; + result.value = dmufreq_state; + return result; +} + +struct acpi_result phytium_read_threshold_value(struct device *dev) +{ + acpi_handle handle = ACPI_HANDLE(dev); + acpi_status status; + unsigned long long single_threshold_value; + struct acpi_result result; + + status = acpi_evaluate_integer(handle, "BAND", NULL, &single_threshold_value); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Failed to evaluate BAND method: ACPI status 0x%x\n", status); + result.status = -EIO; + result.value = 0; + return result; + } + dev_dbg(dev, "single_threshold_value = %llu\n", single_threshold_value); + result.status = 0; + result.value = single_threshold_value; + return result; +} + +static u64 phytium_dmufreq_get_real_bw(struct phytium_dmufreq *priv) +{ + unsigned long peak_bw = 0; + unsigned long sum_peak_bw = 0; + + for (int i = 0; i < priv->max_count; i++) { + priv->read_bw[i] = dmu_read32(priv, i, AXI_MONITOR2_L); + priv->write_bw[i] = dmu_read32(priv, i, AXI_MONITOR3_L); + + /*clear the counter(only pmu_reg active)*/ + dmu_write32(priv, i, CLEAR_EVENT, 0x1); + dmu_write32(priv, i, TIMER_START, 0x1); + sum_peak_bw = priv->read_bw[i] + priv->write_bw[i]; + if (sum_peak_bw > peak_bw) + peak_bw = sum_peak_bw; + } + dev_dbg(priv->dev, "peak_bw = %lu\n", 
peak_bw); + return peak_bw; } static void sampling_timer_callback(struct timer_list *t) @@ -149,18 +303,28 @@ static void sampling_timer_callback(struct timer_list *t) static void sampling_work_handle(struct work_struct *work) { struct phytium_dmufreq *priv = container_of(work, struct phytium_dmufreq, work); - struct device *dev = priv->dev; static unsigned long load_counter; static int count; unsigned long current_load; - current_load = phytium_read_perf_counter(dev); - - load_counter += current_load; - count += 1; - + /*if the pmu_reg is not active, return the last busy time(pmu_reg not work)*/ + if (!priv->pmu_active) { + priv->bandwidth = priv->last_bust_time; + mod_timer(&priv->sampling, jiffies + msecs_to_jiffies(UPDATE_INTERVAL_MS)); + return; + } + if (priv->cnt > 0) { + for (int i = 0; i < priv->max_count ; i++) { + dmu_write32(priv, i, AXI_MONITOR_EN, 0x101); + dmu_write32(priv, i, TIMER_STOP, 0x1); + } + current_load = phytium_dmufreq_get_real_bw(priv); + load_counter += current_load; + count += 1; + } + priv->cnt = 1; if (ktime_after(ktime_get(), stop)) { - priv->bandwidth = (load_counter / count) / 2; + priv->bandwidth = div64_u64(load_counter, count); load_counter = 0; count = 0; stop = ktime_add_ms(ktime_get(), priv->profile.polling_ms); @@ -173,14 +337,24 @@ static int phytium_dmu_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) { struct phytium_dmufreq *priv = dev_get_drvdata(dev); + struct acpi_result result; + unsigned long long single_threshold_value; + + result = phytium_read_threshold_value(dev); + if (result.status) { + dev_err(dev, "Failed to get threshold value\n"); + return -EINVAL; + } + single_threshold_value = result.value; + single_threshold_value = (single_threshold_value * 1024 * 1024) / 100; stat->busy_time = priv->bandwidth; - stat->total_time = (75000000 * priv->rate) / priv->freq_table[0]; - dev_dbg(dev, "busy_time = %lu, total_time = %lu\n", - stat->busy_time, stat->total_time); + stat->total_time = 
(single_threshold_value * priv->rate) / priv->freq_table[0]; + priv->last_bust_time = priv->bandwidth; + dev_dbg(dev, "busy_time = %lu, total_time = %lu,single_threshold_value = %llu\n", + stat->busy_time, stat->total_time, single_threshold_value); stat->current_frequency = priv->rate; - return 0; } @@ -260,32 +434,117 @@ static int get_freq_count(struct device *dev) return freq_count; } +static __maybe_unused int phytium_dmufreq_suspend(struct device *dev) +{ + struct phytium_dmufreq *priv = dev_get_drvdata(dev); + int ret = 0; + + dev_dbg(dev, "DMU is being suspended\n"); + + del_timer_sync(&priv->sampling); + flush_work(&priv->work); + + ret = devfreq_suspend_device(priv->devfreq); + if (ret < 0) { + dev_err(dev, "failed to suspend the devfreq devices\n"); + return ret; + } + + return 0; +} + +static __maybe_unused int phytium_dmufreq_resume(struct device *dev) +{ + struct phytium_dmufreq *priv = dev_get_drvdata(dev); + int ret = 0; + + dev_dbg(dev, "DMU is being resumed\n"); + + ret = devfreq_resume_device(priv->devfreq); + if (ret < 0) { + dev_err(dev, "failed to resume the devfreq devices\n"); + return ret; + } + + if (!timer_pending(&priv->sampling)) + mod_timer(&priv->sampling, jiffies + msecs_to_jiffies(UPDATE_INTERVAL_MS)); + else + dev_warn(dev, "Sampling timer already active ,skipping reinitialization\n"); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(phytium_dmufreq_pm, phytium_dmufreq_suspend, + phytium_dmufreq_resume); + static int phytium_dmufreq_probe(struct platform_device *pdev) { struct phytium_dmufreq *priv; struct device *dev = &pdev->dev; - const char *gov = DEVFREQ_GOV_SIMPLE_ONDEMAND; + const char *gov = DEVFREQ_GOV_PERFORMANCE; int i, ret; unsigned int max_state = get_freq_count(dev); + struct acpi_result result; + struct resource *res; + + result = phytium_dmufreq_state(dev); + if (result.value == 0) { + dev_err(dev, "DMUFREQ is not enabled\n"); + return -ENODEV; + } + + priv = devm_kzalloc(dev, struct_size(priv, freq_table, max_state), 
GFP_KERNEL); + if (!priv) + return -ENOMEM; + + result = phytium_current_enabled_channels(dev); + if (result.status) { + dev_err(dev, "Failed to get enabled channels\n"); + return -EINVAL; + } + + priv->max_count = result.value; if (max_state <= 0) return max_state; dev->init_name = "dmufreq"; - priv = kzalloc(sizeof(struct phytium_dmufreq) + - max_state * sizeof(unsigned long), GFP_KERNEL); - if (!priv) + priv->base = devm_kcalloc(dev, priv->max_count, sizeof(void __iomem *), GFP_KERNEL); + priv->read_bw = devm_kcalloc(dev, priv->max_count, sizeof(unsigned long), GFP_KERNEL); + priv->write_bw = devm_kcalloc(dev, priv->max_count, sizeof(unsigned long), GFP_KERNEL); + if (!priv->base || !priv->read_bw || !priv->write_bw) { + dev_err(dev, "failed to allocate memory\n"); return -ENOMEM; - + } platform_set_drvdata(pdev, priv); +#if IS_ENABLED(CONFIG_PHYT_DMU_PMU_PD2408) + /* Register the notifier */ + priv->nb.notifier_call = dmu_pmu_notifier_call; + ret = blocking_notifier_chain_register(&dmu_pmu_notifier_chain, &priv->nb); + if (ret) { + dev_err(dev, "Failed to register notifier\n"); + return ret; + } +#endif + + /* Get the base address of the DMU PMU */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + for (int i = 0; i < priv->max_count; i++) { + priv->base[i] = ioremap(res->start + i * DMU_PMU_STRIDE, resource_size(res)); + if (!priv->base[i]) + return -ENOMEM; + } + ret = phytium_dmu_get_freq_info(dev); if (ret) { dev_err(dev, "failed to get ddr frequency info\n"); return -EIO; } + priv->pmu_active = true; + priv->cnt = 1; priv->profile.initial_freq = priv->freq_table[0]; priv->profile.polling_ms = 100; priv->profile.timer = DEVFREQ_TIMER_DELAYED; @@ -307,13 +566,22 @@ static int phytium_dmufreq_probe(struct platform_device *pdev) } priv->devfreq = devm_devfreq_add_device(dev, &priv->profile, - gov, &priv->ondemand_data); + gov, NULL); if (IS_ERR(priv->devfreq)) { ret = PTR_ERR(priv->devfreq); dev_err(dev, "failed to add devfreq device: %d\n", ret); goto 
err; } + /*Enable PMU*/ + if (priv->pmu_active) { + for (int i = 0; i < priv->max_count; i++) { + dmu_write32(priv, i, AXI_MONITOR_EN, 0x101); + dmu_write32(priv, i, CLEAR_EVENT, 0x1); + dmu_write32(priv, i, TIMER_START, 0x1); + } + } + INIT_WORK(&priv->work, sampling_work_handle); timer_setup(&priv->sampling, sampling_timer_callback, 0); stop = ktime_add_ms(ktime_get(), priv->profile.polling_ms); @@ -334,10 +602,21 @@ static int phytium_dmufreq_remove(struct platform_device *pdev) struct phytium_dmufreq *priv = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; + for (int i = 0; i < priv->max_count; i++) { + dmu_write32(priv, i, AXI_MONITOR_EN, 0x0); + dmu_write32(priv, i, TIMER_STOP, 0x1); + } + +#if IS_ENABLED(CONFIG_PHYT_DMU_PMU_PD2408) + /*Unregister the notifier*/ + blocking_notifier_chain_unregister(&dmu_pmu_notifier_chain, &priv->nb); +#endif + if (!priv->devfreq) return 0; flush_work(&priv->work); del_timer_sync(&priv->sampling); + dev_pm_opp_remove_all_dynamic(dev); kfree(priv); @@ -356,11 +635,18 @@ MODULE_DEVICE_TABLE(acpi, phytium_dmufreq_acpi_ids); #define phytium_dmu_acpi_ids NULL #endif +#if IS_ENABLED(CONFIG_PHYT_DMU_PMU_PD2408) +struct notifier_block nb = { + .notifier_call = dmu_pmu_notifier_call, +}; +#endif + static struct platform_driver phytium_dmufreq_driver = { .probe = phytium_dmufreq_probe, .remove = phytium_dmufreq_remove, .driver = { .name = "phytium_dmufreq", + .pm = &phytium_dmufreq_pm, .acpi_match_table = ACPI_PTR(phytium_dmufreq_acpi_ids), .suppress_bind_attrs = true, }, diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index a5eef06c422644e8aadaf5aff2bd9a33c49c1ba3..ad325287de807341b9c7a40ae4cbcd384ef13718 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -12,3 +12,11 @@ config DMABUF_HEAPS_CMA Choose this option to enable dma-buf CMA heap. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these regions, you should say Y here. 
+ +config DMABUF_HEAPS_NPU_SYSTEM + bool "DMA-BUF for Phytium NPU system Heap" + depends on DMABUF_HEAPS && ARCH_PHYTIUM + help + Choose this option to enable a new dma-buf system heap for the + Phytium's NPU. The npu system heap is backed by pages from the buddy + allocator. If your system has NPU driver, you should say Y here. diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile index 974467791032ffb8a7aba17b1407d9a19b3f3b44..3c31cbd56809e891e38a801a2a8b0b8f61f0808a 100644 --- a/drivers/dma-buf/heaps/Makefile +++ b/drivers/dma-buf/heaps/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o +obj-$(CONFIG_DMABUF_HEAPS_NPU_SYSTEM) += phytium_npu_heap.o diff --git a/drivers/dma-buf/heaps/phytium_npu_heap.c b/drivers/dma-buf/heaps/phytium_npu_heap.c new file mode 100644 index 0000000000000000000000000000000000000000..6e4b8c410d1bc1857d54f4068b9b407d3e7598c3 --- /dev/null +++ b/drivers/dma-buf/heaps/phytium_npu_heap.c @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL +/* DMABUF NPU system heap exporter + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/phytium_npu_dma_buf_heap.h" + +struct dma_heap_attachment { + struct device *dev; + struct sg_table *sgtable; + struct list_head attach_list; + bool mapped; +}; + +static struct dma_heap *npu_unified_heap; + +static gfp_t gfp_type = GFP_KERNEL | __GFP_COMP | __GFP_NOWARN | ___GFP_ZERO; +/* + * The orders used for allocation (1MB, 64K, 4K) + */ +static const unsigned int orders[] = {0, 4, 8};//use 4k for test + +static struct sg_table *duplicate_sg_table(struct sg_table *table) +{ + struct sg_table *new_table; + int ret, i; + struct scatterlist *sg, *new_sg; + + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); + if (!new_table) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); + if (ret) { + kfree(new_table); + return ERR_PTR(-ENOMEM); + } + + new_sg = new_table->sgl; + for_each_sgtable_sg(table, sg, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); + new_sg = sg_next(new_sg); + } + + return new_table; +} + +static int npu_system_heap_attach(struct dma_buf *pdmabuf, + struct dma_buf_attachment *attachment) +{ + struct npu_unified_heap_buffer *buffer = pdmabuf->priv; + struct sg_table *sgtable; + + struct dma_heap_attachment *attach = kzalloc(sizeof(struct dma_heap_attachment), + GFP_KERNEL); + if (!attach) + return -ENOMEM; + + sgtable = duplicate_sg_table(&buffer->sgt); + if (IS_ERR(sgtable)) { + kfree(attach); + return -ENOMEM; + } + + INIT_LIST_HEAD(&attach->attach_list); + attach->sgtable = sgtable; + attach->dev = attachment->dev; + + attachment->priv = attach; + attach->mapped = false; + mutex_lock(&buffer->lock); + list_add(&attach->attach_list, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void npu_system_heap_detach(struct dma_buf *pdmabuf, + struct dma_buf_attachment *attachment) +{ + struct npu_unified_heap_buffer *buffer = 
pdmabuf->priv; + struct dma_heap_attachment *attach = attachment->priv; + + mutex_lock(&buffer->lock); + list_del(&attach->attach_list); + mutex_unlock(&buffer->lock); + + sg_free_table(attach->sgtable); + kfree(attach->sgtable); + kfree(attach); +} + +static struct sg_table *npu_system_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *attach = attachment->priv; + struct sg_table *sgtable = attach->sgtable; + int ret; + + ret = dma_map_sgtable(attachment->dev, sgtable, direction, 0); + if (ret) + return ERR_PTR(ret); + + attach->mapped = true; + return sgtable; +} + +static void npu_system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct dma_heap_attachment *a = attachment->priv; + + a->mapped = false; + dma_unmap_sgtable(attachment->dev, table, direction, 0); +} + +static int npu_system_heap_dma_buf_begin_cpu_access(struct dma_buf *pdmabuf, + enum dma_data_direction direction) +{ + struct npu_unified_heap_buffer *buffer = pdmabuf->priv; + struct dma_heap_attachment *attach; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + invalidate_kernel_vmap_range(buffer->vaddr, buffer->req_len); + + list_for_each_entry(attach, &buffer->attachments, attach_list) { + if (!attach->mapped) + continue; + dma_sync_sgtable_for_cpu(attach->dev, attach->sgtable, direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int npu_system_heap_dma_buf_end_cpu_access(struct dma_buf *pdmabuf, + enum dma_data_direction direction) +{ + struct npu_unified_heap_buffer *buffer = pdmabuf->priv; + struct dma_heap_attachment *attach; + + mutex_lock(&buffer->lock); + + if (buffer->vmap_cnt) + flush_kernel_vmap_range(buffer->vaddr, buffer->req_len); + + list_for_each_entry(attach, &buffer->attachments, attach_list) { + if (!attach->mapped) + continue; + dma_sync_sgtable_for_device(attach->dev, attach->sgtable, 
direction); + } + mutex_unlock(&buffer->lock); + + return 0; +} + +static int npu_system_heap_mmap(struct dma_buf *pdmabuf, struct vm_area_struct *vma) +{ + struct npu_unified_heap_buffer *buffer = pdmabuf->priv; + struct sg_table *sgtable = &buffer->sgt; + unsigned long new_addr = vma->vm_start; + struct sg_page_iter piter; + int ret; + + for_each_sgtable_page(sgtable, &piter, vma->vm_pgoff) { + struct page *page = sg_page_iter_page(&piter); + + ret = remap_pfn_range(vma, new_addr, page_to_pfn(page), PAGE_SIZE, + vma->vm_page_prot); + if (ret) + return ret; + new_addr += PAGE_SIZE; + if (new_addr >= vma->vm_end) + return 0; + } + return 0; +} + +static void *npu_system_heap_do_vmap(struct npu_unified_heap_buffer *buffer) +{ + struct sg_table *sgtable = &buffer->sgt; + int npages = PAGE_ALIGN(buffer->req_len) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + struct sg_page_iter piter; + void *vaddr; + + if (!pages) + return ERR_PTR(-ENOMEM); + + for_each_sgtable_page(sgtable, &piter, 0) { + WARN_ON(tmp - pages >= npages); + *tmp++ = sg_page_iter_page(&piter); + } + + vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL); + vfree(pages); + + if (!vaddr) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +static int npu_system_heap_vmap(struct dma_buf *pdmabuf, struct iosys_map *map) +{ + struct npu_unified_heap_buffer *buffer = pdmabuf->priv; + void *vaddr; + int ret = 0; + + mutex_lock(&buffer->lock); + if (buffer->vmap_cnt) { + buffer->vmap_cnt++; + iosys_map_set_vaddr(map, buffer->vaddr); + goto unlock; + } + + vaddr = npu_system_heap_do_vmap(buffer); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto unlock; + } + + buffer->vaddr = vaddr; + buffer->vmap_cnt++; + iosys_map_set_vaddr(map, buffer->vaddr); +unlock: + mutex_unlock(&buffer->lock); + + return ret; +} + +static void npu_system_heap_vunmap(struct dma_buf *pdmabuf, struct iosys_map *map) +{ + struct npu_unified_heap_buffer *buffer = pdmabuf->priv; + 
+ mutex_lock(&buffer->lock); + if (!--buffer->vmap_cnt) { + vunmap(buffer->vaddr); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + iosys_map_clear(map); +} + +static void npu_system_heap_dma_buf_release(struct dma_buf *pdmabuf) +{ + struct npu_unified_heap_buffer *buffer = pdmabuf->priv; + struct sg_table *table; + struct scatterlist *sg; + int i; + + table = &buffer->sgt; + for_each_sgtable_sg(table, sg, i) { + struct page *page = sg_page(sg); + + __free_pages(page, compound_order(page)); + } + sg_free_table(table); + kfree(buffer); +} + +static const struct dma_buf_ops npu_system_heap_buf_ops = { + .attach = npu_system_heap_attach, + .detach = npu_system_heap_detach, + .map_dma_buf = npu_system_heap_map_dma_buf, + .unmap_dma_buf = npu_system_heap_unmap_dma_buf, + .begin_cpu_access = npu_system_heap_dma_buf_begin_cpu_access, + .end_cpu_access = npu_system_heap_dma_buf_end_cpu_access, + .mmap = npu_system_heap_mmap, + .vmap = npu_system_heap_vmap, + .vunmap = npu_system_heap_vunmap, + .release = npu_system_heap_dma_buf_release, +}; + +static struct dma_buf *npu_system_heap_alloc(struct dma_heap *heap, + unsigned long len, + unsigned long fd_flags, + unsigned long heap_flags) +{ + struct npu_unified_heap_buffer *buffer; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct dma_buf *pdmabuf; + struct sg_table *table; + struct scatterlist *sg; + struct list_head pages; + struct page *page, *tmp_page; + int i, ret = -ENOMEM; + unsigned int order = orders[0]; + unsigned long size_remaining = len; + + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&buffer->attachments); + mutex_init(&buffer->lock); + buffer->heap = heap; + buffer->req_len = len; + + INIT_LIST_HEAD(&pages); + i = 0; + while (size_remaining > 0) { + /* + * Avoid trying to allocate memory if the process + * has been killed by SIGKILL + */ + if (fatal_signal_pending(current)) { + ret = -EINTR; + goto free_buffer; + } + + page = 
alloc_pages(gfp_type, order); + if (!page) + goto free_buffer; + + list_add_tail(&page->lru, &pages); + size_remaining -= page_size(page); + order = compound_order(page); + i++; + } + + table = &buffer->sgt; + if (sg_alloc_table(table, i, GFP_KERNEL)) + goto free_buffer; + + sg = table->sgl; + list_for_each_entry_safe(page, tmp_page, &pages, lru) { + sg_set_page(sg, page, page_size(page), 0); + sg = sg_next(sg); + list_del(&page->lru); + } + + /* create the pdmabuf */ + exp_info.exp_name = dma_heap_get_name(heap); + exp_info.ops = &npu_system_heap_buf_ops; + exp_info.size = buffer->req_len; + exp_info.flags = fd_flags; + exp_info.priv = buffer; + pdmabuf = dma_buf_export(&exp_info); + if (IS_ERR(pdmabuf)) { + ret = PTR_ERR(pdmabuf); + goto free_pages; + } + return pdmabuf; + +free_pages: + for_each_sgtable_sg(table, sg, i) { + struct page *p = sg_page(sg); + + __free_pages(p, compound_order(p)); + } + sg_free_table(table); +free_buffer: + list_for_each_entry_safe(page, tmp_page, &pages, lru) + __free_pages(page, compound_order(page)); + kfree(buffer); + + return ERR_PTR(ret); +} + +static const struct dma_heap_ops npu_system_heap_ops = { + .allocate = npu_system_heap_alloc, +}; + +static int npu_system_heap_create(void) +{ + struct dma_heap_export_info exp_info; + + exp_info.name = "npu_heap"; + exp_info.ops = &npu_system_heap_ops; + exp_info.priv = NULL; + + npu_unified_heap = dma_heap_add(&exp_info); + if (IS_ERR(npu_unified_heap)) + return PTR_ERR(npu_unified_heap); + + return 0; +} +module_init(npu_system_heap_create); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium NPU DMA BUF heap driver"); diff --git a/drivers/dma/phytium/phytium-gdmac.c b/drivers/dma/phytium/phytium-gdmac.c index b705aab24452d8af53ccb8feaa8e03787bfe01bf..e7077884fddb1b9d9401c2ee42e79f6d86b919e5 100644 --- a/drivers/dma/phytium/phytium-gdmac.c +++ b/drivers/dma/phytium/phytium-gdmac.c @@ -5,6 +5,8 @@ * Copyright (c) 2023-2024 Phytium Technology Co., Ltd. 
*/ +#include +#include #include #include #include @@ -29,7 +31,7 @@ #include #include "phytium-gdmac.h" -#define PHYTIUM_GDMA_DRIVER_VERSION "1.0.3" +#define PHYTIUM_GDMA_DRIVER_VERSION "1.0.4" static inline struct phytium_gdma_device *to_gdma_device(struct dma_chan *chan) { @@ -782,6 +784,32 @@ static struct dma_chan *phytium_gdma_of_xlate(struct of_phandle_args *dma_spec, return c; } +static struct dma_chan *phytium_gdma_acpi_xlate(struct acpi_dma_spec *dma_spec, + struct acpi_dma *acpidma) +{ + struct phytium_gdma_device *gdma = (struct phytium_gdma_device *)acpidma->data; + struct device *dev = gdma->dev; + struct phytium_gdma_chan *chan = NULL; + struct dma_chan *c = NULL; + u32 channel_id = 0; + + channel_id = dma_spec->chan_id; + + if (channel_id >= gdma->dma_channels) { + dev_err(dev, "bad channel %d\n", channel_id); + return NULL; + } + + chan = &gdma->chan[channel_id]; + c = &chan->vchan.chan; + if (!c) { + dev_err(dev, "no more channels available\n"); + return NULL; + } + + return c; +} + static int phytium_gdma_probe(struct platform_device *pdev) { struct phytium_gdma_device *gdma; @@ -820,8 +848,8 @@ static int phytium_gdma_probe(struct platform_device *pdev) goto out; } - ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", - &nr_channels); + ret = device_property_read_u32(&pdev->dev, "dma-channels", + &nr_channels); if (ret < 0) { dev_err(&pdev->dev, "can't get the number of dma channels: %d\n", ret); @@ -829,8 +857,8 @@ static int phytium_gdma_probe(struct platform_device *pdev) } gdma->dma_channels = nr_channels; - ret = of_property_read_u32(pdev->dev.of_node, "max-outstanding", - &max_outstanding); + ret = device_property_read_u32(&pdev->dev, "max-outstanding", + &max_outstanding); if (ret < 0) { dev_err(&pdev->dev, "can't get max outstanding %d\n", ret); goto out; @@ -911,8 +939,11 @@ static int phytium_gdma_probe(struct platform_device *pdev) if (ret) goto out; - ret = of_dma_controller_register(pdev->dev.of_node, - 
phytium_gdma_of_xlate, gdma); + if (has_acpi_companion(&pdev->dev)) + ret = acpi_dma_controller_register(&pdev->dev, phytium_gdma_acpi_xlate, gdma); + else + ret = of_dma_controller_register(pdev->dev.of_node, + phytium_gdma_of_xlate, gdma); if (ret < 0) { dev_err(&pdev->dev, "phytium gdma of register failed %d\n", ret); @@ -993,12 +1024,19 @@ static const struct of_device_id phytium_dma_of_id_table[] = { }; MODULE_DEVICE_TABLE(of, phytium_dma_of_id_table); +static const struct acpi_device_id phytium_gdma_acpi_match[] = { + { "PHYT0026", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_gdma_acpi_match); + static struct platform_driver phytium_gdma_driver = { .probe = phytium_gdma_probe, .remove = phytium_gdma_remove, .driver = { .name = "phytium-gdma", .of_match_table = of_match_ptr(phytium_dma_of_id_table), + .acpi_match_table = ACPI_PTR(phytium_gdma_acpi_match), .pm = &phytium_gdma_pm_ops, }, }; diff --git a/drivers/edac/phytium_edac.c b/drivers/edac/phytium_edac.c index 49230b89c01ff6003ae94d0dcfb386bdf86ec8f6..00a5134f02d60c667edc7208eaf015b25da3531c 100644 --- a/drivers/edac/phytium_edac.c +++ b/drivers/edac/phytium_edac.c @@ -103,9 +103,9 @@ static const struct ras_error_info pe220x_ras_soc_error[] = { { 37, UNCORRECTED_ERROR, "nINTERRIRQ_clust1" }, { 38, UNCORRECTED_ERROR, "nEXTERRIRQ_clust2" }, { 39, UNCORRECTED_ERROR, "nINTERRIRQ_clust2" }, - { 40, UNCORRECTED_ERROR, "ams_ame0_ras_err" }, - { 41, UNCORRECTED_ERROR, "ams_ame1_ras_err" }, - { 42, UNCORRECTED_ERROR, "ams_amer_ras_err" }, + { 40, UNCORRECTED_ERROR, "ras_err_amu0" }, + { 41, UNCORRECTED_ERROR, "ras_err_amu1" }, + { 42, UNCORRECTED_ERROR, "ras_err_ame0" }, { 43, UNCORRECTED_ERROR, "ras_err_ame1" }, }; diff --git a/drivers/gpio/gpio-phytium-core.c b/drivers/gpio/gpio-phytium-core.c index 1bda08c8567f4c798927202be3bd14f09442197b..33928c4af8eb5ebea3821a5d8ce16ba237ded7c8 100644 --- a/drivers/gpio/gpio-phytium-core.c +++ b/drivers/gpio/gpio-phytium-core.c @@ -9,6 +9,7 @@ #include #include #include 
+#include #include "gpio-phytium-core.h" @@ -378,12 +379,47 @@ int phytium_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) } EXPORT_SYMBOL_GPL(phytium_gpio_get_direction); +int phytium_gpio_irq_set_wake(struct irq_data *d, unsigned int enable) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct phytium_gpio *gpio = gpiochip_get_data(gc); + struct phytium_gpio_ctx *ctx = &gpio->ctx; + irq_hw_number_t bit = irqd_to_hwirq(d); + int ret; + + if (gpio->irq[bit]) + ret = irq_set_irq_wake(gpio->irq[bit], enable); + else + ret = irq_set_irq_wake(gpio->irq[0], enable); + + if (ret < 0) + dev_err(gc->parent, "set gpio irq wake failed!\n"); + + if (enable) { + ctx->wake_en |= BIT(bit); + if (gpio->is_resuming == 1) { + writel(~ctx->wake_en, gpio->regs + GPIO_INTMASK); + writel(ctx->wake_en, gpio->regs + GPIO_INTEN); + } + } else + ctx->wake_en &= ~BIT(bit); + + return 0; +} +EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_wake); + int phytium_gpio_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { int hwirq = irqd_to_hwirq(d); struct gpio_chip *chip_data = irq_data_get_irq_chip_data(d); - struct irq_chip *chip = irq_get_chip(chip_data->irq.parents[hwirq]); - struct irq_data *data = irq_get_irq_data(chip_data->irq.parents[hwirq]); + struct irq_chip *chip; + struct irq_data *data; + + if ((chip_data->irq.num_parents) == 1) + hwirq = 0; + + chip = irq_get_chip(chip_data->irq.parents[hwirq]); + data = irq_get_irq_data(chip_data->irq.parents[hwirq]); if (chip && chip->irq_set_affinity) return chip->irq_set_affinity(data, mask_val, force); diff --git a/drivers/gpio/gpio-phytium-core.h b/drivers/gpio/gpio-phytium-core.h index 381fc841de70a1213984f89553df552dbc293bae..739ddc18a0c779e338fbd330276a34d73dc8adcf 100644 --- a/drivers/gpio/gpio-phytium-core.h +++ b/drivers/gpio/gpio-phytium-core.h @@ -33,6 +33,8 @@ #define NGPIO_MAX 32 #define GPIO_PORT_STRIDE (GPIO_EXT_PORTB - GPIO_EXT_PORTA) +#define PHYTIUM_GPIO_DRIVER_VERSION "1.1.1" 
+ struct pin_loc { unsigned int port; unsigned int offset; @@ -54,6 +56,7 @@ struct phytium_gpio_ctx { u32 raw_intstatus; u32 ls_sync; u32 debounce; + u32 wake_en; }; #endif @@ -84,5 +87,6 @@ void phytium_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p); void phytium_gpio_irq_enable(struct irq_data *d); void phytium_gpio_irq_disable(struct irq_data *d); void phytium_gpio_irq_handler(struct irq_desc *desc); +int phytium_gpio_irq_set_wake(struct irq_data *d, unsigned int enable); int phytium_gpio_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force); #endif diff --git a/drivers/gpio/gpio-phytium-pci.c b/drivers/gpio/gpio-phytium-pci.c index 3e4c1edd97cb04d620b317ccb7bcdba735d8f325..be4670936b104abc802b9266c52b6d35050f097c 100644 --- a/drivers/gpio/gpio-phytium-pci.c +++ b/drivers/gpio/gpio-phytium-pci.c @@ -21,6 +21,8 @@ static const struct irq_chip phytium_gpio_irq_chip = { .irq_print_chip = phytium_gpio_irq_print_chip, .irq_enable = phytium_gpio_irq_enable, .irq_disable = phytium_gpio_irq_disable, + .irq_set_affinity = phytium_gpio_irq_set_affinity, + .irq_set_wake = phytium_gpio_irq_set_wake, .flags = IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; @@ -71,6 +73,9 @@ static int phytium_gpio_pci_probe(struct pci_dev *pdev, const struct pci_device_ /* irq_chip support */ raw_spin_lock_init(&gpio->lock); + writel(0, gpio->regs + GPIO_INTEN); + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + gpio->gc.base = -1; gpio->gc.get_direction = phytium_gpio_get_direction; gpio->gc.direction_input = phytium_gpio_direction_input; @@ -139,7 +144,8 @@ static int phytium_gpio_pci_suspend(struct device *dev) gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); - writel(0, gpio->regs + GPIO_INTEN); + writel(~gpio->ctx.wake_en, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.wake_en, gpio->regs + GPIO_INTEN); raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; @@ 
-194,3 +200,4 @@ module_pci_driver(phytium_gpio_pci_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cheng Quan "); MODULE_DESCRIPTION("Phytium GPIO PCI Driver"); +MODULE_VERSION(PHYTIUM_GPIO_DRIVER_VERSION); diff --git a/drivers/gpio/gpio-phytium-platform.c b/drivers/gpio/gpio-phytium-platform.c index 45e9504b366988157b794660bdf5b92bf9b5cd8c..364136fb1d9b277f72fa7684ba2c9d801226e925 100644 --- a/drivers/gpio/gpio-phytium-platform.c +++ b/drivers/gpio/gpio-phytium-platform.c @@ -41,6 +41,7 @@ static const struct irq_chip phytium_gpio_irq_chip = { .irq_print_chip = phytium_gpio_irq_print_chip, .irq_enable = phytium_gpio_irq_enable, .irq_disable = phytium_gpio_irq_disable, + .irq_set_wake = phytium_gpio_irq_set_wake, .irq_set_affinity = phytium_gpio_irq_set_affinity, .flags = IRQCHIP_IMMUTABLE | IRQCHIP_PIPELINE_SAFE, GPIOCHIP_IRQ_RESOURCE_HELPERS, @@ -91,6 +92,9 @@ static int phytium_gpio_probe(struct platform_device *pdev) /* irq_chip support */ raw_spin_lock_init(&gpio->lock); + writel(0, gpio->regs + GPIO_INTEN); + writel(0xffffffff, gpio->regs + GPIO_PORTA_EOI); + gpio->gc.base = -1; gpio->gc.get_direction = phytium_gpio_get_direction; gpio->gc.direction_input = phytium_gpio_direction_input; @@ -156,7 +160,8 @@ static int phytium_gpio_suspend(struct device *dev) gpio->ctx.int_polarity = readl(gpio->regs + GPIO_INT_POLARITY); gpio->ctx.debounce = readl(gpio->regs + GPIO_DEBOUNCE); - writel(0, gpio->regs + GPIO_INTEN); + writel(~gpio->ctx.wake_en, gpio->regs + GPIO_INTMASK); + writel(gpio->ctx.wake_en, gpio->regs + GPIO_INTEN); raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; @@ -211,3 +216,4 @@ module_platform_driver(phytium_gpio_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Chen Baozi "); MODULE_DESCRIPTION("Phytium GPIO driver"); +MODULE_VERSION(PHYTIUM_GPIO_DRIVER_VERSION); diff --git a/drivers/gpio/gpio-phytium-sgpio.c b/drivers/gpio/gpio-phytium-sgpio.c index c661a86ae2ab319c572fd690ce579bd1bfe7e05b..cbdc1f21bdcb16f90080dd85c83578b46dd7ed73 100644 
--- a/drivers/gpio/gpio-phytium-sgpio.c +++ b/drivers/gpio/gpio-phytium-sgpio.c @@ -49,6 +49,8 @@ #define GPIO_OFFSET(x) ((x) & GENMASK(5, 0)) #define GPIO_BIT(x) BIT(GPIO_OFFSET(x) >> 1) +#define GPIO_SGPIO_DRIVER_VERSION "1.1.0" + struct phytium_sgpio { struct gpio_chip gc; void __iomem *regs; @@ -317,3 +319,4 @@ module_platform_driver(phytium_sgpio_driver); MODULE_AUTHOR("Chen Baozi "); MODULE_DESCRIPTION("Phytium SGPIO driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(GPIO_SGPIO_DRIVER_VERSION); diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c index b0c1f49806815654bb2e9ca3f540ada07e252781..68094a2a7c15c0ed13cd9d170d88f7b9ad3a016d 100644 --- a/drivers/gpu/drm/phytium/phytium_crtc.c +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -599,6 +599,12 @@ phytium_crtc_atomic_enable(struct drm_crtc *crtc, else phytium_crtc_gamma_init(crtc); + /* enable dither*/ + DRM_DEBUG_KMS("Enable dither on DC-%d\n", phys_pipe); + phytium_writel_reg(priv, DITHER_TABLE_LOW, group_offset, DC_DITHER_TABLE_LOW); + phytium_writel_reg(priv, DITHER_TABLE_HIGH, group_offset, DC_DITHER_TABLE_HIGH); + phytium_writel_reg(priv, ENABLE, group_offset, DC_DITHER_CONFIG); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); drm_crtc_vblank_on(crtc); } diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c index 78eac4fde18388504951f292ce28d9fdd316a3ad..da3757541d7bb3d5d8a734207b710c9a9faf4fb0 100644 --- a/drivers/gpu/drm/phytium/phytium_dp.c +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -2413,8 +2413,7 @@ static const struct hdmi_codec_ops phytium_audio_codec_ops = { .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, }; -static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp, - const int port) +static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) { struct device *dev = phytium_dp->dev->dev; struct hdmi_codec_pdata codec_data = { @@ -2426,8 +2425,10 
@@ static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp, }; phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, - codec_id + port, + codec_id, &codec_data, sizeof(codec_data)); + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + codec_id += 1; return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); } @@ -2438,6 +2439,7 @@ static void phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp) if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) platform_device_unregister(phytium_dp->audio_pdev); phytium_dp->audio_pdev = NULL; + codec_id -= 1; } static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) @@ -2630,7 +2632,7 @@ int phytium_dp_init(struct drm_device *dev, int port) drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); - ret = phytium_dp_audio_codec_init(phytium_dp, port); + ret = phytium_dp_audio_codec_init(phytium_dp); if (ret) { pr_err("failed to initialize audio codec\n"); goto failed_connector_init; diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c index 69dedbb75a853a420d604a36382594ab83ebbf7d..c0810d9ab5c98215dd260a4d23791e6de73e5f77 100644 --- a/drivers/gpu/drm/phytium/phytium_platform.c +++ b/drivers/gpu/drm/phytium/phytium_platform.c @@ -5,6 +5,7 @@ * Copyright (c) 2021-2024 Phytium Technology Co., Ltd. 
*/ +#include "drm/drm_aperture.h" #include #include #include @@ -266,6 +267,8 @@ static int phytium_platform_probe(struct platform_device *pdev) } } + drm_aperture_remove_framebuffers(&phytium_display_drm_driver); + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); if (IS_ERR(dev)) { DRM_ERROR("failed to allocate drm_device\n"); diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h index f5d4a6945c35b6313156955c56ef4367f86da5dc..14a481090679b7535430505a1cceaeae9d49305e 100644 --- a/drivers/gpu/drm/phytium/phytium_reg.h +++ b/drivers/gpu/drm/phytium/phytium_reg.h @@ -29,6 +29,12 @@ #define PANEL_DATAENABLE_ENABLE (1<<0) #define PANEL_DATA_ENABLE (1<<4) #define PANEL_CLOCK_ENABLE (1<<8) +#define DC_DITHER_CONFIG 0X1410 + #define ENABLE 0x80000000 +#define DC_DITHER_TABLE_LOW 0x1420 + #define DITHER_TABLE_LOW 0x7B48F3C0 +#define DC_DITHER_TABLE_HIGH 0X1428 + #define DITHER_TABLE_HIGH 0x596AD1E2 #define PHYTIUM_DC_HDISPLAY 0x1430 #define HDISPLAY_END_SHIFT 0 #define HDISPLAY_END_MASK 0x7fff diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index ba10a5fd87d95472a38c140f27c9f2b6647d23a4..93c6d598436cf2d564a90fb16bc71fee06d7aaf0 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -966,6 +966,8 @@ config I2C_PHYTIUM_PLATFORM This driver can also be built as a module. If so, the module will be called i2c-phytium-platform. 
+source "drivers/i2c/busses/phytium_i2c_v2/Kconfig" + config I2C_PNX tristate "I2C bus support for Philips PNX and NXP LPC targets" depends on ARCH_LPC32XX || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 36e48488f7b78c3d7b1ef75b51bf6612db2e7278..36ac680cc7765f8ee31be88ad68ed10ceab192e0 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -159,5 +159,6 @@ obj-$(CONFIG_I2C_XGENE_SLIMPRO) += i2c-xgene-slimpro.o obj-$(CONFIG_SCx200_ACB) += scx200_acb.o obj-$(CONFIG_I2C_FSI) += i2c-fsi.o obj-$(CONFIG_I2C_VIRTIO) += i2c-virtio.o +obj-$(CONFIG_I2C_V2_PLATFORM) += phytium_i2c_v2/ ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG diff --git a/drivers/i2c/busses/i2c-phytium-common.c b/drivers/i2c/busses/i2c-phytium-common.c index b008e6a10bb7c2f49d1d1e6b1f2c733b42f16d7d..809aba7974640d24e3b852126cc99a39104874df 100644 --- a/drivers/i2c/busses/i2c-phytium-common.c +++ b/drivers/i2c/busses/i2c-phytium-common.c @@ -117,7 +117,7 @@ EXPORT_SYMBOL_GPL(i2c_phytium_prepare_clk); int i2c_phytium_check_bus_not_busy(struct phytium_i2c_dev *dev) { - if (dev->slave_state != SLAVE_STATE_IDLE) + if (dev->slave_status != SLAVE_STATE_IDLE) return -EAGAIN; if (phytium_readl(dev, IC_STATUS) & IC_STATUS_ACTIVITY) return -ETIMEDOUT; diff --git a/drivers/i2c/busses/i2c-phytium-core.h b/drivers/i2c/busses/i2c-phytium-core.h index 62900e11153241357f3adc83c86859311af522da..565410d9b4feb24782e405ace9125ff4e0327355 100644 --- a/drivers/i2c/busses/i2c-phytium-core.h +++ b/drivers/i2c/busses/i2c-phytium-core.h @@ -11,7 +11,7 @@ #include -#define I2C_PHYTIUM_DRV_VERSION "1.0.0" +#define I2C_PHYTIUM_DRV_VERSION "1.0.1" #define IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ I2C_FUNC_SMBUS_BYTE | \ @@ -141,13 +141,13 @@ #define PHYTIUM_IC_SLAVE 1 #if IS_ENABLED(CONFIG_I2C_SLAVE) -enum i2c_slave_state { - SLAVE_STATE_IDLE, - SLAVE_STATE_RECV, - SLAVE_STATE_SEND, - SLAVE_STATE_REQUEST, - SLAVE_STATE_RESPONSE -}; +/* Slave status */ +#define 
SLAVE_STATE_IDLE 0x0 +#define SLAVE_WRITE_IN_PROGRESS BIT(0) +#define SLAVE_READ_IN_PROGRESS BIT(1) +#define SLAVE_STATE_ACTIVE BIT(2) + +#define REQ_MIN_LEN 6 #endif #define ABRT_7B_ADDR_NOACK 0 #define ABRT_10ADDR1_NOACK 1 @@ -199,7 +199,8 @@ struct phytium_i2c_dev { u32 capability; #if IS_ENABLED(CONFIG_I2C_SLAVE) - enum i2c_slave_state slave_state; + u32 slave_status; + struct completion slave_complete; #endif spinlock_t i2c_lock; struct i2c_client *slave; diff --git a/drivers/i2c/busses/i2c-phytium-mix.c b/drivers/i2c/busses/i2c-phytium-mix.c index 748042c9feb6b6edb849e0ee212a3385d95d3093..4a157deceaa052e688e6a9688918b76e332e854d 100644 --- a/drivers/i2c/busses/i2c-phytium-mix.c +++ b/drivers/i2c/busses/i2c-phytium-mix.c @@ -24,6 +24,9 @@ #define I2C_QUICK_CMD_BIT_SET BIT(13) #define DEFAULT_TIMEOUT (DEFAULT_CLOCK_FREQUENCY / 1000 * 35) +static int i2c_phytium_init_master(struct phytium_i2c_dev *dev); +static void i2c_phytium_change_mode(int target_mode, struct phytium_i2c_dev *dev); + static int i2c_phytium_recover_controller(struct phytium_i2c_dev *dev) { unsigned long flags; @@ -31,14 +34,20 @@ static int i2c_phytium_recover_controller(struct phytium_i2c_dev *dev) spin_lock_irqsave(&dev->i2c_lock, flags); reset_control_reset(dev->rst); + + if (!dev->slave) { + i2c_phytium_init_master(dev); #if IS_ENABLED(CONFIG_I2C_SLAVE) - dev->slave_state = SLAVE_STATE_IDLE; - dev->status = STATUS_IDLE; - if (dev->slave) + } else { + dev->slave_status = SLAVE_STATE_IDLE; phytium_writel(dev, dev->slave->addr, IC_SAR); + i2c_phytium_change_mode(PHYTIUM_IC_SLAVE, dev); #endif + } + dev->status = STATUS_IDLE; spin_unlock_irqrestore(&dev->i2c_lock, flags); + return 0; } @@ -131,7 +140,6 @@ static int i2c_phytium_init_master(struct phytium_i2c_dev *dev) static void i2c_phytium_change_mode(int target_mode, struct phytium_i2c_dev *dev) { if (target_mode == PHYTIUM_IC_MASTER) { - dev->disable_int(dev); dev->disable(dev); i2c_phytium_init_master(dev); } else { @@ -371,31 +379,41 @@ 
static int i2c_phytium_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], ret = pm_runtime_get_sync(dev->dev); if (ret < 0) { dev_err(dev->dev, "pm runtime get sync err.\n"); - goto pm_exit; + goto done; } spin_lock_irqsave(&dev->i2c_lock, flags); - reinit_completion(&dev->cmd_complete); +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (dev->slave && dev->slave_status != SLAVE_STATE_IDLE) { + reinit_completion(&dev->slave_complete); + spin_unlock_irqrestore(&dev->i2c_lock, flags); + + /* Waiting for slave to complete. */ + if (!wait_for_completion_timeout(&dev->slave_complete, + adapter->timeout)) { + dev_err(dev->dev, "Slave is timeout, recover\n"); + i2c_phytium_recover_controller(dev); + ret = -ETIMEDOUT; + goto done; + } + spin_lock_irqsave(&dev->i2c_lock, flags); + } + + if (dev->mode == PHYTIUM_IC_SLAVE) + i2c_phytium_change_mode(PHYTIUM_IC_MASTER, dev); +#endif + + reinit_completion(&dev->cmd_complete); dev->msgs = msgs; dev->msgs_num = num; dev->cmd_err = 0; dev->msg_write_idx = 0; dev->msg_read_idx = 0; dev->msg_err = 0; - dev->status = STATUS_IDLE; dev->abort_source = 0; dev->rx_outstanding = 0; - ret = i2c_phytium_check_bus_not_busy(dev); - if (ret < 0) - goto done; - -#if IS_ENABLED(CONFIG_I2C_SLAVE) - if (dev->slave_cfg) - i2c_phytium_change_mode(PHYTIUM_IC_MASTER, dev); -#endif - /* Start the transfers */ i2c_phytium_xfer_init(dev); spin_unlock_irqrestore(&dev->i2c_lock, flags); @@ -404,15 +422,19 @@ static int i2c_phytium_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], adapter->timeout)) { dev_err(dev->dev, "controller timed out\n"); i2c_phytium_recover_controller(dev); - spin_lock_irqsave(&dev->i2c_lock, flags); - if (!dev->slave_cfg) - i2c_phytium_init_master(dev); ret = -ETIMEDOUT; goto done; } - spin_lock_irqsave(&dev->i2c_lock, flags); - __i2c_phytium_disable_nowait(dev); + if (!dev->slave) + __i2c_phytium_disable_nowait(dev); +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* Do nothing. 
*/ + if (dev->slave && dev->msgs->len >= REQ_MIN_LEN) { + reinit_completion(&dev->slave_complete); + wait_for_completion_timeout(&dev->slave_complete, adapter->timeout); + } +#endif if (dev->msg_err) { ret = dev->msg_err; goto done; @@ -425,25 +447,13 @@ static int i2c_phytium_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], /* We have got an error */ if (dev->cmd_err == IC_ERR_TX_ABRT) { - spin_unlock_irqrestore(&dev->i2c_lock, flags); ret = i2c_phytium_handle_tx_abort(dev); - spin_lock_irqsave(&dev->i2c_lock, flags); goto done; } ret = -EIO; done: -#if IS_ENABLED(CONFIG_I2C_SLAVE) - if (dev->slave_cfg) - i2c_phytium_change_mode(PHYTIUM_IC_SLAVE, dev); - dev->status = STATUS_IDLE; - dev->slave_state = SLAVE_STATE_IDLE; -#endif - spin_unlock_irqrestore(&dev->i2c_lock, flags); - -pm_exit: - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -483,7 +493,7 @@ static int i2c_phytium_reg_slave(struct i2c_client *slave) dev->msg_write_idx = 0; dev->msg_read_idx = 0; dev->msg_err = 0; - dev->status = STATUS_IDLE; + dev->slave_status = SLAVE_STATE_IDLE; dev->abort_source = 0; dev->rx_outstanding = 0; @@ -603,8 +613,7 @@ static int i2c_phytium_irq_handler_master(struct phytium_i2c_dev *dev) phytium_readl(dev, IC_CLR_TX_ABRT); phytium_writel(dev, 0, IC_INTR_MASK); - complete(&dev->cmd_complete); - return 0; + goto abort; } } @@ -617,6 +626,10 @@ static int i2c_phytium_irq_handler_master(struct phytium_i2c_dev *dev) abort: if ((stat & IC_INTR_TX_ABRT) || (stat & IC_INTR_STOP_DET) || dev->msg_err) { complete(&dev->cmd_complete); +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (dev->slave) + i2c_phytium_change_mode(PHYTIUM_IC_SLAVE, dev); +#endif } else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { /* Workaround to trigger pending interrupt */ stat = phytium_readl(dev, IC_INTR_MASK); @@ -644,11 +657,10 @@ static int i2c_phytium_irq_handler_slave(struct phytium_i2c_dev *dev) stat = i2c_phytium_read_clear_intrbits(dev); if (stat & IC_INTR_RX_FULL) { - 
if (dev->status != STATUS_WRITE_IN_PROGRESS) { - dev->status = STATUS_WRITE_IN_PROGRESS; + if (dev->slave_status != SLAVE_WRITE_IN_PROGRESS) { + dev->slave_status = SLAVE_WRITE_IN_PROGRESS; i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); - dev->slave_state = SLAVE_STATE_RECV; } do { val = phytium_readl(dev, IC_DATA_CMD); @@ -662,13 +674,11 @@ static int i2c_phytium_irq_handler_slave(struct phytium_i2c_dev *dev) if (slave_activity) { phytium_readl(dev, IC_CLR_RD_REQ); - if (!(dev->status & STATUS_READ_IN_PROGRESS)) { + if (!(dev->slave_status & SLAVE_READ_IN_PROGRESS)) { i2c_slave_event(dev->slave, I2C_SLAVE_READ_REQUESTED, &val); - dev->status |= STATUS_READ_IN_PROGRESS; - dev->status &= ~STATUS_WRITE_IN_PROGRESS; - dev->slave_state = SLAVE_STATE_SEND; - + dev->slave_status |= SLAVE_READ_IN_PROGRESS; + dev->slave_status &= ~SLAVE_WRITE_IN_PROGRESS; } else { i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, &val); @@ -680,8 +690,8 @@ static int i2c_phytium_irq_handler_slave(struct phytium_i2c_dev *dev) if (stat & IC_INTR_STOP_DET) { i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); #if IS_ENABLED(CONFIG_I2C_SLAVE) - dev->status = STATUS_IDLE; - dev->slave_state = SLAVE_STATE_IDLE; + dev->slave_status = SLAVE_STATE_IDLE; + complete(&dev->slave_complete); #endif } @@ -697,6 +707,8 @@ static irqreturn_t i2c_phytium_isr(int this_irq, void *dev_id) #if IS_ENABLED(CONFIG_I2C_SLAVE) if (dev->mode == PHYTIUM_IC_SLAVE) { + if (dev->slave_status == SLAVE_STATE_IDLE) + dev->slave_status = SLAVE_STATE_ACTIVE; i2c_phytium_irq_handler_slave(dev); spin_unlock(&dev->i2c_lock); return IRQ_HANDLED; @@ -829,6 +841,10 @@ int i2c_phytium_probe(struct phytium_i2c_dev *dev) "Phytium I2C Slave Adapter"); dev->init = i2c_phytium_init_slave; } +#if IS_ENABLED(CONFIG_I2C_SLAVE) + init_completion(&dev->slave_complete); + dev->slave_status = SLAVE_STATE_IDLE; +#endif dev->disable = i2c_phytium_disable; dev->disable_int = i2c_phytium_disable_int; diff --git 
a/drivers/i2c/busses/i2c-phytium-pci.c b/drivers/i2c/busses/i2c-phytium-pci.c index 91d8cabcd96b95dc805504e40e42f56d7f6bf504..db1600668e3393c518c7f8ebb8b1cc0fd3f7287c 100644 --- a/drivers/i2c/busses/i2c-phytium-pci.c +++ b/drivers/i2c/busses/i2c-phytium-pci.c @@ -184,7 +184,7 @@ static int i2c_phytium_pci_probe(struct pci_dev *pdev, dev->flags |= controller->flags; #if IS_ENABLED(CONFIG_I2C_SLAVE) - dev->slave_state = SLAVE_STATE_IDLE; + dev->slave_status = SLAVE_STATE_IDLE; #endif spin_lock_init(&dev->i2c_lock); dev->functionality = controller->functionality | IC_DEFAULT_FUNCTIONALITY; diff --git a/drivers/i2c/busses/i2c-phytium-platform.c b/drivers/i2c/busses/i2c-phytium-platform.c index 084135a71895141d07150a3c7c4fefab5025bef2..2dfe919fdda439b035aeda568923acc95f561957 100644 --- a/drivers/i2c/busses/i2c-phytium-platform.c +++ b/drivers/i2c/busses/i2c-phytium-platform.c @@ -153,7 +153,7 @@ static int phytium_i2c_plat_probe(struct platform_device *pdev) dev->irq = irq; dev->first_time_init_master = false; #if IS_ENABLED(CONFIG_I2C_SLAVE) - dev->slave_state = SLAVE_STATE_IDLE; + dev->slave_status = SLAVE_STATE_IDLE; #endif spin_lock_init(&dev->i2c_lock); platform_set_drvdata(pdev, dev); diff --git a/drivers/i2c/busses/phytium_i2c_v2/Kconfig b/drivers/i2c/busses/phytium_i2c_v2/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..c08fd8f9f0e335c549b252a9df01a17e64278ccc --- /dev/null +++ b/drivers/i2c/busses/phytium_i2c_v2/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Sensor device configuration +# + +config I2C_V2_PLATFORM + tristate "Phytium I2C_V2 Platform" + depends on (ACPI && COMMON_CLK) || !ACPI + select I2C_SMBUS + + help + If you say yes to this option, support will be included for the + Phytium I2C adapter. Only master mode is supported. + + This driver can also be built as a module.If so,the module + will be called i2c-phytium-v2-platform. 
diff --git a/drivers/i2c/busses/phytium_i2c_v2/Makefile b/drivers/i2c/busses/phytium_i2c_v2/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..74161298d7c83129b67db4ea5a3c3ae211d77855 --- /dev/null +++ b/drivers/i2c/busses/phytium_i2c_v2/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the i2c bus drivers. +# + +obj-$(CONFIG_I2C_V2_PLATFORM) += i2c-phytium-v2-platform.o +i2c-phytium-v2-platform-objs := i2c-phyt-platform.o i2c-phyt-common.o i2c-phyt-master.o i2c-phyt-slave.o diff --git a/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-common.c b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-common.c new file mode 100644 index 0000000000000000000000000000000000000000..1d00ba706f27ea215d0916e5f929d23a1ee58681 --- /dev/null +++ b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-common.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium I2C adapter driver. + * + * Copyright (C) 2023-2024, Phytium Technology Co., Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phyt-core.h" + +#define FT_LOG_LINE_MAX_LEN 400 +#define FT_LOG_MAX_SIZE 8192 + +static void i2c_phyt_send_msg(struct i2c_phyt_dev *dev, + struct phyt_msg_info *set_msg, bool complete_flag); + +static char *i2c_ft_abort_sources[] = { + [FT_I2C_TIMEOUT] = "I2C cmd not acknowledged", + [FT_I2C_CNT_ERR] = "I2C timings count error", + [FT_I2C_TX_ABRT] = "Tx abort", + [FT_I2C_INT_ERR] = "Interrupt error", + [FT_I2C_BLOCK_SIZE] = "smbus block error", + [FT_I2C_INVALID_ADDR] = "slave address invalid", + [FT_I2C_CHECK_STATUS_ERR] = "Uncomplete status", +}; + +u32 i2c_phyt_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) +{ + if (cond) + return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset; + else + return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000 - 3 + offset; +} + +u32 i2c_phyt_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) 
+{ + return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset; +} + +unsigned long i2c_phyt_clk_rate(struct i2c_phyt_dev *dev) +{ + if (WARN_ON_ONCE(!dev->get_clk_rate_khz)) + return 0; + + return dev->get_clk_rate_khz(dev); +} + +int i2c_phyt_prepare_clk(struct i2c_phyt_dev *dev, bool prepare) +{ + if (IS_ERR(dev->clk)) + return PTR_ERR(dev->clk); + + if (prepare) + return clk_prepare_enable(dev->clk); + + clk_disable_unprepare(dev->clk); + + return 0; +} + +void i2c_phyt_handle_tx_abort(struct i2c_phyt_dev *dev) +{ + unsigned long abort_source = dev->abort_source; + int i; + + for_each_set_bit(i, &abort_source, ARRAY_SIZE(i2c_ft_abort_sources)) + dev_info(dev->dev, "%s: %s\n", __func__, i2c_ft_abort_sources[i]); +} + +u32 i2c_phyt_func(struct i2c_adapter *adapter) +{ + struct i2c_phyt_dev *dev = i2c_get_adapdata(adapter); + + return dev->functionality; +} + +void i2c_phyt_common_watchdog(struct i2c_phyt_dev *dev) +{ + u32 reg; + + reg = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_DEBUG); + + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, + reg | FT_I2C_REGFILE_HEARTBIT_VAL); +} + +void i2c_phyt_enable_alive(struct i2c_phyt_dev *dev) +{ + u32 reg; + + reg = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_DEBUG); + + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, + reg | FT_I2C_REGFILE_HEARTBIT_VAL); + + reg = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_DEBUG); + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, + reg | FT_I2C_LOG_ADDR_LOG_ALIVE); +} + +void i2c_phyt_disable_alive(struct i2c_phyt_dev *dev) +{ + u32 reg; + + reg = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_DEBUG); + reg &= ~FT_I2C_LOG_ADDR_LOG_ALIVE; + + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, reg); +} + +void i2c_phyt_enable_debug(struct i2c_phyt_dev *dev) +{ + u32 reg; + + reg = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_DEBUG); + + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, + reg | FT_I2C_LOG_ADDR_LOG_DEBUG); +} + +void i2c_phyt_disable_debug(struct i2c_phyt_dev *dev) +{ + u32 reg; + + reg = i2c_phyt_read_reg(dev, 
FT_I2C_REGFILE_DEBUG); + reg &= ~FT_I2C_LOG_ADDR_LOG_DEBUG; + + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, reg); +} + + +void i2c_phyt_show_log(struct i2c_phyt_dev *dev) +{ + u32 i, reg, len; + u8 *plog; + + if (!dev->log_addr) + return; + + /*set lock*/ + reg = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_DEBUG); + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, + reg | FT_I2C_LOG_ADDR_LOCK_VAL); + + plog = dev->log_addr; + if (reg & FT_I2C_LOG_ADDR_LOG_FLAG) { + len = strnlen((char *)dev->log_addr, FT_LOG_MAX_SIZE); + dev_info(dev->dev, "log len :%d,addr: 0x%llx,size:%d\n", len, (u64)dev->log_addr, + dev->log_size); + if (len > FT_LOG_LINE_MAX_LEN) { + for (i = 0; i + FT_LOG_LINE_MAX_LEN < len; i += FT_LOG_LINE_MAX_LEN) + dev_info(dev->dev, "(log)%.*s\n", FT_LOG_LINE_MAX_LEN, &plog[i]); + } else { + dev_info(dev->dev, "(log)%.*s\n", FT_LOG_LINE_MAX_LEN, &plog[0]); + } + + for (i = 0; i < dev->log_size; i++) + plog[i] = 0; + } + /*clear log flag*/ + reg &= ~FT_I2C_LOG_ADDR_LOG_FLAG; + /*unset lock*/ + reg &= ~FT_I2C_LOG_ADDR_LOCK_VAL; + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_DEBUG, reg); +} + +int i2c_phyt_malloc_log_mem(struct i2c_phyt_dev *dev) +{ + u32 reg; + u64 phy_addr; + + reg = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_DEBUG); + phy_addr = ((reg & FT_I2C_LOG_ADDR_MASK) >> FT_I2C_LOG_ADDR_LOW_SHIFT) << + FT_I2C_LOG_ADDR_SHIFT; + dev->log_size = ((reg & FT_I2C_LOG_SIZE_MASK) >> FT_I2C_LOG_SIZE_LOW_SHIFT) * 1024; + + dev->log_addr = devm_ioremap(dev->dev, phy_addr, dev->log_size); + + if (IS_ERR(dev->log_addr)) { + dev_err(dev->dev, "log_addr is err\n"); + return -ENOMEM; + } + + return 0; +} + +void i2c_phyt_common_regfile_disable_int(struct i2c_phyt_dev *dev) +{ + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_RV2AP_INTR_MASK, + FT_I2C_REGFILE_DISABLE_INTR_VAL); +} + +void i2c_phyt_common_regfile_enable_int(struct i2c_phyt_dev *dev) +{ + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_RV2AP_INTR_MASK, + FT_I2C_REGFILE_ENABLE_INTR_VAL); +} + +void 
i2c_phyt_common_regfile_clear_rv2ap_int(struct i2c_phyt_dev *dev, + u32 stat) +{ + /*For Desktop type,this opt is useful*/ + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_RV2AP_INTR_STAT, + stat & ~FT_I2C_RV2AP_INTR_BIT4); + /*For Service type,this opt is useful*/ + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_RV2AP_INTR_CLEAR, + stat | FT_I2C_RV2AP_INTR_BIT4); +} + +void i2c_phyt_common_set_cmd(struct i2c_phyt_dev *dev, + struct phyt_msg_info *i2c_mng_msg, u8 cmd, u8 sub_cmd) +{ + i2c_mng_msg->head.cmd_type = cmd; + i2c_mng_msg->head.cmd_subid = sub_cmd; + i2c_mng_msg->head.status0 = FT_I2C_MSG_COMPLETE_UNKNOW; +} + +void i2c_phyt_set_cmd8(struct i2c_phyt_dev *dev, + u8 sub_cmd, u8 data) +{ + struct phyt_msg_info i2c_mng_msg; + + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + i2c_phyt_common_set_cmd(dev, &i2c_mng_msg, PHYTI2C_MSG_CMD_SET, sub_cmd); + i2c_mng_msg.data[0] = data; + dev->total_shmem_len = FT_I2C_MSG_CMDDATA_SIZE; + + i2c_phyt_send_msg(dev, &i2c_mng_msg, true); +} + +void i2c_phyt_set_cmd16(struct i2c_phyt_dev *dev, + u8 sub_cmd, u16 data) +{ + struct phyt_msg_info i2c_mng_msg; + u16 *ctrl = (u16 *)&i2c_mng_msg.data[0]; + + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + i2c_phyt_common_set_cmd(dev, &i2c_mng_msg, PHYTI2C_MSG_CMD_SET, sub_cmd); + *ctrl = data; + dev->total_shmem_len = FT_I2C_MSG_CMDDATA_SIZE; + + i2c_phyt_send_msg(dev, &i2c_mng_msg, true); +} + +void i2c_phyt_set_cmd32(struct i2c_phyt_dev *dev, + u8 sub_cmd, u32 data) +{ + struct phyt_msg_info i2c_mng_msg; + u32 *cmd_data = (u32 *)&i2c_mng_msg.data[0]; + + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + i2c_phyt_common_set_cmd(dev, &i2c_mng_msg, PHYTI2C_MSG_CMD_SET, sub_cmd); + *cmd_data = data; + dev->total_shmem_len = FT_I2C_MSG_CMDDATA_SIZE; + + i2c_phyt_send_msg(dev, &i2c_mng_msg, true); +} + +void i2c_phyt_data_cmd8_array(struct i2c_phyt_dev *dev, + u8 sub_cmd, u8 *data, int len) +{ + struct phyt_msg_info i2c_mng_msg; + + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + /*set cmd*/ + 
i2c_phyt_common_set_cmd(dev, &i2c_mng_msg, PHYTI2C_MSG_CMD_DATA, sub_cmd); + /*set data*/ + memcpy(&i2c_mng_msg.data[0], &data[0], len); + /*set len*/ + dev->total_shmem_len = FT_I2C_MSG_CMDDATA_SIZE; + + i2c_phyt_send_msg(dev, &i2c_mng_msg, false); +} + +void i2c_phyt_default_cfg(struct i2c_phyt_dev *dev, + struct i2c_ft_default_cfg_msg *buf) +{ + struct phyt_msg_info i2c_mng_msg; + + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + i2c_phyt_common_set_cmd(dev, &i2c_mng_msg, PHYTI2C_MSG_CMD_DEFAULT, + PHYTI2C_MSG_CMD_DEFAULT_RESUME); + memcpy(&i2c_mng_msg.data[0], (char *)buf, sizeof(struct i2c_ft_default_cfg_msg)); + dev->total_shmem_len = FT_I2C_MSG_CMDDATA_SIZE; + + i2c_phyt_send_msg(dev, &i2c_mng_msg, true); +} + +void i2c_phyt_disable_int(struct i2c_phyt_dev *dev) +{ + i2c_phyt_set_cmd32(dev, PHYTI2C_MSG_CMD_SET_INTERRUPT, 0); +} + +void i2c_phyt_trig_rv_intr(struct i2c_phyt_dev *dev) +{ + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_AP2RV_INTR_STATE, + FT_I2C_REGFILE_AP2RV_SET_INTR_VAL); +} + +static void i2c_phyt_send_msg(struct i2c_phyt_dev *dev, + struct phyt_msg_info *set_msg, bool complete_flag) +{ + u64 *p_dest_msg = (u64 *)dev->tx_shmem_addr; + u64 *p_src_msg = (u64 *)set_msg; + int i, len; + + if (complete_flag) { + reinit_completion(&dev->cmd_complete); + dev->complete_flag = true; + dev->mng.is_need_check = false; + } + len = DIV_ROUND_UP(dev->total_shmem_len, 8); + + for (i = 0; i < len; i++) + p_dest_msg[i] = p_src_msg[i]; + /*For config cmd,no use tail and head,set this value to zero*/ + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_TAIL, 0); + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_HEAD, 0); + dev->mng.tx_cmd_cnt = 1; + dev->mng.cur_cmd_cnt = 0; + i2c_phyt_trig_rv_intr(dev); +} + +void i2c_phyt_notify_rv(struct i2c_phyt_dev *dev, + bool need_check) +{ + reinit_completion(&dev->cmd_complete); + dev->complete_flag = true; + + dev->mng.is_need_check = need_check; + i2c_phyt_trig_rv_intr(dev); +} + +void i2c_phyt_set_suspend(struct i2c_phyt_dev *dev) +{ 
+ i2c_phyt_set_cmd32(dev, PHYTI2C_MSG_CMD_SET_SUSPEND, 0); + + if (i2c_phyt_check_result(dev)) + dev_warn(dev->dev, "fail to set suspend\n"); +} + +void i2c_phyt_set_sda_hold(struct i2c_phyt_dev *dev, u32 data) +{ + i2c_phyt_set_cmd32(dev, PHYTI2C_MSG_CMD_SET_SDA_HOLD, data); +} + +int i2c_phyt_check_status(struct i2c_phyt_dev *dev, + struct phyt_msg_info *msg) +{ + int result = FT_I2C_CHECK_STATUS_ERR; + + dev->abort_source = 1 << FT_I2C_CHECK_STATUS_ERR; + /* RV Has update the result,but not sure the result is OK*/ + if (msg->head.status0 != FT_I2C_MSG_COMPLETE_UNKNOW) { + if (!dev->mng.is_need_check || + ((msg->head.status0 == FT_I2C_MSG_COMPLETE_OK) && + (msg->head.status1 == FT_I2C_SUCCESS))) { + dev->abort_source = 0; + result = FT_I2C_SUCCESS; + } else { + i2c_phyt_show_log(dev); + dev->abort_source = 1 << msg->head.status1; + } + } + + return result; +} + +int i2c_phyt_check_result(struct i2c_phyt_dev *dev) +{ + if (!wait_for_completion_timeout(&dev->cmd_complete, + dev->adapter.timeout)) { + dev_err(dev->dev, "check timed out\n"); + i2c_phyt_show_log(dev); + dev->abort_source = BIT(FT_I2C_TIMEOUT); + return -EINVAL; + } + if (!dev->abort_source) + return 0; + return -EINVAL; +} + +void i2c_phyt_disable(struct i2c_phyt_dev *dev) +{ + i2c_phyt_set_module_en(dev, FT_I2C_ADAPTER_MODULE_OFF); + + i2c_phyt_check_result(dev); +} + +void i2c_phyt_set_module_en(struct i2c_phyt_dev *dev, u8 data) +{ + i2c_phyt_set_cmd8(dev, PHYTI2C_MSG_CMD_SET_MODULE_EN, data); +} + +void i2c_phyt_set_int_tl(struct i2c_phyt_dev *dev, + u8 tx_threshold, u8 rx_threshold) +{ + struct phyt_msg_info i2c_mng_msg; + struct i2c_phyt_fifo_threshold *ctrl = + (struct i2c_phyt_fifo_threshold *)&i2c_mng_msg.data[0]; + + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + i2c_phyt_common_set_cmd(dev, &i2c_mng_msg, PHYTI2C_MSG_CMD_SET, + PHYTI2C_MSG_CMD_SET_INT_TL); + + ctrl->tx_fifo_threshold = tx_threshold; + ctrl->rx_fifo_threshold = rx_threshold; + + dev->total_shmem_len = 
FT_I2C_MSG_CMDDATA_SIZE; + + i2c_phyt_send_msg(dev, &i2c_mng_msg, true); +} + +void i2c_phyt_set_int_interrupt(struct i2c_phyt_dev *dev, + u32 is_enable, u32 intr_mask) +{ + struct phyt_msg_info i2c_mng_msg; + u32 *data = (u32 *)i2c_mng_msg.data; + + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + i2c_phyt_common_set_cmd(dev, &i2c_mng_msg, PHYTI2C_MSG_CMD_SET, + PHYTI2C_MSG_CMD_SET_INTERRUPT); + + data[0] = is_enable; + data[1] = intr_mask; + dev->total_shmem_len = FT_I2C_MSG_CMDDATA_SIZE; + + i2c_phyt_send_msg(dev, &i2c_mng_msg, true); +} + +static int i2c_phyt_report_cmd_handle(struct i2c_phyt_dev *dev, + struct phyt_msg_info *rx_msg, u32 head, u32 tail) +{ + u16 sub_cmd = rx_msg->head.cmd_subid; + int ret = -EINVAL; + + switch (sub_cmd) { + case PHYTI2C_MSG_CMD_SLAVE_EVENT: + ret = i2c_phyt_slave_event_process(dev, rx_msg, head, tail); + break; + case PHYTI2C_MSG_CMD_SMBALERT_IN_N: + ret = i2c_phyt_master_smbus_alert_process(dev); + break; + default: + break; + } + + return ret; +} + +void i2c_phyt_slave_isr_handle(struct i2c_phyt_dev *dev) +{ + struct phyt_msg_info *rx_msg = (struct phyt_msg_info *)dev->rx_shmem_addr; + struct phyt_msg_info msg_buf; + u32 head, tail; + + tail = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_TAIL); + head = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_HEAD); + + /*There is only one rx_msg*/ + memcpy(&msg_buf, &rx_msg[0], sizeof(msg_buf)); + + if (msg_buf.head.cmd_type == PHYTI2C_MSG_CMD_REPORT) + i2c_phyt_report_cmd_handle(dev, &msg_buf, head, tail); +} + +static void i2c_phyt_set_complete(struct i2c_phyt_dev *dev) +{ + struct phyt_msg_info *set_msg = (struct phyt_msg_info *)dev->rx_shmem_addr; + struct phyt_msg_info tmp_msg; + + /*There is only one rx_msg*/ + memcpy(&tmp_msg, &set_msg[0], sizeof(struct phyt_msg_info)); + tmp_msg.head.status0 = FT_I2C_MSG_COMPLETE_OK; + tmp_msg.head.cmd_type = 0xff; + tmp_msg.head.cmd_subid = 0xff; + memcpy(&set_msg[0], &tmp_msg, sizeof(struct phyt_msg_info)); +} + +void i2c_phyt_master_isr_handle(struct 
i2c_phyt_dev *dev) +{ + struct phyt_msg_info *rx_msg = (struct phyt_msg_info *)dev->rx_shmem_addr; + struct phyt_msg_info msg_buf; + + memcpy(&msg_buf, &rx_msg[0], sizeof(struct phyt_msg_info)); + i2c_phyt_set_complete(dev); + + if (msg_buf.head.cmd_type == PHYTI2C_MSG_CMD_REPORT) + i2c_phyt_report_cmd_handle(dev, &msg_buf, 0, 0); +} + +MODULE_AUTHOR("Wu Jinyong "); +MODULE_DESCRIPTION("Phytium I2C bus adapter core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-core.h b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-core.h new file mode 100644 index 0000000000000000000000000000000000000000..8c44e505868e297118f3ea9fff91496df95769a7 --- /dev/null +++ b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-core.h @@ -0,0 +1,512 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium I2C adapter driver. + * + * Copyright (C) 2023-2024, Phytium Technology Co., Ltd. + */ +#ifndef I2C_PHYT_CORE_H__ +#define I2C_PHYT_CORE_H__ + +#include +#include +#include +#include +#include +#include + +#define I2C_PHYTIUM_V2_DRV_VERSION "1.0.2" + +#define FT_I2C_MSG_UNIT_SIZE 10 +#define FT_I2C_DATA_RESV_LEN 2 +#define FT_I2C_SHMEM_RX_ADDR_OFFSET 384 +#define FT_I2C_MSG_CMDDATA_SIZE 64 +#define FT_I2C_IN_INTERRUPT_MODE 0 +#define FT_I2C_ENABLE_INTERRUPT 1 +#define FT_I2C_REGFILE_AP2RV_INTR_MASK 0x20 +#define FT_I2C_REGFILE_AP2RV_INTR_STATE 0x24 +#define FT_I2C_REGFILE_RV2AP_INTR_MASK 0x28 +#define FT_I2C_REGFILE_RV2AP_INTR_STAT 0x2C +#define FT_I2C_REGFILE_RING 0x48 +#define FT_I2C_REGFILE_DEBUG 0x58 +#define FT_I2C_REGFILE_RV2AP_INTR_CLEAR 0x74 +#define FT_I2C_TRANS_FRAME_START (BIT(0)) +#define FT_I2C_TRANS_FRAME_END (BIT(1)) +#define FT_I2C_TRANS_FRAME_RESTART (BIT(2)) +#define FT_I2C_REGFILE_TX_RING_OFFSET 8 +#define FT_I2C_REGFILE_TX_RING_MASK GENMASK(13, 8) + +#define FT_I2C_REGFILE_HEARTBIT_VAL BIT(2) +#define FT_I2C_LOG_SIZE_LOW_SHIFT 4 +#define FT_I2C_LOG_SIZE_MASK GENMASK(7, FT_I2C_LOG_SIZE_LOW_SHIFT) +#define FT_I2C_LOG_ADDR_SHIFT 10 +#define 
FT_I2C_LOG_ADDR_LOW_SHIFT 8 +#define FT_I2C_LOG_ADDR_MASK GENMASK(29, FT_I2C_LOG_ADDR_LOW_SHIFT) +#define FT_I2C_LOG_ADDR_LOCK_VAL BIT(31) +#define FT_I2C_LOG_ADDR_LOG_FLAG BIT(3) +#define FT_I2C_LOG_ADDR_LOG_ALIVE BIT(1) +#define FT_I2C_LOG_ADDR_LOG_DEBUG BIT(0) +#define FT_I2C_REGFILE_DISABLE_INTR_VAL GENMASK(31, 0) +#define FT_I2C_REGFILE_ENABLE_INTR_VAL 0 +#define FT_I2C_REGFILE_AP2RV_SET_INTR_VAL BIT(4) + +#define FT_I2C_RV2AP_INTR_BIT4 BIT(4) + +#define FT_I2C_ADAPTER_MODULE_ON 1 +#define FT_I2C_ADAPTER_MODULE_OFF 2 +#define FT_I2C_ADAPTER_MODULE_RESET 3 + +#define FT_I2C_BUS_SPEED_STANARD_MODE 1 +#define FT_I2C_BUS_SPEED_FAST_MODE 2 +#define FT_I2C_BUS_SPEED_HIGH_MODE 3 +#define FT_I2C_BUS_SPEED_TRANS_PARAM_MODE 0 +#define FT_I2C_BUS_SPEED_CALC_MODE 1 + +#define FT_I2C_REGFILE_TX_HEAD 0 +#define FT_I2C_REGFILE_TX_TAIL 0x04 +#define FT_I2C_REGFILE_HEAD 0x08 +#define FT_I2C_REGFILE_TAIL 0x0C + +#define FT_IC_DEFAULT_FUNCTIONALITY \ + (I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |\ + I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |\ + I2C_FUNC_SMBUS_I2C_BLOCK) + +#define FT_I2C_VIRT_RV_SHMEM_TX_MSG_MAX_CNT 1 + +#define FT_I2C_MASTER_MODE_FLAG 1 +#define FT_I2C_SLAVE_MODE_FLAG 0 + +#define FT_I2C_RESTART_FLAG 1 +#define FT_I2C_NOT_RESTART_FLAG 0 + +#define FT_I2C_SLAVE_DATA_IN 0 +#define FT_I2C_SLAVE_DATA_OUT 1 + +#define FT_I2C_SPEED_100K 100000 +#define FT_I2C_SPEED_400K 400000 +#define FT_I2C_SPEED_1000K 1000000 +#define FT_I2C_SPEED_3400K 3400000 + +#define FT_IC_CON_MASTER 0x1 +#define FT_IC_CON_SPEED_STD 0x2 +#define FT_IC_CON_SPEED_FAST 0x4 +#define FT_IC_CON_SPEED_HIGH 0x6 +#define FT_IC_CON_SPEED_MASK 0x6 +#define FT_IC_CON_10BITADDR_SLAVE 0x8 +#define FT_IC_CON_10BITADDR_MASTER 0x10 +#define FT_IC_CON_RESTART_EN 0x20 +#define FT_IC_CON_SLAVE_DISABLE 0x40 +#define FT_IC_CON_STOP_DET_IFADDRESSED 0x80 +#define FT_I2C_CON_STOP_DET_IFADDR_MASK 0x80 +#define FT_IC_CON_TX_EMPTY_CTRL 0x100 +#define 
FT_IC_CON_RX_FIFO_FULL_HLD_CTRL 0x200 +#define FT_I2C_CON_RX_FIFO_FULL_HLD_MASK 0x200 + +#define FT_IC_CON 0x0 +#define FT_IC_TAR 0x4 +#define FT_IC_SAR 0x8 +#define FT_IC_DATA_CMD 0x10 +#define FT_IC_SS_SCL_HCNT 0x14 +#define FT_IC_SS_SCL_LCNT 0x18 +#define FT_IC_FS_SCL_HCNT 0x1c +#define FT_IC_FS_SCL_LCNT 0x20 +#define FT_IC_HS_SCL_HCNT 0x24 +#define FT_IC_HS_SCL_LCNT 0x28 +#define FT_IC_INTR_STAT 0x2c +#define FT_IC_INTR_MASK 0x30 +#define FT_IC_RAW_INTR_STAT 0x34 +#define FT_IC_RX_TL 0x38 +#define FT_IC_TX_TL 0x3c +#define FT_IC_CLR_INTR 0x40 +#define FT_IC_CLR_RX_UNDER 0x44 +#define FT_IC_CLR_RX_OVER 0x48 +#define FT_IC_CLR_TX_OVER 0x4c +#define FT_IC_CLR_RD_REQ 0x50 +#define FT_IC_CLR_TX_ABRT 0x54 +#define FT_IC_CLR_RX_DONE 0x58 +#define FT_IC_CLR_ACTIVITY 0x5c +#define FT_IC_CLR_STOP_DET 0x60 +#define FT_IC_CLR_START_DET 0x64 +#define FT_IC_CLR_GEN_CALL 0x68 +#define FT_IC_ENABLE 0x6c +#define FT_IC_STATUS 0x70 +#define FT_IC_TXFLR 0x74 +#define FT_IC_RXFLR 0x78 +#define FT_IC_SDA_HOLD 0x7c +#define FT_IC_TX_ABRT_SOURCE 0x80 +#define FT_IC_ENABLE_STATUS 0x9c +#define FT_IC_SMBCLK_LOW_MEXT 0xa8 +#define FT_IC_SMBCLK_LOW_TIMEOUT 0xac +#define FT_IC_SMBDAT_STUCK_TIMEOUT 0xb4 +#define FT_IC_CLR_SMBCLK_EXT_LOW_TIMEOUT 0xbc +#define FT_IC_CLR_SMBCLK_TMO_LOW_TIMEOUT 0xc0 +#define FT_IC_CLR_SMBDAT_LOW_TIMEOUT 0xc4 +#define FT_IC_CLR_SMBALERT_IN_N 0xd0 + +#define FT_IC_INTR_RX_UNDER 0x001 +#define FT_IC_INTR_RX_OVER 0x002 +#define FT_IC_INTR_RX_FULL 0x004 +#define FT_IC_INTR_TX_OVER 0x008 +#define FT_IC_INTR_TX_EMPTY 0x010 +#define FT_IC_INTR_RD_REQ 0x020 +#define FT_IC_INTR_TX_ABRT 0x040 +#define FT_IC_INTR_RX_DONE 0x080 +#define FT_IC_INTR_ACTIVITY 0x100 +#define FT_IC_INTR_STOP_DET 0x200 +#define FT_IC_INTR_START_DET 0x400 +#define FT_IC_INTR_GEN_CALL 0x800 +#define FT_IC_INTR_SMBCLK_EXT_LOW_TIMEOUT 0x1000 +#define FT_IC_INTR_SMBCLK_TMO_LOW_TIMEOUT 0x2000 +#define FT_IC_INTR_SMBSDA_LOW_TIMEOUT 0x4000 +#define FT_IC_INTR_SMBALERT_IN_N 0x20000 + +#define 
FT_IC_INTR_DEFAULT_MASK \ + (FT_IC_INTR_RX_FULL | FT_IC_INTR_TX_ABRT | FT_IC_INTR_STOP_DET) +#define FT_IC_INTR_MASTER_MASK (FT_IC_INTR_DEFAULT_MASK | FT_IC_INTR_TX_EMPTY) +#define FT_IC_INTR_SLAVE_MASK \ + (FT_IC_INTR_DEFAULT_MASK | FT_IC_INTR_RX_DONE | FT_IC_INTR_RX_UNDER | FT_IC_INTR_RD_REQ) +#define FT_IC_INTR_SMBUS_MASK \ + (FT_IC_INTR_MASTER_MASK | FT_IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | \ + FT_IC_INTR_SMBCLK_TMO_LOW_TIMEOUT | FT_IC_INTR_SMBSDA_LOW_TIMEOUT) + +#define FT_IC_INTR_SMBUS_TIME_MASK \ + (FT_IC_INTR_SMBCLK_EXT_LOW_TIMEOUT | \ + FT_IC_INTR_SMBCLK_TMO_LOW_TIMEOUT | FT_IC_INTR_SMBSDA_LOW_TIMEOUT) + +#define FT_IC_STATUS_ACTIVITY 0x1 +#define FT_IC_STATUS_TFE BIT(2) +#define FT_IC_STATUS_MASTER_ACTIVITY BIT(5) +#define FT_IC_STATUS_SLAVE_ACTIVITY BIT(6) + +#define FT_IC_SDA_HOLD_RX_SHIFT 16 +#define FT_IC_SDA_HOLD_RX_MASK GENMASK(23, FT_IC_SDA_HOLD_RX_SHIFT) + +#define FT_IC_ERR_TX_ABRT 3 + +#define FT_IC_TAR_10BITADDR_MASTER BIT(12) + +#define FT_IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) +#define FT_IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) + +#define FT_STATUS_IDLE 0x0 +#define FT_STATUS_WRITE_IN_PROGRESS 0x1 +#define FT_STATUS_READ_IN_PROGRESS 0x2 + +/* + * operation modes + */ +#define phyt_IC_MASTER 0 +#define phyt_IC_SLAVE 1 + +#define FT_ABRT_7B_ADDR_NOACK 0 +#define FT_ABRT_10ADDR1_NOACK 1 +#define FT_ABRT_10ADDR2_NOACK 2 +#define FT_ABRT_TXDATA_NOACK 3 +#define FT_ABRT_GCALL_NOACK 4 +#define FT_ABRT_GCALL_READ 5 +#define FT_ABRT_SBYTE_ACKDET 7 +#define FT_ABRT_SBYTE_NORSTRT 9 +#define FT_ABRT_10B_RD_NORSTRT 10 +#define FT_ABRT_MASTER_DIS 11 +#define FT_ARB_LOST 12 +#define FT_ABRT_SLAVE_FLUSH_TXFIFO 13 +#define FT_ABRT_SLAVE_ARBLOST 14 +#define FT_ABRT_SLAVE_RD_INTX 15 + +#define FT_IC_TX_ABRT_7B_ADDR_NOACK (1UL << FT_ABRT_7B_ADDR_NOACK) +#define FT_IC_TX_ABRT_10ADDR1_NOACK (1UL << FT_ABRT_10ADDR1_NOACK) +#define FT_IC_TX_ABRT_10ADDR2_NOACK (1UL << FT_ABRT_10ADDR2_NOACK) +#define FT_IC_TX_ABRT_TXDATA_NOACK (1UL << FT_ABRT_TXDATA_NOACK) 
+#define FT_IC_TX_ABRT_GCALL_NOACK (1UL << FT_ABRT_GCALL_NOACK) +#define FT_IC_TX_ABRT_GCALL_READ (1UL << FT_ABRT_GCALL_READ) +#define FT_IC_TX_ABRT_SBYTE_ACKDET (1UL << FT_ABRT_SBYTE_ACKDET) +#define FT_IC_TX_ABRT_SBYTE_NORSTRT (1UL << FT_ABRT_SBYTE_NORSTRT) +#define FT_IC_TX_ABRT_10B_RD_NORSTRT (1UL << FT_ABRT_10B_RD_NORSTRT) +#define FT_IC_TX_ABRT_MASTER_DIS (1UL << FT_ABRT_MASTER_DIS) +#define FT_IC_TX_ARB_LOST (1UL << FT_ARB_LOST) +#define FT_IC_RX_ABRT_SLAVE_RD_INTX (1UL << FT_ABRT_SLAVE_RD_INTX) +#define FT_IC_RX_ABRT_SLAVE_ARBLOST (1UL << FT_ABRT_SLAVE_ARBLOST) +#define FT_IC_RX_ABRT_SLAVE_FLUSH_TXFIFO (1UL << FT_ABRT_SLAVE_FLUSH_TXFIFO) + +#define FT_IC_TX_ABRT_NOACK \ + (FT_IC_TX_ABRT_7B_ADDR_NOACK | FT_IC_TX_ABRT_10ADDR1_NOACK |\ + FT_IC_TX_ABRT_10ADDR2_NOACK | FT_IC_TX_ABRT_TXDATA_NOACK |\ + FT_IC_TX_ABRT_GCALL_NOACK) +#define FT_CONTROLLER_TYPE_IIC 0 +#define FT_CONTROLLER_TYPE_SMBUS 1 + +#define FT_I2C_MSG_COMPLETE_OK 1 +#define FT_I2C_MSG_COMPLETE_UNKNOW 0 + +#define FT_I2C_CON_MASTER_MODE_MASK 0x01 +#define FT_I2C_CON_SLAVE_MODE_MASK (1 << 6) +#define FT_I2C_CON_RESTART_MASK 0x20 + +#define FT_I2C_CON_ADDR_MODE_MASK (1 << 3) +#define FT_I2C_ADDR_7BIT_MODE (0) + +#define FT_I2C_TRANS_TIMEOUT 0xF0 + +#define FT_ACCESS_INTR_MASK 0x00000004 +#define FT_DEFAULT_CLOCK_FREQUENCY 100000000 + +#define FT_I2C_MSG_DATA_LEN 56 +#define FT_I2C_SINGLE_BUF_LEN 51 +#define FT_I2C_SINGLE_FRAME_CNT 32 + +#define RV_ANSWER_DATA_SIZE 122 +#define RV_ANSWER_DATA_CONTINUE 1 +#define RV_ANSWER_DATA_END 0 + +enum phyti2c_status_code { + FT_I2C_SUCCESS = 0, + FT_I2C_TIMEOUT, + FT_I2C_CNT_ERR, + FT_I2C_TX_ABRT, + FT_I2C_INT_ERR, + FT_I2C_BLOCK_SIZE, + FT_I2C_INVALID_ADDR, + FT_I2C_TRANS_PACKET_FAIL, + /*The RV result must put above*/ + FT_I2C_RUNNING, + FT_I2C_CHECK_STATUS_ERR +}; + +struct i2c_ft_rv_event_mng { + u8 event_cnt; +}; + +enum phyti2c_msg_cmd_id { + PHYTI2C_MSG_CMD_DEFAULT = 0, + PHYTI2C_MSG_CMD_SET, + PHYTI2C_MSG_CMD_GET, + PHYTI2C_MSG_CMD_DATA, + 
PHYTI2C_MSG_CMD_REPORT, + PHYTI2C_SYS_PROTOCOL, +}; + +enum phyti2c_msg_default_subid { + PHYTI2C_MSG_CMD_DEFAULT_RESV = 0, + PHYTI2C_MSG_CMD_DEFAULT_RESUME, + PHYTI2C_MSG_CMD_DEFAULT_SLV +}; + +struct i2c_ft_default_cfg_msg { + u32 ss_hcnt; + u32 ss_lcnt; + u32 fs_hcnt; + u32 fs_lcnt; + u32 hs_hcnt; + u32 hs_lcnt; + u32 sda_hold; + u32 tx_fifo_thr; + u32 rx_fifo_thr; + u32 smbclk_mext; + u32 smbclk_timeout; + u32 smbdat_timeout; + u32 cfg; + u32 intr_mask; +}; + +enum phyti2c_set_subid { + PHYTI2C_MSG_CMD_SET_MODULE_EN = 0, + PHYTI2C_MSG_CMD_SET_MODE, + PHYTI2C_MSG_CMD_SET_RESTART, + PHYTI2C_MSG_CMD_SET_ADDR_MODE, + PHYTI2C_MSG_CMD_SET_SPEED, + PHYTI2C_MSG_CMD_SET_INT_TL, + PHYTI2C_MSG_CMD_SET_SDA_HOLD, + PHYTI2C_MSG_CMD_SET_INTERRUPT, + PHYTI2C_MSG_CMD_SET_RX_FIFO_FULL, + PHYTI2C_MSG_CMD_SET_STOP_DET_IF_ADDRESSED, + PHYTI2C_MSG_CMD_SET_WRITE_PROTECT, + PHYTI2C_MSG_CMD_SET_SMBCLK_LOW_MEXT, + PHYTI2C_MSG_CMD_SET_SMBCLK_LOW_TIMEOUT, + PHYTI2C_MSG_CMD_SET_SMBDAT_STUCK_TIMEOUT, + PHYTI2C_MSG_CMD_SET_ADDR, + PHYTI2C_MSG_CMD_SET_SUSPEND +}; + +enum phyti2c_data_subid { + PHYTI2C_MSG_CMD_DATA_XFER = 0, + PHYTI2C_MSG_CMD_DATA_SLAVE +}; + +enum phyti2c_report_subid { + PHYTI2C_MSG_CMD_SMBCLK_EXT_LOW_TIMEOUT = 0, + PHYTI2C_MSG_CMD_SMBCLK_TMO_LOW_TIMEOUT, + PHYTI2C_MSG_CMD_SMBSDA_LOW_TIMEOUT, + PHYTI2C_MSG_CMD_SMBALERT_IN_N, + PHYTI2C_MSG_CMD_SLAVE_EVENT, +}; + +struct i2c_phyt_tranfer { + u32 tx_cmd_cnt; + u32 cur_cmd_cnt; + u32 cur_index; + u32 opt_finish_len; + u32 tx_ring_cnt; + bool is_need_check; + bool is_last_frame; +}; + +struct i2c_phyt_dev { + struct device *dev; + void __iomem *base; + void __iomem *log_addr; + void *tx_shmem_addr; + void *rx_shmem_addr; + u8 *tx_buf; + u8 *rx_buf; + struct clk *clk; + struct reset_control *rst; + struct i2c_client *slave; + struct i2c_client *ara; + struct i2c_msg *msgs; + struct timer_list timer; + struct phytium_pci_i2c *controller; + + int module; + int irq; + u32 intr_mask; + u32 flags; + u32 total_shmem_len; + u32 
total_cnt; + int mode; + struct i2c_phyt_tranfer mng; + struct completion cmd_complete; + struct i2c_adapter adapter; + struct i2c_smbus_alert_setup alert_data; + + bool complete_flag; + u32 abort_source; + + int msgs_num; + int msg_err; + u32 tx_buf_len; + u32 rx_buf_len; + + u32 master_cfg; + u32 slave_cfg; + u32 functionality; + + u8 real_index[FT_I2C_SINGLE_FRAME_CNT]; + struct i2c_timings timings; + u32 sda_hold_time; + u16 ss_hcnt; + u16 ss_lcnt; + u16 fs_hcnt; + u16 fs_lcnt; + u16 fp_hcnt; + u16 fp_lcnt; + u16 hs_hcnt; + u16 hs_lcnt; + + bool alive_enabled; + bool debug_enabled; + u32 log_size; + bool pm_disabled; + spinlock_t shmem_spin; + + u32 (*get_clk_rate_khz)(struct i2c_phyt_dev *dev); + void (*disable)(struct i2c_phyt_dev *dev); + void (*disable_int)(struct i2c_phyt_dev *dev); + void (*watchdog)(struct i2c_phyt_dev *dev); + int (*init)(struct i2c_phyt_dev *dev); +}; + +struct phyt_msg_head { + u8 resv; + u8 seq; + u8 cmd_type; + u8 cmd_subid; + u16 len; + u8 status1; + u8 status0; +}; + +struct phyt_msg_info { + struct phyt_msg_head head; + u8 data[FT_I2C_MSG_DATA_LEN]; +}; + +struct i2c_ft_trans_msg_info { + u16 addr; + u16 flags; + u8 type; +} __packed; + +struct i2c_phyt_bus_speed_info { + u8 speed_mode; + u8 calc_en; + u16 scl_hcnt; + u16 scl_lcnt; + u32 sda_hold; +}; + +struct i2c_phyt_fifo_threshold { + u8 rx_fifo_threshold; + u8 tx_fifo_threshold; +}; + +struct i2c_phyt_sda_hold_info { + u32 sda_hold; +}; + +static inline void i2c_phyt_write_reg(struct i2c_phyt_dev *dev, + u32 reg_offset, u32 reg_value) +{ + writel_relaxed(reg_value, dev->base + reg_offset); +} + +static inline u32 i2c_phyt_read_reg(struct i2c_phyt_dev *dev, u32 reg_offset) +{ + return readl_relaxed(dev->base + reg_offset); +} + +unsigned long i2c_phyt_clk_rate(struct i2c_phyt_dev *dev); +int i2c_phyt_prepare_clk(struct i2c_phyt_dev *dev, bool prepare); +u32 i2c_phyt_func(struct i2c_adapter *adap); +u32 i2c_phyt_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset); 
+u32 i2c_phyt_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset); +void i2c_phyt_default_cfg(struct i2c_phyt_dev *dev, + struct i2c_ft_default_cfg_msg *buf); +void i2c_phyt_set_suspend(struct i2c_phyt_dev *dev); +void i2c_phyt_enable_debug(struct i2c_phyt_dev *dev); +void i2c_phyt_disable_debug(struct i2c_phyt_dev *dev); +void i2c_phyt_show_log(struct i2c_phyt_dev *dev); +void i2c_phyt_enable_alive(struct i2c_phyt_dev *dev); +void i2c_phyt_disable_alive(struct i2c_phyt_dev *dev); +void i2c_phyt_common_watchdog(struct i2c_phyt_dev *dev); +int i2c_phyt_malloc_log_mem(struct i2c_phyt_dev *dev); +int i2c_phyt_rv_data_cmd_handle(struct phyt_msg_info *rx_msg); +int i2c_phyt_master_probe(struct i2c_phyt_dev *dev); +void i2c_phyt_handle_tx_abort(struct i2c_phyt_dev *dev); +int i2c_phyt_check_result(struct i2c_phyt_dev *dev); +void i2c_phyt_set_cmd8(struct i2c_phyt_dev *dev, u8 sub_cmd, u8 data); +void i2c_phyt_set_cmd16(struct i2c_phyt_dev *dev, u8 sub_cmd, u16 data); +void i2c_phyt_set_cmd32(struct i2c_phyt_dev *dev, u8 sub_cmd, u32 data); +void i2c_phyt_common_set_cmd(struct i2c_phyt_dev *dev, + struct phyt_msg_info *i2c_mng_msg, u8 cmd, u8 sub_cmd); +void i2c_phyt_disable_int(struct i2c_phyt_dev *dev); +void i2c_phyt_set_int_tl(struct i2c_phyt_dev *dev, u8 tx_threshold, + u8 rx_threshold); +int i2c_phyt_slave_probe(struct i2c_phyt_dev *dev); +void i2c_phyt_set_module_en(struct i2c_phyt_dev *dev, u8 data); +void i2c_phyt_set_sda_hold(struct i2c_phyt_dev *dev, u32 data); +void i2c_phyt_disable(struct i2c_phyt_dev *dev); +int i2c_phyt_slave_event_process(struct i2c_phyt_dev *dev, + struct phyt_msg_info *rx_msg, u32 head, u32 tail); +void i2c_phyt_data_cmd8_array(struct i2c_phyt_dev *dev, u8 sub_cmd, u8 *data, + int len); +void i2c_phyt_slave_isr_handle(struct i2c_phyt_dev *dev); +void i2c_phyt_master_isr_handle(struct i2c_phyt_dev *dev); +int i2c_phyt_master_smbus_alert_process(struct i2c_phyt_dev *dev); +void i2c_phyt_common_regfile_enable_int(struct i2c_phyt_dev *dev); 
+void i2c_phyt_common_regfile_disable_int(struct i2c_phyt_dev *dev); +void i2c_phyt_common_regfile_clear_rv2ap_int(struct i2c_phyt_dev *dev, u32 stat); +void i2c_phyt_notify_rv(struct i2c_phyt_dev *dev, bool need_check); +int i2c_phyt_check_status(struct i2c_phyt_dev *dev, struct phyt_msg_info *msg); +void i2c_phyt_set_int_interrupt(struct i2c_phyt_dev *dev, + u32 is_enable, u32 intr_mask); +#endif diff --git a/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-master.c b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-master.c new file mode 100644 index 0000000000000000000000000000000000000000..8b205ae7458c5e15506671e1ee2c0112e6937646 --- /dev/null +++ b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-master.c @@ -0,0 +1,591 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2C adapter driver. + * + * Copyright (C) 2023-2024, Phytium Technology Co., Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "i2c-phyt-core.h" + +#define FT_DEFAULT_TIMEOUT (FT_DEFAULT_CLOCK_FREQUENCY / 1000 * 35) + +#define FT_I2C_MSG_MNG_SIZE 8 +#define FT_I2C_SHMEM_STORE_OFFSET (FT_I2C_MSG_MNG_SIZE + sizeof(struct i2c_ft_trans_msg_info)) + +static int i2c_phyt_master_xfer(struct i2c_adapter *adapter, + struct i2c_msg msgs[], int num); +static void i2c_phyt_master_parse_data(struct i2c_phyt_dev *dev, + struct phyt_msg_info *shmem_msg, + int index); +static int i2c_phyt_master_default_init(struct i2c_phyt_dev *dev); +static int i2c_phyt_master_set_timings_master(struct i2c_phyt_dev *dev); + +static const struct i2c_algorithm i2c_phyt_master_algo = { + .master_xfer = i2c_phyt_master_xfer, + .functionality = i2c_phyt_func, +}; + + +static int i2c_phyt_master_set_timings_master(struct i2c_phyt_dev *dev) +{ + const char *mode_str, *fp_str = ""; + u32 sda_falling_time, scl_falling_time; + struct i2c_timings *t = &dev->timings; + u32 ic_clk; + int ret = 0; + + /* Set standard and fast speed 
dividers for high/low periods */ + sda_falling_time = t->sda_fall_ns ?: 300; /* ns */ + scl_falling_time = t->scl_fall_ns ?: 300; /* ns */ + + /* Calculate SCL timing parameters for standard mode if not set */ + if (!dev->ss_hcnt || !dev->ss_lcnt) { + ic_clk = i2c_phyt_clk_rate(dev); + dev->ss_hcnt = + i2c_phyt_scl_hcnt(ic_clk, 4000,/* tHD;STA = tHIGH = 4.0 us */ + sda_falling_time, 0,/* 0: DW default, 1: Ideal */ + 0); /* No offset */ + dev->ss_lcnt = i2c_phyt_scl_lcnt(ic_clk, 4700, /* tLOW = 4.7 us */ + scl_falling_time, 0); /* No offset */ + } + dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n", dev->ss_hcnt, dev->ss_lcnt); + /* + * Set SCL timing parameters for fast mode or fast mode plus. Only + * difference is the timing parameter values since the registers are + * the same. + */ + if (t->bus_freq_hz == 1000000) { + /* + * Check are fast mode plus parameters available and use + * fast mode if not. + */ + if (dev->fp_hcnt && dev->fp_lcnt) { + dev->fs_hcnt = dev->fp_hcnt; + dev->fs_lcnt = dev->fp_lcnt; + fp_str = " Plus"; + } + } + /* + * Calculate SCL timing parameters for fast mode if not set. They are + * needed also in high speed mode. 
+ */ + if (!dev->fs_hcnt || !dev->fs_lcnt) { + ic_clk = i2c_phyt_clk_rate(dev); + dev->fs_hcnt = + i2c_phyt_scl_hcnt(ic_clk, 600, /* tHD;STA = tHIGH = 0.6 us */ + sda_falling_time, 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + dev->fs_lcnt = i2c_phyt_scl_lcnt(ic_clk, 1300, /* tLOW = 1.3 us */ + scl_falling_time, 0); /* No offset */ + } + dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n", fp_str, dev->fs_hcnt, dev->fs_lcnt); + + if (dev->hs_hcnt && dev->hs_lcnt) + dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n", dev->hs_hcnt, + dev->hs_lcnt); + + switch (dev->master_cfg & FT_IC_CON_SPEED_MASK) { + case FT_IC_CON_SPEED_STD: + mode_str = "Standard Mode"; + break; + case FT_IC_CON_SPEED_HIGH: + mode_str = "High Speed Mode"; + break; + default: + mode_str = "Fast Mode"; + } + dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str); + + return ret; +} + +int i2c_phyt_master_smbus_alert_process(struct i2c_phyt_dev *dev) +{ + if (dev->ara) + i2c_handle_smbus_alert(dev->ara); + else + dev_dbg(dev->dev, "alert do nothing\n"); + + return 0; +} + +static int i2c_phyt_master_update_new_msg(struct i2c_phyt_dev *dev) +{ + struct phyt_msg_info shmem_msg; + struct phyt_msg_info *tx_shmem = (struct phyt_msg_info *)dev->tx_shmem_addr; + struct i2c_ft_trans_msg_info *i2c_msg_info; + struct i2c_msg *msgs = dev->msgs; + int remain_len; + + /*Check the current index and the len are valid */ + if ((dev->mng.cur_index >= dev->msgs_num) || + (msgs[dev->mng.cur_index].len < dev->mng.opt_finish_len)) + return -EINVAL; + + memset(&shmem_msg, 0, sizeof(struct phyt_msg_info)); + i2c_msg_info = (struct i2c_ft_trans_msg_info *)&shmem_msg.data[0]; + /*Get the remain len*/ + remain_len = msgs[dev->mng.cur_index].len - dev->mng.opt_finish_len; + if (!remain_len) { + /*Use the next index*/ + if (dev->mng.cur_index + 1 >= dev->msgs_num) + return FT_I2C_RUNNING; + + dev->mng.cur_index++; + + if ((msgs[dev->mng.cur_index - 1].flags & I2C_M_RD) != + (msgs[dev->mng.cur_index].flags & 
I2C_M_RD)) + i2c_msg_info->type = FT_I2C_TRANS_FRAME_RESTART; + + dev->mng.opt_finish_len = 0; + remain_len = msgs[dev->mng.cur_index].len; + } + + /*Set the Head.len and Head.seq*/ + shmem_msg.head.len = remain_len; + if (remain_len > FT_I2C_SINGLE_BUF_LEN) { + shmem_msg.head.len = FT_I2C_SINGLE_BUF_LEN; + } else { + if (dev->mng.cur_index + 1 >= dev->msgs_num) { + i2c_msg_info->type |= FT_I2C_TRANS_FRAME_END; + dev->mng.is_last_frame = true; + } + } + /*Set other info of the Head*/ + shmem_msg.head.seq = dev->total_cnt--; + shmem_msg.head.cmd_type = PHYTI2C_MSG_CMD_DATA; + shmem_msg.head.cmd_subid = PHYTI2C_MSG_CMD_DATA_XFER; + shmem_msg.head.status0 = FT_I2C_MSG_COMPLETE_UNKNOW; + + /*store addr flags of struct i2c_msg*/ + i2c_msg_info->addr = msgs[dev->mng.cur_index].addr; + i2c_msg_info->flags = msgs[dev->mng.cur_index].flags; + + dev->total_shmem_len = FT_I2C_SHMEM_STORE_OFFSET; + /*store data*/ + if (!(msgs[dev->mng.cur_index].flags & I2C_M_RD)) { + memcpy(&shmem_msg.data[sizeof(struct i2c_ft_trans_msg_info)], + &msgs[dev->mng.cur_index].buf[dev->mng.opt_finish_len], + shmem_msg.head.len); + dev->total_shmem_len += shmem_msg.head.len; + } + + dev->mng.opt_finish_len += shmem_msg.head.len; + /*update new data to shmem*/ + memcpy(&tx_shmem[dev->mng.tx_cmd_cnt % dev->mng.tx_ring_cnt], + &shmem_msg, dev->total_shmem_len); + + dev->real_index[dev->mng.tx_cmd_cnt % dev->mng.tx_ring_cnt] = + dev->mng.cur_index; + /*Update the Tx_Tail*/ + dev->mng.tx_cmd_cnt++; + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_TAIL, + dev->mng.tx_cmd_cnt % dev->mng.tx_ring_cnt); + return FT_I2C_RUNNING; +} + +static int i2c_phyt_master_handle(struct i2c_phyt_dev *dev) +{ + struct phyt_msg_info *tx_shmem = (struct phyt_msg_info *)dev->tx_shmem_addr; + struct phyt_msg_info shmem_msg; + u32 tx_head; + int ret; + + if (dev->mng.cur_cmd_cnt >= dev->mng.tx_cmd_cnt) + return 0; + + /*Get the index of general msg*/ + tx_head = dev->mng.cur_cmd_cnt % dev->mng.tx_ring_cnt; + dev->mng.cur_cmd_cnt++; 
+ + /*Read the general_msg*/ + memcpy(&shmem_msg, &tx_shmem[tx_head], sizeof(struct phyt_msg_info)); + ret = i2c_phyt_check_status(dev, &shmem_msg); + + /*Error*/ + if (ret) + return -EINVAL; + + /*no check cmd*/ + if (!dev->mng.is_need_check) + return 0; + + i2c_phyt_master_parse_data(dev, &shmem_msg, tx_head); + /*No need to update new data*/ + if (dev->mng.is_last_frame) { + if (dev->mng.cur_cmd_cnt == dev->mng.tx_cmd_cnt) + return 0;/*This is last frame */ + return FT_I2C_RUNNING;/*There are some msgs needed to process*/ + } + return i2c_phyt_master_update_new_msg(dev); +} + +static int i2c_phyt_master_calc_total_frame_cnt(struct i2c_phyt_dev *dev) +{ + int i, frame_cnt = 0, len = 0, remain_len; + struct i2c_msg *msgs = dev->msgs; + + for (i = 0; i < dev->msgs_num; i++) { + len = 0; + remain_len = msgs[i].len; + while (len < remain_len) { + if (remain_len > FT_I2C_SINGLE_BUF_LEN) + len += FT_I2C_SINGLE_BUF_LEN; + else + len = remain_len; + + frame_cnt++; + } + } + return frame_cnt; +} + +static void i2c_phyt_master_xfer_single_frame(struct i2c_phyt_dev *dev) +{ + struct phyt_msg_info i2c_mng_msg; + struct i2c_ft_trans_msg_info *i2c_msg_info; + struct phyt_msg_info *shmem_msg = + (struct phyt_msg_info *)dev->tx_shmem_addr; + struct i2c_msg *msgs = dev->msgs; + int i, len = 0, remain_len; + + dev->total_cnt = i2c_phyt_master_calc_total_frame_cnt(dev); + if (dev->total_cnt) + dev->total_cnt--; + + /*orign info*/ + for (i = 0; i < dev->msgs_num; i++) { + remain_len = msgs[i].len; + dev->mng.opt_finish_len = 0; + + while (dev->mng.tx_cmd_cnt < dev->mng.tx_ring_cnt - 1) { + dev->total_shmem_len = 0; + memset(&i2c_mng_msg, 0, sizeof(i2c_mng_msg)); + + i2c_mng_msg.head.seq = dev->total_cnt--; + /*Set the Head.len*/ + len = remain_len; + if (remain_len >= FT_I2C_SINGLE_BUF_LEN) + len = FT_I2C_SINGLE_BUF_LEN; + i2c_mng_msg.head.len = len; + + /*Set other info of the Head*/ + i2c_mng_msg.head.cmd_type = PHYTI2C_MSG_CMD_DATA; + i2c_mng_msg.head.cmd_subid = 
PHYTI2C_MSG_CMD_DATA_XFER; + i2c_mng_msg.head.status0 = FT_I2C_MSG_COMPLETE_UNKNOW; + + /*Store addr flags of struct i2c_msg*/ + i2c_msg_info = + (struct i2c_ft_trans_msg_info *)&i2c_mng_msg.data[0]; + i2c_msg_info->addr = (u16)msgs[i].addr; + i2c_msg_info->flags = (u16)msgs[i].flags; + + /*Set the Head.seq (Start)*/ + if (!dev->mng.tx_cmd_cnt) + i2c_msg_info->type = FT_I2C_TRANS_FRAME_START; + + /*Set the Head.seq (Restart)*/ + if ((i) && ((msgs[i - 1].flags & I2C_M_RD) != + (msgs[i].flags & I2C_M_RD))) { + if (remain_len == msgs[i].len) + i2c_msg_info->type = FT_I2C_TRANS_FRAME_RESTART; + } + /*Set the Head.seq (End)*/ + if (((i + 1) >= dev->msgs_num) && (len == remain_len)) { + dev->mng.is_last_frame = true; + i2c_msg_info->type |= FT_I2C_TRANS_FRAME_END; + } + + + dev->total_shmem_len = FT_I2C_SHMEM_STORE_OFFSET; + /*store data*/ + if (!(msgs[i].flags & I2C_M_RD)) { + memcpy(&i2c_mng_msg.data[sizeof( + struct i2c_ft_trans_msg_info)], + &msgs[i].buf[dev->mng.opt_finish_len], len); + dev->total_shmem_len += len; + } + /*Update to share memory*/ + memcpy(&shmem_msg[dev->mng.tx_cmd_cnt], &i2c_mng_msg, + dev->total_shmem_len); + + /*Store the index of the i2c_msgs\the current index and the finished len*/ + dev->real_index[dev->mng.tx_cmd_cnt] = i; + dev->mng.cur_index = i; + dev->mng.opt_finish_len += len; + /*Record the count of AP2RV's msg*/ + dev->mng.tx_cmd_cnt++; + + remain_len -= len; + if (!remain_len) + break; + } + } + /*Update the Tx_Tail*/ + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_TAIL, dev->mng.tx_cmd_cnt); + i2c_phyt_notify_rv(dev, true); +} + +static int i2c_phyt_master_xfer(struct i2c_adapter *adapter, + struct i2c_msg msgs[], int num) +{ + struct i2c_phyt_dev *dev = i2c_get_adapdata(adapter); + int ret; + + pm_runtime_get_sync(dev->dev); + + dev->msgs = msgs; + dev->msgs_num = num; + dev->msg_err = 0; + dev->abort_source = 0; + dev->rx_buf_len = 0; + dev->mng.tx_cmd_cnt = 0; + dev->mng.cur_cmd_cnt = 0; + dev->mng.is_last_frame = false; + + 
i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_HEAD, 0); + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_TAIL, 0); + + i2c_phyt_master_xfer_single_frame(dev); + + ret = i2c_phyt_check_result(dev); + if (!ret) { + ret = num; + } else { + if (dev->abort_source != FT_I2C_SUCCESS) { + i2c_phyt_handle_tx_abort(dev); + if ((dev->abort_source & BIT(FT_I2C_TIMEOUT)) || + (dev->abort_source & BIT(FT_I2C_TRANS_PACKET_FAIL))) { + i2c_phyt_set_module_en( + dev, FT_I2C_ADAPTER_MODULE_RESET); + i2c_phyt_check_result(dev); + i2c_phyt_master_default_init(dev); + ret = -ETIMEDOUT; + goto done; + } + ret = -EINVAL; + } + } + +done: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return ret; +} + +static void +i2c_phyt_master_parse_data(struct i2c_phyt_dev *dev, + struct phyt_msg_info *shmem_msg, + int index) +{ + int real_index; + struct i2c_ft_trans_msg_info *i2c_msg_info; + struct i2c_msg *msgs = dev->msgs; + + i2c_msg_info = (struct i2c_ft_trans_msg_info *)&shmem_msg->data[0]; + /*Find the real index of i2c_msgs*/ + real_index = dev->real_index[index]; + if (real_index >= dev->msgs_num) + return; + + if (i2c_msg_info->flags & I2C_M_RD) { + memcpy(&msgs[real_index].buf[dev->rx_buf_len], + &shmem_msg->data[sizeof(struct i2c_ft_trans_msg_info)], + shmem_msg->head.len); + dev->rx_buf_len += shmem_msg->head.len; + } +} + +static int i2c_phyt_master_default_init(struct i2c_phyt_dev *dev) +{ + int ret; + struct i2c_ft_default_cfg_msg cfg_info; + + cfg_info.ss_hcnt = dev->ss_hcnt; + cfg_info.ss_lcnt = dev->ss_lcnt; + cfg_info.fs_hcnt = dev->fs_hcnt; + cfg_info.fs_lcnt = dev->fs_lcnt; + cfg_info.hs_hcnt = dev->hs_hcnt; + cfg_info.hs_lcnt = dev->hs_lcnt; + cfg_info.sda_hold = dev->sda_hold_time; + cfg_info.tx_fifo_thr = 3; + cfg_info.rx_fifo_thr = 0; + cfg_info.smbclk_mext = FT_DEFAULT_TIMEOUT; + cfg_info.smbclk_timeout = FT_DEFAULT_TIMEOUT; + cfg_info.smbdat_timeout = FT_DEFAULT_TIMEOUT; + cfg_info.cfg = dev->master_cfg; + cfg_info.intr_mask = dev->intr_mask; + 
i2c_phyt_default_cfg(dev, &cfg_info); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set module open: %d\n", ret); + return ret; + } + + return 0; +} + +static irqreturn_t i2c_phyt_master_regfile_isr(int this_irq, void *dev_id) +{ + struct i2c_phyt_dev *dev = (struct i2c_phyt_dev *)dev_id; + struct phyt_msg_info *rx_msg = (struct phyt_msg_info *)dev->rx_shmem_addr; + u32 stat, head, tail; + int ret; + + stat = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_RV2AP_INTR_STAT); + + if (!(stat & FT_I2C_RV2AP_INTR_BIT4)) { + dev_warn(dev->dev, "unexpect i2c_phyt regfile intr, stat:%#x\n", stat); + return IRQ_NONE; + } + + i2c_phyt_common_regfile_clear_rv2ap_int(dev, stat); + + if (!dev->mng.tx_ring_cnt) { + dev_err(dev->dev, "tx_ring_cnt is zero\n"); + return IRQ_NONE; + } + head = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_TX_HEAD) % dev->mng.tx_ring_cnt; + tail = dev->mng.cur_cmd_cnt % dev->mng.tx_ring_cnt; + do { + tail++; + tail %= dev->mng.tx_ring_cnt; + if (dev->complete_flag) { + if (rx_msg->head.cmd_type == PHYTI2C_MSG_CMD_REPORT) + goto done; + + ret = i2c_phyt_master_handle(dev); + if (ret == FT_I2C_RUNNING) + continue; + + dev->complete_flag = false; + dev->mng.cur_cmd_cnt = 0; + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_HEAD, 0); + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_TX_TAIL, 0); + complete(&dev->cmd_complete); + return IRQ_HANDLED; + } + } while (tail != head); +done: + i2c_phyt_master_isr_handle(dev); + + return IRQ_HANDLED; +} + +static int i2c_phyt_enable_smbus_alert(struct i2c_phyt_dev *i2c_dev) +{ + struct i2c_adapter *adap = &i2c_dev->adapter; + struct i2c_smbus_alert_setup setup; + + setup.irq = 0; + i2c_dev->ara = i2c_new_smbus_alert_device(adap, &setup); + + if (IS_ERR(i2c_dev->ara)) + return PTR_ERR(i2c_dev->ara); + + return 0; +} + +int i2c_phyt_master_probe(struct i2c_phyt_dev *dev) +{ + struct i2c_adapter *adapter = &dev->adapter; + unsigned long irq_flags; + int ret; + u32 alert = 0; + + init_completion(&dev->cmd_complete); 
+ + i2c_phyt_common_regfile_disable_int(dev); + i2c_phyt_common_regfile_clear_rv2ap_int(dev, 0); + + irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; + ret = devm_request_irq(dev->dev, dev->irq, i2c_phyt_master_regfile_isr, irq_flags, + dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failure requesting irq %i: %d\n", + dev->irq, ret); + return ret; + } + + i2c_phyt_common_regfile_enable_int(dev); + + ret = i2c_phyt_master_set_timings_master(dev); + if (ret) { + dev_err(dev->dev, "master set times\n"); + return ret; + } + + dev->intr_mask = FT_IC_INTR_SMBUS_TIME_MASK; + ret = i2c_phyt_master_default_init(dev); + if (ret) { + dev_err(dev->dev, "master default init\n"); + return ret; + } + dev->init = i2c_phyt_master_default_init; + dev->disable = i2c_phyt_set_suspend; + dev->disable_int = i2c_phyt_disable_int; + + /* XXX: should be initialized in firmware, remove it in future */ + snprintf(adapter->name, sizeof(adapter->name), "Phytium I2C_2.0 Master adapter"); + adapter->retries = 3; + adapter->algo = &i2c_phyt_master_algo; + adapter->dev.parent = dev->dev; + i2c_set_adapdata(adapter, dev); + /* + * Increment PM usage count during adapter registration in order to + * avoid possible spurious runtime suspend when adapter device is + * registered to the device core and immediate resume in case bus has + * registered I2C slaves that do I2C transfers in their probe. 
+ */ + pm_runtime_get_noresume(dev->dev); + + ret = i2c_add_numbered_adapter(adapter); + if (ret) { + dev_err(dev->dev, "fail to add adapter: %d\n", ret); + goto exit_probe; + } + + if (of_property_read_bool(dev->dev->of_node, "smbus-alert")) + alert = 1; + else if (has_acpi_companion(dev->dev)) + fwnode_property_read_u32_array(dev->dev->fwnode, "smbus_alert", &alert, 1); + + if (alert) { + ret = i2c_phyt_enable_smbus_alert(dev); + if (ret) { + dev_err(dev->dev, + "failed to enable SMBus alert protocol (%d)\n", ret); + } + dev->intr_mask |= FT_IC_INTR_SMBALERT_IN_N; + i2c_phyt_set_int_interrupt(dev, FT_I2C_ENABLE_INTERRUPT, dev->intr_mask); + ret = i2c_phyt_check_result(dev); + if (ret) + dev_warn(dev->dev, "fail to set interrupt: %d\n", ret); + } else + dev_info(dev->dev, "find no alert\n"); + +exit_probe: + pm_runtime_put_noidle(dev->dev); + dev_info(dev->dev, "i2c_2.0 master probe ok\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_phyt_master_probe); + +MODULE_DESCRIPTION("Phytium I2C bus master adapter"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-platform.c b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-platform.c new file mode 100644 index 0000000000000000000000000000000000000000..45ad303f2875a6ff835360f09da15844424b0edd --- /dev/null +++ b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-platform.c @@ -0,0 +1,525 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium I2C adapter driver. + * + * Copyright (C) 2023-2024, Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i2c-phyt-core.h" + +#define DRV_NAME "i2c-phytium-v2-platform" + +static u32 i2c_phyt_get_clk_rate_khz(struct i2c_phyt_dev *dev) +{ + return clk_get_rate(dev->clk)/1000; +} + +static ssize_t debug_show(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct i2c_phyt_dev *adapter_dev = dev_get_drvdata(dev); + ssize_t ret; + u32 reg; + + reg = i2c_phyt_read_reg(adapter_dev, FT_I2C_REGFILE_DEBUG); + ret = sprintf(buf, "%x\n", reg); + + return ret; +} + +static ssize_t debug_store(struct device *dev, + struct device_attribute *da, + const char *buf, size_t size) +{ + u8 loc, dis_en, status = 0; + char *p; + char *token; + long value; + u32 reg; + struct i2c_phyt_dev *adapter_dev = dev_get_drvdata(dev); + + dev_info(dev, "echo alive(1)/debug(0) enable(1)/disable(0) > debug\n"); + dev_info(dev, "Example:echo 0 1 > debug; Enable Debug Function\n"); + + p = kmalloc(size, GFP_KERNEL); + strscpy(p, buf, size); + + token = strsep(&p, " "); + if (!token) + return -EINVAL; + + status = kstrtol(token, 0, &value); + if (status) + return status; + loc = (u8)value; + + token = strsep(&p, " "); + if (!token) + return -EINVAL; + + status = kstrtol(token, 0, &value); + if (status) + return status; + dis_en = value; + + reg = i2c_phyt_read_reg(adapter_dev, FT_I2C_REGFILE_DEBUG); + + if (loc == 1) { + if (dis_en == 1) { + adapter_dev->alive_enabled = true; + reg |= BIT(loc); + } else if (dis_en == 0) { + adapter_dev->alive_enabled = false; + reg &= ~BIT(loc); + } + } else if (loc == 0) { + if (dis_en == 1) { + adapter_dev->debug_enabled = true; + reg |= BIT(loc); + } else if (dis_en == 0) { + adapter_dev->debug_enabled = false; + reg &= ~BIT(loc); + } + } + + i2c_phyt_write_reg(adapter_dev, FT_I2C_REGFILE_DEBUG, reg); + + dev_info(dev, "reg 
write reg =0x%x, loc = %d, val=%d\n", reg, loc, dis_en); + + kfree(p); + return size; +} +static DEVICE_ATTR_RW(debug); + +static struct attribute *i2c_ft_device_attrs[] = { + &dev_attr_debug.attr, + NULL, +}; + +static const struct attribute_group i2c_ft_device_group = { + .attrs = i2c_ft_device_attrs, +}; + +#ifdef CONFIG_ACPI +static void i2c_phyt_acpi_params(struct platform_device *pdev, char method[], + u16 *hcnt, u16 *lcnt, u32 *sda_hold) +{ + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + union acpi_object *obj; + + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) + return; + + obj = (union acpi_object *)buf.pointer; + if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) { + const union acpi_object *objs = obj->package.elements; + + *hcnt = (u16)objs[0].integer.value; + *lcnt = (u16)objs[1].integer.value; + *sda_hold = (u32)objs[2].integer.value; + } + + kfree(buf.pointer); +} + +static int i2c_phyt_acpi_configure(struct platform_device *pdev) +{ + struct i2c_phyt_dev *dev = platform_get_drvdata(pdev); + struct i2c_timings *t = &dev->timings; + u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; + const struct acpi_device_id *id; + + dev->adapter.nr = -1; + + /* + * Try to get SDA hold time and *CNT values from an ACPI method for + * selected speed modes. 
+ */ + i2c_phyt_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht); + i2c_phyt_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht); + i2c_phyt_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht); + i2c_phyt_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); + + switch (t->bus_freq_hz) { + case FT_I2C_SPEED_100K: + dev->sda_hold_time = ss_ht; + break; + case FT_I2C_SPEED_1000K: + dev->sda_hold_time = fp_ht; + break; + case FT_I2C_SPEED_3400K: + dev->sda_hold_time = hs_ht; + break; + case FT_I2C_SPEED_400K: + default: + dev->sda_hold_time = fs_ht; + break; + } + + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (id && id->driver_data) + dev->flags |= (u32)id->driver_data; + + return 0; +} + +static const struct acpi_device_id i2c_phyt_acpi_match[] = { + { "PHYT0059", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, i2c_phyt_acpi_match); +#else +static inline int i2c_phyt_acpi_configure(struct platform_device *pdev) +{ + return -ENODEV; +} +#endif + +static void i2c_phyt_configure_master(struct i2c_phyt_dev *dev) +{ + struct i2c_timings *t = &dev->timings; + + dev->functionality = I2C_FUNC_10BIT_ADDR | FT_IC_DEFAULT_FUNCTIONALITY; + + dev->master_cfg = FT_IC_CON_MASTER | FT_IC_CON_SLAVE_DISABLE | + FT_IC_CON_RESTART_EN; + + dev->mode = phyt_IC_MASTER; + + switch (t->bus_freq_hz) { + case FT_I2C_SPEED_100K: + dev->master_cfg |= FT_IC_CON_SPEED_STD; + break; + case FT_I2C_SPEED_3400K: + dev->master_cfg |= FT_IC_CON_SPEED_HIGH; + break; + default: + dev->master_cfg |= FT_IC_CON_SPEED_FAST; + } +} + +static void i2c_phyt_configure_slave(struct i2c_phyt_dev *dev) +{ + dev->functionality = I2C_FUNC_SLAVE | FT_IC_DEFAULT_FUNCTIONALITY; + + dev->slave_cfg = FT_IC_CON_RX_FIFO_FULL_HLD_CTRL | + FT_IC_CON_RESTART_EN | FT_IC_CON_STOP_DET_IFADDRESSED; + + dev->mode = phyt_IC_SLAVE; +} + + +static void i2c_phyt_timer_handle(struct timer_list *t) +{ + struct i2c_phyt_dev *dev = from_timer(dev, t, timer); + + if 
(dev->alive_enabled && dev->watchdog) + dev->watchdog(dev); + + mod_timer(&dev->timer, jiffies + msecs_to_jiffies(2000)); +} + +static int i2c_phyt_plat_probe(struct platform_device *pdev) +{ + struct i2c_adapter *adap; + struct i2c_phyt_dev *dev; + struct i2c_timings *t; + u32 acpi_speed; + struct resource *reg_mem, *share_mem; + int irq, ret, i; + static const int supported_speeds[] = { + 0, FT_I2C_SPEED_100K, FT_I2C_SPEED_400K, FT_I2C_SPEED_1000K, FT_I2C_SPEED_3400K + }; + + + irq = platform_get_irq(pdev, 0); + + if (irq < 0) { + dev_err(&pdev->dev, "Err:irq :%d,exit\n", irq); + return irq; + } + + dev = devm_kzalloc(&pdev->dev, sizeof(struct i2c_phyt_dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->irq = irq; + /*find regfile info*/ + reg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!reg_mem) { + dev_err(&pdev->dev, "Err:can't find valid regfile mem,exit\n"); + return -ENOMEM; + } + /*get regfile base address*/ + dev->base = devm_ioremap_resource(&pdev->dev, reg_mem); + if (IS_ERR(dev->base)) { + dev_err(&pdev->dev, "dev->base is err exit\n"); + return PTR_ERR(dev->base); + } + /*find share mem info*/ + share_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!share_mem) { + dev_err(&pdev->dev, "Err:can't find valid shmem,exit\n"); + return -ENOMEM; + } + + /*set tx share mem start addr*/ + dev->tx_shmem_addr = devm_ioremap_wc(&pdev->dev, share_mem->start, + resource_size(share_mem)); + if (IS_ERR(dev->tx_shmem_addr)) { + dev_err(&pdev->dev, "tx_shmem_addr is err\n"); + return PTR_ERR(dev->tx_shmem_addr); + } + + /*set rx share mem start addr*/ + dev->mng.tx_ring_cnt = (i2c_phyt_read_reg(dev, FT_I2C_REGFILE_RING) & + FT_I2C_REGFILE_TX_RING_MASK) >> FT_I2C_REGFILE_TX_RING_OFFSET; + if (!dev->mng.tx_ring_cnt || (dev->mng.tx_ring_cnt > 8)) { + dev_err(&pdev->dev, "failed set tx ring cnt:%d\n", dev->mng.tx_ring_cnt); + return -EINVAL; + } + dev->rx_shmem_addr = dev->tx_shmem_addr + + dev->mng.tx_ring_cnt * sizeof(struct 
phyt_msg_info); + + dev->dev = &pdev->dev; + platform_set_drvdata(pdev, dev); + + ret = i2c_phyt_malloc_log_mem(dev); + if (ret) + return -ENOMEM; + + dev->timer.expires = jiffies + msecs_to_jiffies(3000); + timer_setup(&dev->timer, i2c_phyt_timer_handle, 0); + add_timer(&dev->timer); + + dev->watchdog = i2c_phyt_common_watchdog; + + i2c_phyt_disable_debug(dev); + dev->debug_enabled = false; + + dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(dev->rst)) { + if (PTR_ERR(dev->rst) == -EPROBE_DEFER) { + dev_err(&pdev->dev, "dev rst not null\n"); + return -EPROBE_DEFER; + } + } else { + reset_control_deassert(dev->rst); + } + + t = &dev->timings; + i2c_parse_fw_timings(&pdev->dev, t, false); + + acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); + + /* + * Some DSTDs use a non standard speed, round down to the lowest + * standard speed. + */ + for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) { + if (acpi_speed < supported_speeds[i]) + break; + } + acpi_speed = supported_speeds[i - 1]; + + /* + * Find bus speed from the "clock-frequency" device property, ACPI + * or by using fast mode if neither is set. + */ + if (acpi_speed && t->bus_freq_hz) + t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed); + else if (acpi_speed || t->bus_freq_hz) + t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); + else + t->bus_freq_hz = FT_I2C_SPEED_400K; + + if (has_acpi_companion(&pdev->dev)) + i2c_phyt_acpi_configure(pdev); + + /* + * Only standard mode at 100kHz, fast mode at 400kHz, + * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported. 
+ */ + if (t->bus_freq_hz != FT_I2C_SPEED_100K && t->bus_freq_hz != FT_I2C_SPEED_400K && + t->bus_freq_hz != FT_I2C_SPEED_1000K && t->bus_freq_hz != FT_I2C_SPEED_3400K) { + dev_err(&pdev->dev, + "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", + t->bus_freq_hz); + ret = -EINVAL; + goto exit_reset; + } + + if (i2c_detect_slave_mode(&pdev->dev)) + i2c_phyt_configure_slave(dev); + else + i2c_phyt_configure_master(dev); + + dev->clk = devm_clk_get(&pdev->dev, NULL); + + if (!i2c_phyt_prepare_clk(dev, true)) { + u64 clk_khz; + + dev->get_clk_rate_khz = i2c_phyt_get_clk_rate_khz; + clk_khz = dev->get_clk_rate_khz(dev); + + if (!dev->sda_hold_time && t->sda_hold_ns) + dev->sda_hold_time = + div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); + } + + dev->adapter.nr = pdev->id; + + adap = &dev->adapter; + adap->owner = THIS_MODULE; + adap->class = I2C_CLASS_DEPRECATED; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + adap->dev.of_node = pdev->dev.of_node; + adap->timeout = HZ; + adap->dev.fwnode = pdev->dev.fwnode; + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_MAY_SKIP_RESUME); + + /* The code below assumes runtime PM to be disabled. 
*/ + WARN_ON(pm_runtime_enabled(&pdev->dev)); + + pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + + pm_runtime_enable(&pdev->dev); + + if (dev->mode == phyt_IC_SLAVE) + ret = i2c_phyt_slave_probe(dev); + else + ret = i2c_phyt_master_probe(dev); + + if (ret) + goto exit_probe; + + if (sysfs_create_group(&dev->dev->kobj, &i2c_ft_device_group)) + dev_warn(&pdev->dev, "failed create sysfs\n"); + + i2c_phyt_enable_alive(dev); + dev->alive_enabled = true; + + return ret; + +exit_probe: + pm_runtime_disable(dev->dev); +exit_reset: + del_timer(&dev->timer); + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); + return ret; +} + +static int i2c_phyt_plat_remove(struct platform_device *pdev) +{ + struct i2c_phyt_dev *dev = platform_get_drvdata(pdev); + + dev_info(&pdev->dev, "i2c_2.0 remove\n"); + pm_runtime_get_sync(&pdev->dev); + + i2c_del_adapter(&dev->adapter); + sysfs_remove_group(&dev->dev->kobj, &i2c_ft_device_group); + dev->disable(dev); + /*disable alive function*/ + i2c_phyt_disable_alive(dev); + del_timer(&dev->timer); + + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(dev->dev); + + if (!IS_ERR_OR_NULL(dev->rst)) + reset_control_assert(dev->rst); + + i2c_phyt_common_regfile_disable_int(dev); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id i2c_phyt_of_match[] = { + { .compatible = "phytium,i2c-2.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_phyt_of_match); +#endif + +static int __maybe_unused i2c_phyt_plat_suspend(struct device *dev) +{ + struct i2c_phyt_dev *idev = dev_get_drvdata(dev); + + idev->disable(idev); + + i2c_phyt_disable_alive(idev); + idev->alive_enabled = false; + + i2c_phyt_prepare_clk(idev, false); + + return 0; +} + +static int __maybe_unused i2c_phyt_plat_resume(struct device *dev) +{ + struct i2c_phyt_dev *idev = dev_get_drvdata(dev); + + i2c_phyt_prepare_clk(idev, true); 
+ idev->init(idev); + + i2c_phyt_enable_alive(idev); + idev->alive_enabled = true; + + return 0; +} + +static const struct dev_pm_ops i2c_phyt_dev_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(i2c_phyt_plat_suspend, + i2c_phyt_plat_resume) + SET_RUNTIME_PM_OPS(i2c_phyt_plat_suspend, + i2c_phyt_plat_resume, NULL) +}; + +static struct platform_driver i2c_phyt_driver = { + .probe = i2c_phyt_plat_probe, + .remove = i2c_phyt_plat_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(i2c_phyt_of_match), + .acpi_match_table = ACPI_PTR(i2c_phyt_acpi_match), + .pm = &i2c_phyt_dev_pm_ops, + }, +}; +module_platform_driver(i2c_phyt_driver); + +MODULE_ALIAS("platform:i2c-2.0"); +MODULE_AUTHOR("Wu Jinyong "); +MODULE_DESCRIPTION("Phytium I2C bus adapter"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(I2C_PHYTIUM_V2_DRV_VERSION); diff --git a/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-slave.c b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-slave.c new file mode 100644 index 0000000000000000000000000000000000000000..5675a1974fbee7cde96175e6cbea2b6aa0ae14c1 --- /dev/null +++ b/drivers/i2c/busses/phytium_i2c_v2/i2c-phyt-slave.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2C adapter driver (slave only). + * + * Copyright (C) 2023-2024, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "i2c-phyt-core.h" + +#define FT_I2C_RX_FIFO_FULL_ENABLE 1 +#define FT_I2C_RX_FIFO_FULL_DISABLE 0 + +#define FT_I2C_CON_STOP_DET_IFADDR_ENABLE 1 +#define FT_I2C_CON_STOP_DET_IFADDR_DISABLE 0 + +#define FT_I2C_SLAVE_EVNET_MAX_CNT 56 +#define FT_I2C_SLAVE_DATA_INDEX(index) (index+1) +/*one is cmd,the next one is data*/ +#define FT_I2C_SLAVE_RX_INFO_SIZE 2 + +int i2c_phyt_slave_event_process(struct i2c_phyt_dev *dev, + struct phyt_msg_info *rx_msg, u32 rx_head, u32 rx_tail) +{ + u8 buf[32] = {1}; + u32 head = rx_head, tail = rx_tail; + + if (!dev || !dev->slave) { + dev_err(dev->dev, "dev is null\n"); + return -ENXIO; + } + + if ((head >= FT_I2C_SLAVE_EVNET_MAX_CNT) || (tail >= FT_I2C_SLAVE_EVNET_MAX_CNT)) { + dev_err(dev->dev, "head:%d, tail:%d\n", head, tail); + return -EINVAL; + } + + while (head != tail) { + if (head % FT_I2C_SLAVE_RX_INFO_SIZE) + head++; + + i2c_slave_event(dev->slave, rx_msg->data[head], + &(rx_msg->data[FT_I2C_SLAVE_DATA_INDEX(head)])); + /*send the read data to RV*/ + if ((rx_msg->data[head] == I2C_SLAVE_READ_REQUESTED) || + (rx_msg->data[head] == I2C_SLAVE_READ_PROCESSED)) { + /*buf[0] is used to stor size,and fixed to 1*/ + buf[1] = rx_msg->data[FT_I2C_SLAVE_DATA_INDEX(head)]; + i2c_phyt_data_cmd8_array(dev, PHYTI2C_MSG_CMD_DATA_SLAVE, + buf, 2); + } + head += FT_I2C_SLAVE_RX_INFO_SIZE; + if (head >= FT_I2C_SLAVE_EVNET_MAX_CNT) + head = 0; + } + + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_HEAD, head); + + return 0; +} + +static irqreturn_t i2c_phyt_slave_regfile_isr(int this_irq, void *dev_id) +{ + u32 stat; + struct i2c_phyt_dev *dev = (struct i2c_phyt_dev *)dev_id; + + stat = i2c_phyt_read_reg(dev, FT_I2C_REGFILE_RV2AP_INTR_STAT); + + if (!(stat & FT_I2C_RV2AP_INTR_BIT4)) { + dev_warn(dev->dev, "intr stat=%#x\n", stat); + return IRQ_NONE; + } + + i2c_phyt_common_regfile_clear_rv2ap_int(dev, stat); + + if (dev->complete_flag) { + dev->complete_flag 
= false; + complete(&dev->cmd_complete); + } else { + i2c_phyt_slave_isr_handle(dev); + i2c_phyt_show_log(dev); + } + + return IRQ_HANDLED; +} + +static int i2c_phyt_slave_init(struct i2c_phyt_dev *dev) +{ + u8 slave_mode = FT_I2C_MASTER_MODE_FLAG; + u8 rx_fifo_full_en = FT_I2C_RX_FIFO_FULL_DISABLE; + u8 stop_det_ifaddr_en = FT_I2C_CON_STOP_DET_IFADDR_DISABLE; + int ret; + + i2c_phyt_set_module_en(dev, FT_I2C_ADAPTER_MODULE_OFF); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set module off: %d\n", ret); + return ret; + } + + if (dev->sda_hold_time) { + i2c_phyt_set_sda_hold(dev, dev->sda_hold_time); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set sda hold time: %d\n", ret); + return ret; + } + } + + /*set slave mode*/ + if ((dev->mode & FT_I2C_CON_MASTER_MODE_MASK) == phyt_IC_SLAVE) + slave_mode = FT_I2C_SLAVE_MODE_FLAG; + else { + dev_err(dev->dev, "dev is not in slave mode\n"); + return -EINVAL; + } + + i2c_phyt_set_cmd8(dev, PHYTI2C_MSG_CMD_SET_MODE, slave_mode); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set mode: %d\n", ret); + return ret; + } + + i2c_phyt_set_int_tl(dev, 0, 0); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set int tl: %d\n", ret); + return ret; + } + + if ((dev->slave_cfg & FT_I2C_CON_RX_FIFO_FULL_HLD_MASK) == + FT_IC_CON_RX_FIFO_FULL_HLD_CTRL) + rx_fifo_full_en = FT_I2C_RX_FIFO_FULL_ENABLE; + + i2c_phyt_set_cmd8(dev, PHYTI2C_MSG_CMD_SET_RX_FIFO_FULL, rx_fifo_full_en); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set rx fifo full: %d\n", ret); + return ret; + } + + if ((dev->slave_cfg & FT_I2C_CON_STOP_DET_IFADDR_MASK) == FT_IC_CON_STOP_DET_IFADDRESSED) + stop_det_ifaddr_en = FT_I2C_CON_STOP_DET_IFADDR_ENABLE; + + i2c_phyt_set_cmd8(dev, PHYTI2C_MSG_CMD_SET_STOP_DET_IF_ADDRESSED, + stop_det_ifaddr_en); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to 
set stop det ifaddr: %d\n", ret); + return ret; + } + i2c_phyt_set_int_interrupt(dev, FT_I2C_ENABLE_INTERRUPT, FT_IC_INTR_SLAVE_MASK); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set interrupt: %d\n", ret); + return ret; + } + + return 0; +} + +static int i2c_phyt_reg_slave(struct i2c_client *slave) +{ + int ret; + struct i2c_phyt_dev *dev = i2c_get_adapdata(slave->adapter); + + if (dev->slave) + return -EBUSY; + if (slave->flags & I2C_CLIENT_TEN) + return -EAFNOSUPPORT; + + pm_runtime_get_sync(dev->dev); + + /* + * Set slave address in the IC_SAR register, + * the address to which the i2c responds. + */ + i2c_phyt_set_module_en(dev, FT_I2C_ADAPTER_MODULE_OFF); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set module off: %d\n", ret); + return ret; + } + + i2c_phyt_set_cmd8(dev, PHYTI2C_MSG_CMD_SET_ADDR_MODE, FT_I2C_ADDR_7BIT_MODE); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set addr mode: %d\n", ret); + return ret; + } + + i2c_phyt_set_cmd8(dev, PHYTI2C_MSG_CMD_SET_ADDR, slave->addr); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set addr: %d\n", ret); + return ret; + } + + dev->slave = slave; + + i2c_phyt_set_module_en(dev, FT_I2C_ADAPTER_MODULE_ON); + ret = i2c_phyt_check_result(dev); + if (ret) { + dev_err(dev->dev, "fail to set module on: %d\n", ret); + return ret; + } + + return 0; +} + +static int i2c_phyt_unreg_slave(struct i2c_client *slave) +{ + struct i2c_phyt_dev *dev = i2c_get_adapdata(slave->adapter); + + dev->disable_int(dev); + dev->disable(dev); + dev->slave = NULL; + pm_runtime_put(dev->dev); + + return 0; +} + +static const struct i2c_algorithm i2c_phyt_slave_algo = { + .functionality = i2c_phyt_func, + .reg_slave = i2c_phyt_reg_slave, + .unreg_slave = i2c_phyt_unreg_slave, +}; + + +int i2c_phyt_slave_probe(struct i2c_phyt_dev *dev) +{ + struct i2c_adapter *adap = &dev->adapter; + int ret; + + 
init_completion(&dev->cmd_complete); + + i2c_phyt_common_regfile_disable_int(dev); + i2c_phyt_common_regfile_clear_rv2ap_int(dev, 0); + + ret = devm_request_irq(dev->dev, dev->irq, i2c_phyt_slave_regfile_isr, IRQF_SHARED, + dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failure requesting irq %i: %d\n", + dev->irq, ret); + return ret; + } + + i2c_phyt_common_regfile_enable_int(dev); + + dev->init = i2c_phyt_slave_init; + dev->disable = i2c_phyt_disable; + dev->disable_int = i2c_phyt_disable_int; + + snprintf(adap->name, sizeof(adap->name), + "Phytium I2C_2.0 Slave adapter"); + adap->retries = 3; + adap->algo = &i2c_phyt_slave_algo; + adap->dev.parent = dev->dev; + + ret = i2c_phyt_slave_init(dev); + if (ret) + return ret; + + i2c_phyt_write_reg(dev, FT_I2C_REGFILE_HEAD, 0); + i2c_set_adapdata(adap, dev); + + ret = i2c_add_numbered_adapter(adap); + if (ret) + dev_err(dev->dev, "failure adding adapter: %d\n", ret); + + dev_info(dev->dev, "i2c_2.0 slave probe ok\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_phyt_slave_probe); diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index a821c241d2f15b617742bee9cae9e18236f25985..feff507a3810eb46c4ce8b6867e5c05f0a02a446 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1488,7 +1488,8 @@ static int i8042_setup_aux(void) if (error) goto err_free_irq; - i8042_aux_irq_registered = true; + if (!phytium_check_cpu()) + i8042_aux_irq_registered = true; return 0; err_free_irq: diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 847510eb3810b04a3a3bc50b8c306451ef253cb3..aeb888096456acc92026068bf38f520383d9cdd4 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -4120,7 +4120,7 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match); #ifdef CONFIG_PM_SLEEP static const struct dev_pm_ops arm_smmu_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(arm_smmu_suspend, + 
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(arm_smmu_suspend, arm_smmu_resume) }; #define ARM_SMMU_PM_OPS (&arm_smmu_pm_ops) diff --git a/drivers/media/platform/phytium/phytium_jpeg_core.c b/drivers/media/platform/phytium/phytium_jpeg_core.c index ea4c7084df95b88afaf8e1f37aad2071b5f4afd1..103acf8f5b1dd47488e4a769314bf364e9343078 100644 --- a/drivers/media/platform/phytium/phytium_jpeg_core.c +++ b/drivers/media/platform/phytium/phytium_jpeg_core.c @@ -1123,8 +1123,8 @@ static int phytium_jpeg_parser_timer31_irq(struct phytium_jpeg_dev *jpeg_dev) return -ENODEV; } - ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_timer31_irq, - IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + ret = devm_request_irq(dev, irq, phytium_jpeg_timer31_irq, + IRQF_TIMER, PHYTIUM_JPEG_NAME, jpeg_dev); if (ret < 0) dev_err(dev, "Failed to request timer31 IRQ %d\n", irq); @@ -1177,8 +1177,8 @@ static int phytium_jpeg_parser_timer30_irq(struct phytium_jpeg_dev *jpeg_dev) return -ENODEV; } - ret = devm_request_threaded_irq(dev, irq, NULL, phytium_jpeg_timer30_irq, - IRQF_ONESHOT, PHYTIUM_JPEG_NAME, jpeg_dev); + ret = devm_request_irq(dev, irq, phytium_jpeg_timer30_irq, + IRQF_TIMER, PHYTIUM_JPEG_NAME, jpeg_dev); if (ret < 0) dev_err(dev, "Failed to request timer30 IRQ %d\n", irq); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index a31cacc653fc7b087f3360b751537b7a729b07e4..9a1f11bb1437919f1d3753645a4f0304ec2bb693 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1099,3 +1099,13 @@ config MMC_PHYTIUM_MCI_PLTFM If you have a controller with this interface, say Y or M here. If unsure, say N. + +config MMC_PHYTIUM_MCI_PLTFM_V2 + tristate "Phytium V2 MultiMedia Card Interface support" + depends on ARCH_PHYTIUM && OF + default y if ARCH_PHYTIUM + help + This selects support for the V2 MultiMedia Card Interface on Phytium SoCs. + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 129010f638dd49a614ea3f420d9dedc80da1dbde..19cf805cb74ae97ba1ccec3ef5405455bf1953e1 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -73,6 +73,7 @@ obj-$(CONFIG_MMC_OWL) += owl-mmc.o obj-$(CONFIG_MMC_PHYTIUM_SDCI) += phytium-sdci.o obj-$(CONFIG_MMC_PHYTIUM_MCI_PCI) += phytium-mci-pci.o phytium-mci.o obj-$(CONFIG_MMC_PHYTIUM_MCI_PLTFM) += phytium-mci-plat.o phytium-mci.o +obj-$(CONFIG_MMC_PHYTIUM_MCI_PLTFM_V2) += phytium-mci-plat-v2.o phytium-mci-v2.o obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c index eb7aa5b11a40b96709db222551ccf15a910f2be2..926938ece158244d6474089dd1ec297ca354794c 100644 --- a/drivers/mmc/host/phytium-mci-pci.c +++ b/drivers/mmc/host/phytium-mci-pci.c @@ -173,3 +173,4 @@ module_pci_driver(phytium_mci_pci_driver); MODULE_DESCRIPTION("Phytium Multimedia Card Interface PCI driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cheng Quan "); +MODULE_VERSION(PHYTIUM_MMC_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci-plat-v2.c b/drivers/mmc/host/phytium-mci-plat-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..3ad27688802156313b79541b3a091bb7498e1670 --- /dev/null +++ b/drivers/mmc/host/phytium-mci-plat-v2.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Phytium Multimedia Card Interface PCI driver + * + * Copyright (C) 2024 Phytium Technology Co.,Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci-v2.h" + +static u32 mci_caps = MMC_CAP_CMD23; + +#if defined CONFIG_PM && defined CONFIG_PM_SLEEP + +static const struct dev_pm_ops phytium_mci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phyt_mci_suspend, + phyt_mci_resume) + SET_RUNTIME_PM_OPS(phyt_mci_runtime_suspend, + phyt_mci_runtime_resume, NULL) +}; +#else +#define phytium_mci_dev_pm_ops NULL +#endif + +static ssize_t debug_enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + return sprintf(buf, "%d\n", host->debug_enable); +} + +static ssize_t debug_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); + struct phytium_mci_host *host = mmc_priv(mmc); + int enable; + int ret; + + ret = sscanf(buf, "%d\n", &enable); + if (ret == 0) { + ret = -EINVAL; + return ret; + } + + host->debug_enable = enable; + + phytium_mci_set_debug_enable(host, host->debug_enable); + + return count; +} + +static ssize_t alive_enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + return sprintf(buf, "%d\n", host->alive_enable); +} + +static ssize_t alive_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev); + struct phytium_mci_host *host = mmc_priv(mmc); + int enable; + int ret; + + ret = sscanf(buf, "%d\n", &enable); + if (ret == 0) { + ret = -EINVAL; + return ret; + } + + host->alive_enable = enable; + + phytium_mci_set_alive_enable(host, host->alive_enable); + + return count; +} + +static 
DEVICE_ATTR_RW(debug_enable); +static DEVICE_ATTR_RW(alive_enable); + +static int phytium_mci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + struct resource *res; + const struct acpi_device_id *match; + struct device *dev = &pdev->dev; + int ret; + + mmc = mmc_alloc_host(sizeof(struct phytium_mci_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + if (dev->of_node) { + host->src_clk = devm_clk_get(&pdev->dev, "phytium_mci_clk"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); + goto host_free; + } + + host->clk_rate = clk_get_rate(host->src_clk); + } else if (has_acpi_companion(dev)) { + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) { + dev_err(dev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + host->clk_rate = 1200000000; + } + + host->is_use_dma = 1; + host->is_device_x100 = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->regf_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->regf_base)) { + ret = PTR_ERR(host->regf_base); + goto host_free; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 0); + + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + host->irq_flags = IRQF_SHARED; + dev_dbg(&pdev->dev, "%s %d:irq:%d\n", __func__, __LINE__, host->irq); + host->dev = &pdev->dev; + host->caps = mci_caps; + host->mmc = mmc; + ret = phyt_mci_common_probe(host); + if (ret == MCI_REALEASE_MEM) { + ret = -ENOMEM; + goto release_mem; + } else if (ret) { + goto release; + } + + ret = device_create_file(&mmc->class_dev, + &dev_attr_debug_enable); + if (ret < 0) + goto debug_enable_free; + + ret = device_create_file(&mmc->class_dev, + 
&dev_attr_alive_enable); + if (ret < 0) + goto alive_enable_free; + + platform_set_drvdata(pdev, mmc); + dev_info(&pdev->dev, "%s %d: probe phytium mci successful.\n", __func__, __LINE__); + return 0; + +alive_enable_free: + device_remove_file(&mmc->class_dev, &dev_attr_alive_enable); +debug_enable_free: + device_remove_file(&mmc->class_dev, &dev_attr_debug_enable); +release: + phyt_mci_deinit_hw(host); +release_mem: + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } +host_free: + mmc_free_host(mmc); + return ret; +} + +static int phytium_mci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct phytium_mci_host *host; + + mmc = platform_get_drvdata(pdev); + if (!mmc) { + dev_info(&pdev->dev, "%s %d: mmc is null.\n", __func__, __LINE__); + return -1; + } + host = mmc_priv(mmc); + if (!host) { + dev_info(&pdev->dev, "%s %d: host is null.\n", __func__, __LINE__); + mmc_remove_host(mmc); + mmc_free_host(mmc); + return -1; + } + del_timer(&host->alive_timer); + del_timer(&host->hotplug_timer); + mmc_remove_host(host->mmc); + + if (host->dma.adma_table) { + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct phytium_adma2_64_desc), + host->dma.adma_table, host->dma.adma_addr); + } + phyt_mci_deinit_hw(host); + mmc_free_host(mmc); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id phytium_mci_of_ids[] = { + { .compatible = "phytium,mci_2.0", }, + {} +}; + +MODULE_DEVICE_TABLE(of, phytium_mci_of_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_mci_acpi_ids[] = { + { .id = "PHYT0061" }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, phytium_mci_acpi_ids); +#else +#define phytium_mci_acpi_ids NULL +#endif + +static struct platform_driver phytium_mci_driver = { + .probe = phytium_mci_probe, + .remove = phytium_mci_remove, + .driver = { + .name = "phytium-mci-platform-2.0", + 
.of_match_table = phytium_mci_of_ids, + .acpi_match_table = phytium_mci_acpi_ids, + .pm = &phytium_mci_dev_pm_ops, + }, +}; + +module_platform_driver(phytium_mci_driver); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lai Xueyu "); +MODULE_VERSION(PHYTIUM_MMC_V2_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci-plat.c b/drivers/mmc/host/phytium-mci-plat.c index c18b7dd160d326d0342026bd1245aed54f7b00b5..f937ea691f969f15e5559e4a36e7fad13ce65a15 100644 --- a/drivers/mmc/host/phytium-mci-plat.c +++ b/drivers/mmc/host/phytium-mci-plat.c @@ -193,3 +193,4 @@ module_platform_driver(phytium_mci_driver); MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cheng Quan "); +MODULE_VERSION(PHYTIUM_MMC_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci-v2.c b/drivers/mmc/host/phytium-mci-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..31ae666de4927d94f69f4bb44179a1720953cedf --- /dev/null +++ b/drivers/mmc/host/phytium-mci-v2.c @@ -0,0 +1,1077 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2024 Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-mci-v2.h" + +struct init_data_t *init_data; +struct start_command_data_t *start_command_data; +struct start_data_data_t *start_data_data; +struct set_ios_data_t *set_ios_data; +struct cmd_next_data_t *cmd_next_data; +struct err_irq_data_t *err_irq_data; + +static struct phytium_mci_host *shost; + +static const u32 rv2ap_int_mask = MMC_RXRING_TAIL_INT_MASK; + +static const u32 cmd_err_ints_mask = MCI_INT_MASK_RTO | MCI_INT_MASK_RCRC | MCI_INT_MASK_RE | + MCI_INT_MASK_DCRC | MCI_INT_MASK_DRTO | + MCI_MASKED_INTS_SBE_BCI; + +static int ap_phytium_mci_cmd_next(struct phyt_msg_info *rxmsg); +static int ap_phytium_mci_data_xfer_next(struct phyt_msg_info *rxmsg); +static void phytium_mci_adma_reset(struct phytium_mci_host *host); +static void phytium_mci_init(struct phytium_mci_host *host); +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma); +static void phytium_mci_init_hw(struct phytium_mci_host *host); +static int phytium_mci_get_cd(struct mmc_host *mmc); +static int phytium_mci_get_ro(struct mmc_host *mmc); +static void phytium_mci_start_command(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd); +static void +phytium_mci_start_data(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data); +static void __phytium_mci_enable_sdio_irq(struct phytium_mci_host *host, int enable); +static int phytium_check_msg(void); +static int phytium_mci_card_busy(struct mmc_host *mmc); +struct phyt_msg_info *shmem[RING_MAX]; +struct phyt_msg_info *rvshmem[RING_MAX_RV]; + +static void phytium_mci_set_int_mask(struct phytium_mci_host 
*host) +{ + u32 int_mask; + + int_mask = ~rv2ap_int_mask; + + writel(int_mask, host->regf_base + MMC_RV2AP_INT_MASK); + dev_info(host->dev, "set int mask %x\n", int_mask); +} + +static void phytium_mci_prepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!(data->host_cookie & MCI_PREPARE_FLAG)) { + data->host_cookie |= MCI_PREPARE_FLAG; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } +} + +static void phytium_mci_unprepare_data(struct phytium_mci_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data->host_cookie & MCI_ASYNC_FLAG) + return; + + if (data->host_cookie & MCI_PREPARE_FLAG) { + dma_unmap_sg(host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); + data->host_cookie &= ~MCI_PREPARE_FLAG; + } +} + +static inline u32 +phytium_mci_cmd_find_resp(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + resp = 0x5; + break; + + case MMC_RSP_R2: + resp = 0x7; + break; + + case MMC_RSP_R3: + resp = 0x1; + break; + + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline +u32 phytium_mci_cmd_prepare_raw_cmd(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 opcode = cmd->opcode; + u32 resp = phytium_mci_cmd_find_resp(host, mrq, cmd); + u32 rawcmd = ((opcode & 0x3f) | ((resp & 0x7) << 6)); + + if (opcode == MMC_GO_INACTIVE_STATE || + (opcode == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) + rawcmd |= (0x1 << 14); + else if (opcode == SD_SWITCH_VOLTAGE) + rawcmd |= (0x1 << 28); + + if (test_and_clear_bit(MCI_CARD_NEED_INIT, &host->flags)) + rawcmd |= (0x1 << 15); + + if (cmd->data) { + struct mmc_data *data = cmd->data; + + rawcmd |= (0x1 << 9); + + if (data->flags & MMC_DATA_WRITE) + rawcmd |= 
(0x1 << 10); + } + + if (host->use_hold) + rawcmd |= (0x1 << 29); + + return (rawcmd | (0x1 << 31)); +} + +static inline void +phytium_mci_adma_write_desc(struct phytium_mci_host *host, + struct phytium_adma2_64_desc *desc, + dma_addr_t addr, u32 len, u32 attribute) +{ + desc->attribute = attribute; + desc->len = len; + desc->addr_lo = lower_32_bits(addr); + desc->addr_hi = upper_32_bits(addr); + dev_dbg(host->dev, "%s %d:addr_lo:0x%x ddr_hi:0x%x\n", __func__, + __LINE__, desc->addr_lo, desc->addr_hi); + + if ((attribute == 0x80000004) || (attribute == 0x8000000c)) { + desc->desc_lo = 0; + desc->desc_hi = 0; + } +} + +static void +phytium_mci_data_sg_write_2_admc_table(struct phytium_mci_host *host, struct mmc_data *data) +{ + struct phytium_adma2_64_desc *desc; + u32 dma_len, i; + dma_addr_t dma_address; + struct scatterlist *sg; + + phytium_mci_init_adma_table(host, &host->dma); + + desc = host->dma.adma_table; + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_address = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + + if (i == 0) { + if (sg_is_last(sg) || (data->sg_count == 1 && dma_len == SD_BLOCK_SIZE)) + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000000c); + else + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x8000001a); + } else if (sg_is_last(sg)) { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000004); + } else { + phytium_mci_adma_write_desc(host, desc, dma_address, + dma_len, 0x80000012); + } + + desc++; + } +} + +static void phytium_mci_track_cmd_data(struct phytium_mci_host *host, + struct mmc_command *cmd, + struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void phytium_mci_request_done(struct phytium_mci_host *host, struct mmc_request *mrq) +{ + phytium_mci_track_cmd_data(host, mrq->cmd, mrq->data); + + if (mrq->data) + phytium_mci_unprepare_data(host, 
mrq); + + mmc_request_done(host->mmc, mrq); +} + +static void phytium_mci_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + host->error = 0; + host->mrq = NULL; + WARN_ON(host->mrq); + host->mrq = mrq; + + dev_dbg(host->dev, "%s %d: cmd:%d arg:0x%x\n", __func__, __LINE__, + mrq->cmd->opcode, mrq->cmd->arg); + + if (mrq->sbc) { + phytium_mci_start_command(host, mrq, mrq->sbc); + return; + } + if (mrq->data) { + phytium_mci_prepare_data(host, mrq); + + if ((mrq->data->sg->length >= 512) && host->is_use_dma && + ((mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (mrq->cmd->opcode == MMC_WRITE_BLOCK) || + (mrq->cmd->opcode == SD_IO_RW_EXTENDED))) + + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + + phytium_mci_start_data(host, mrq, mrq->cmd, mrq->data); + return; + } + phytium_mci_start_command(host, mrq, mrq->cmd); +} + +static void phytium_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + phytium_mci_prepare_data(host, mrq); + data->host_cookie |= MCI_ASYNC_FLAG; +} + +static void phytium_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + if (data->host_cookie & MCI_ASYNC_FLAG) { + data->host_cookie &= ~MCI_ASYNC_FLAG; + phytium_mci_unprepare_data(host, mrq); + } +} + +static void phytium_mci_data_read_without_dma(struct phytium_mci_host *host, + struct mmc_data *data) +{ + u32 length, i, data_val, dma_len, tmp = 0; + u32 *virt_addr; + unsigned long flags; + struct scatterlist *sg; + + length = data->blocks * data->blksz; + + if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { + spin_lock_irqsave(&host->lock, flags); + 
if (data->host_cookie & MCI_ASYNC_FLAG) { + tmp = MCI_ASYNC_FLAG; + phytium_mci_post_req(host->mmc, data->mrq, 0); + } else { + phytium_mci_unprepare_data(host, data->mrq); + } + + for_each_sg(data->sg, sg, data->sg_count, i) { + dma_len = sg_dma_len(sg); + virt_addr = sg_virt(data->sg); + + for (i = 0; i < (dma_len / 4); i++) { + data_val = readl(host->regf_base + MCI_DATA); + memcpy(virt_addr, &data_val, 4); + ++virt_addr; + } + } + + if (tmp & MCI_ASYNC_FLAG) + phytium_mci_pre_req(host->mmc, data->mrq); + else + phytium_mci_prepare_data(host, data->mrq); + + spin_unlock_irqrestore(&host->lock, flags); + } + data->bytes_xfered = length; +} + +static irqreturn_t phytium_mci_irq(int irq, void *dev_id) +{ + struct phytium_mci_host *host = (struct phytium_mci_host *) dev_id; + u32 rv2ap_event; + + rv2ap_event = readl(host->regf_base + MMC_RV2AP_INT); + if (rv2ap_event & MMC_TXRING_HEAD_INT) { + rv2ap_event &= ~MMC_TXRING_HEAD_INT; + pr_debug("MCIAP %s MMC_TXRING_HEAD_INT %x\n", __func__, rv2ap_event); + writel(rv2ap_event, host->regf_base + MMC_RV2AP_INT); + } + if (rv2ap_event & MMC_RXRING_TAIL_INT) { + rv2ap_event &= ~MMC_RXRING_TAIL_INT; + pr_debug("MCIAP %s MMC_RXRING_TAIL_INT %x\n", __func__, rv2ap_event); + writel(rv2ap_event, host->regf_base + MMC_RV2AP_INT); + phytium_check_msg(); + } + return IRQ_HANDLED; +} + +static void phytium_mci_init_adma_table(struct phytium_mci_host *host, + struct phytium_mci_dma *dma) +{ + struct phytium_adma2_64_desc *adma_table = dma->adma_table; + dma_addr_t dma_addr; + int i; + + memset(adma_table, 0, sizeof(struct phytium_adma2_64_desc) * MAX_BD_NUM); + + for (i = 0; i < (MAX_BD_NUM - 1); i++) { + dma_addr = dma->adma_addr + sizeof(*adma_table) * (i + 1); + adma_table[i].desc_lo = lower_32_bits(dma_addr); + adma_table[i].desc_hi = upper_32_bits(dma_addr); + adma_table[i].attribute = 0; + adma_table[i].NON1 = 0; + adma_table[i].len = 0; + adma_table[i].NON2 = 0; + } + + phytium_mci_adma_reset(host); +} + +static void 
phytium_mci_ack_sdio_irq(struct mmc_host *mmc) +{ + unsigned long flags; + struct phytium_mci_host *host = mmc_priv(mmc); + + spin_lock_irqsave(&host->lock, flags); + __phytium_mci_enable_sdio_irq(host, 1); + spin_unlock_irqrestore(&host->lock, flags); +} + +#ifdef CONFIG_PM_SLEEP +int phyt_mci_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phyt_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phyt_mci_suspend); + +int phyt_mci_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_set_int_mask(host); + phytium_mci_init(host); + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phyt_mci_resume); + +#endif + +#ifdef CONFIG_PM +int phyt_mci_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phyt_mci_deinit_hw(host); + return 0; +} +EXPORT_SYMBOL(phyt_mci_runtime_suspend); + +int phyt_mci_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct phytium_mci_host *host = mmc_priv(mmc); + + phytium_mci_set_int_mask(host); + phytium_mci_init(host); + phytium_mci_init_hw(host); + return 0; +} +EXPORT_SYMBOL(phyt_mci_runtime_resume); + +#endif + +static int phytium_ap_to_rv(struct phyt_msg_info *msg) +{ + int i; + int ret = 0; + int tx_t, tx_h, p; + + tx_t = readl(shost->regf_base + MMC_TX_TAIL) & 0xffff; + tx_h = readl(shost->regf_base + MMC_TX_HEAD) & 0xffff; + + if ((tx_t + 1) % RING_MAX == tx_h) { + pr_err("mci ring buff full\n"); + return -1; + } + + p = tx_t; + + tx_t = (tx_t + 1) % RING_MAX; + + shmem[p] = (struct phyt_msg_info *)(*shmem + p); + pr_debug("MCIAP shmem[%d] %x\n", p, (unsigned int)(long)shmem[p]); + pr_debug("MCIAP MSG CMD:%d SCMD:%d\n", msg->cmd_type, msg->cmd_subid); + + /*write msg to shmem*/ + memcpy(shmem[p], msg, sizeof(struct phyt_msg_info)); + + 
/*update tx tail pointer*/ + writel(tx_t | MMC_TX_TAIL_INT, shost->regf_base + MMC_TX_TAIL); + + /*wait and check status*/ + for (i = 0; i < COMPLETE_TIMEOUT; i++) { + dsb(sy); + if (shmem[p]->status0 == GOING || shmem[p]->status0 == NOT_READY) { + pr_debug("MCIAP [%d] not complete %x\r\n", p, shmem[p]->status0); + } else if (shmem[p]->status0 == SUCCESS) { + ret = shmem[p]->status1; + pr_debug("MCIAP [%d] complete and return %d\r\n", p, ret); + /*clear status*/ + shmem[p]->status0 = 0; + shmem[p]->status1 = 0; + break; + } else if (shmem[p]->status0 >= GENERIC_ERROR) { + pr_debug("MCIAP [%d] status error %x\r\n", p, shmem[p]->status0); + ret = -1; + /*clear status*/ + shmem[p]->status0 = 0; + shmem[p]->status1 = 0; + break; + } + udelay(40); + } + pr_debug("MCIAP %s end [%d] times:%d\n", __func__, p, i); + return ret; +} + +static void phytium_mci_init(struct phytium_mci_host *host) +{ + host->msg.cmd_type = PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_INIT; + + pr_debug("MCIAP host->mmc->caps %x host->clk_rate %ld\n", + host->mmc->caps, host->clk_rate); + + init_data = (struct init_data_t *)host->msg.data; + init_data->caps = host->mmc->caps; + init_data->clk_rate = host->clk_rate; + + phytium_ap_to_rv(&host->msg); +} + +static void phytium_mci_init_hw(struct phytium_mci_host *host) +{ + host->msg.cmd_type = PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_INIT_HW; + + phytium_ap_to_rv(&host->msg); + dev_info(host->dev, "init hardware done!"); +} + +void phyt_mci_deinit_hw(struct phytium_mci_host *host) +{ + host->msg.cmd_type = PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_DEINIT_HW; + + phytium_ap_to_rv(&host->msg); +} +EXPORT_SYMBOL_GPL(phyt_mci_deinit_hw); + +static void phytium_mci_start_command(struct phytium_mci_host *host, + struct mmc_request *mrq, + struct mmc_command *cmd) +{ + u32 rawcmd; + + host->cmd = cmd; + host->mrq = mrq; + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + + pr_debug("MCIAP arg%x flags%x opcode%d rawcmd%x\n", + 
host->cmd->arg, host->cmd->flags, host->cmd->opcode, rawcmd); + host->msg.cmd_type = PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_START_CMD; + + start_command_data = (struct start_command_data_t *)host->msg.data; + start_command_data->cmd_arg = cmd->arg; + start_command_data->cmd_flags = cmd->flags; + start_command_data->cmd_opcode = cmd->opcode; + start_command_data->rawcmd = rawcmd; + + phytium_ap_to_rv(&host->msg); +} + +static void +phytium_mci_start_data(struct phytium_mci_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + u32 rawcmd; + + host->cmd = cmd; + cmd->error = 0; + host->mrq = mrq; + host->data = data; + rawcmd = phytium_mci_cmd_prepare_raw_cmd(host, mrq, cmd); + phytium_mci_data_sg_write_2_admc_table(host, data); + + host->msg.cmd_type = PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_START_DATA; + + start_data_data = (struct start_data_data_t *)host->msg.data; + start_data_data->data_flags = data->flags; + start_data_data->adtc_type = host->adtc_type; + start_data_data->adma_addr = host->dma.adma_addr; + start_data_data->mrq_data_blksz = mrq->data->blksz; + start_data_data->mrq_data_blocks = mrq->data->blocks; + start_data_data->cmd_arg = cmd->arg; + start_data_data->rawcmd = rawcmd; + + phytium_ap_to_rv(&host->msg); +} + +static void phytium_mci_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + host->msg.cmd_type = PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_OPS_SET_IOS; + + set_ios_data = (struct set_ios_data_t *)host->msg.data; + set_ios_data->ios_clock = ios->clock; + set_ios_data->ios_timing = ios->timing; + set_ios_data->ios_bus_width = ios->bus_width; + set_ios_data->ios_power_mode = ios->power_mode; + + phytium_ap_to_rv(&host->msg); + + if (ios->power_mode == MMC_POWER_UP) + set_bit(MCI_CARD_NEED_INIT, &host->flags); +} + +static void __phytium_mci_enable_sdio_irq(struct phytium_mci_host *host, int enable) +{ + host->msg.cmd_type 
= PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_SDIO_IRQ_EN; + + host->msg.data[0] = enable & 0xFF; + + phytium_ap_to_rv(&host->msg); +} + +static void phytium_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + __phytium_mci_enable_sdio_irq(host, enable); +} + +static int phytium_mci_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + int ret = 0; + + host->msg.cmd_type = PHYTMMC_MSG_CMD_SET; + host->msg.cmd_subid = MCI_OPS_SWITCH_VOLT; + + host->msg.data[0] = ios->signal_voltage & 0xFF; + + ret = phytium_ap_to_rv(&host->msg); + + pr_debug("MCIAP %s %d\n", __func__, ret); + + return ret; +} + +static void phytium_mci_hw_reset(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + + host->msg.cmd_type = PHYTMMC_MSG_CMD_DEFAULT; + host->msg.cmd_subid = MCI_HW_RESET; + + phytium_ap_to_rv(&host->msg); +} + +static void phytium_mci_adma_reset(struct phytium_mci_host *host) +{ + host->msg.cmd_type = PHYTMMC_MSG_CMD_DEFAULT; + host->msg.cmd_subid = MCI_ADMA_RESET; + + phytium_ap_to_rv(&host->msg); +} + +static int phytium_mci_get_cd(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + u32 status; + + if (mmc->caps & MMC_CAP_NONREMOVABLE) + return 1; + + status = readl(host->regf_base + MCI_CARD_DETECT); + + pr_debug("MCIAP get cd %d\n", status); + + if ((status & 0x1) == 0x1) + return 0; + + return 1; +} + +static int phytium_mci_card_busy(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + int ret; + + host->msg.cmd_type = PHYTMMC_MSG_CMD_GET; + host->msg.cmd_subid = MCI_GET_CARD_BUSY; + + ret = phytium_ap_to_rv(&host->msg); + + pr_debug("MCIAP card busy %d\n", ret); + return ret; +} + +static int phytium_mci_get_ro(struct mmc_host *mmc) +{ + struct phytium_mci_host *host = mmc_priv(mmc); + int ret; + + host->msg.cmd_type = PHYTMMC_MSG_CMD_GET; + host->msg.cmd_subid = MCI_GET_RO; + + 
ret = phytium_ap_to_rv(&host->msg); + + dev_dbg(host->dev, "MCIAP ro %d\n", ret); + + return ret; +} + +static int ap_phytium_mci_cmd_next(struct phyt_msg_info *rxmsg) +{ + struct phytium_mci_host *host; + u32 events; + + host = shost; + + if (!host->cmd) + return 1; + + cmd_next_data = (struct cmd_next_data_t *)rxmsg->data; + events = cmd_next_data->events; + host->cmd->resp[0] = cmd_next_data->response0; + host->cmd->resp[1] = cmd_next_data->response1; + host->cmd->resp[2] = cmd_next_data->response2; + host->cmd->resp[3] = cmd_next_data->response3; + + if (!(events & (MCI_RAW_INTS_CMD | MCI_INT_MASK_HTO))) { + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) && (events & MCI_RAW_INTS_RTO) + && readl(host->regf_base + MCI_CARD_DETECT)) { + host->cmd->error = -ENOMEDIUM; + host->cmd->resp[0] = 0; + } else if (events & MCI_RAW_INTS_RTO || + (host->cmd->opcode != MMC_SEND_TUNING_BLOCK && + host->cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) { + host->cmd->error = -ETIMEDOUT; + } else if (events & MCI_RAW_INTS_RCRC) { + host->cmd->error = -EILSEQ; + } else { + host->cmd->error = -ETIMEDOUT; + } + } + + if ((host->cmd->error && !(host->cmd->opcode == MMC_SEND_TUNING_BLOCK || + host->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) || + (host->mrq->sbc && host->mrq->sbc->error)) { + phytium_mci_request_done(host, host->mrq); + } else if (host->cmd == host->mrq->sbc) { + if ((host->mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) || + (host->mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) || + (host->mrq->cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (host->mrq->cmd->opcode == MMC_WRITE_BLOCK)) { + dev_dbg(host->dev, "%s %d:sbc done and next cmd :%d length:%d\n", + __func__, __LINE__, host->mrq->cmd->opcode, + host->mrq->data->sg->length); + phytium_mci_prepare_data(host, host->mrq); + if (host->is_use_dma) + host->adtc_type = BLOCK_RW_ADTC; + else + host->adtc_type = COMMOM_ADTC; + phytium_mci_start_data(host, host->mrq, host->mrq->cmd, host->mrq->data); + } else { + dev_err(host->dev, 
"%s %d:ERROR: cmd %d followers the SBC\n", + __func__, __LINE__, host->cmd->opcode); + } + } else if (!host->cmd->data) { + phytium_mci_request_done(host, host->mrq); + } + return 1; +} + +static int ap_phytium_mci_data_xfer_next(struct phyt_msg_info *rxmsg) +{ + struct phytium_mci_host *host; + u32 events; + struct mmc_request *mrq; + struct mmc_data *data; + + host = shost; + mrq = host->mrq; + data = host->data; + + events = ((uint32_t *)(rxmsg->data))[0]; + + if (events & MCI_RAW_INTS_DTO) { + if (host->adtc_type == COMMOM_ADTC && + (mrq->cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) + phytium_mci_data_read_without_dma(host, data); + else + data->bytes_xfered = data->blocks * data->blksz; + } else { + data->bytes_xfered = 0; + if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) + && readl(host->regf_base + MCI_CARD_DETECT) + && (events & cmd_err_ints_mask)) { + data->error = -ENOMEDIUM; + data->mrq->cmd->error = -ENOMEDIUM; + } else if (events & (MCI_RAW_INTS_DCRC | MCI_RAW_INTS_EBE | + MCI_RAW_INTS_SBE_BCI)) { + data->error = -EILSEQ; + } else { + data->error = -ETIMEDOUT; + } + } + + if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && + (data->error || !mrq->sbc)) { + phytium_mci_start_command(host, mrq, mrq->stop); + } else { + phytium_mci_request_done(host, mrq); + } + return 1; +} + +static int ap_phytium_mci_card_detect_irq(struct phyt_msg_info *rxmsg) +{ + u8 cd; + struct phytium_mci_host *host; + + host = shost; + cd = rxmsg->data[0]; + + pr_debug("MCIAP card_detect_irq %d\n", cd); + if (cd) { + if (host->mmc->card) { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(0)); + } + } else { + cancel_delayed_work(&host->mmc->detect); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } + return 1; +} + +static int ap_phytium_mci_err_irq(struct phyt_msg_info *rxmsg) +{ + u32 raw_ints; + u32 dmac_status; + u32 ints_mask; + u32 dmac_mask; + u32 opcode; + + err_irq_data = (struct err_irq_data_t *)rxmsg->data; + raw_ints = 
err_irq_data->raw_ints; + ints_mask = err_irq_data->ints_mask; + dmac_status = err_irq_data->dmac_status; + dmac_mask = err_irq_data->dmac_mask; + opcode = err_irq_data->opcode; + + pr_err("MCIAP ERR raw_ints:%x ints_mask:%x dmac_status:%x dmac_mask:%x cmd:%d\n", + raw_ints, ints_mask, dmac_status, dmac_mask, opcode); + + return 1; +} + +static int phytium_check_msg(void) +{ + int rx_t, rx_h; + int complete; + struct phyt_msg_info rxmsg; + int p = 0; + + rx_t = readl(shost->regf_base + MMC_RX_TAIL) & 0xffff; + rx_h = readl(shost->regf_base + MMC_RX_HEAD) & 0xffff; + + while (rx_t != rx_h) { + pr_debug("MCIAP %s rx_t:%d rx_h:%d\n", __func__, rx_t, rx_h); + + p = rx_h; + rx_h = (rx_h + 1) % RING_MAX_RV; + + writel(rx_h, shost->regf_base + MMC_RX_HEAD); + rvshmem[p] = (struct phyt_msg_info *)(*rvshmem + p); + pr_debug("MCIAP %s %d %x\n", __func__, p, (unsigned int)(long)rvshmem[p]); + + /*read msg from shmem*/ + memcpy(&rxmsg, rvshmem[p], sizeof(struct phyt_msg_info)); + /*read msg from shmem*/ + + /*execute cmd*/ + switch (rxmsg.cmd_type) { + case PHYTMMC_MSG_CMD_REPORT: + switch (rxmsg.cmd_subid) { + case MCI_CMD_NEXT: + complete = ap_phytium_mci_cmd_next(&rxmsg); + break; + case MCI_DATA_NEXT: + complete = ap_phytium_mci_data_xfer_next(&rxmsg); + break; + case MCI_CD_IRQ: + complete = ap_phytium_mci_card_detect_irq(&rxmsg); + break; + case MCI_ERR_IRQ: + complete = ap_phytium_mci_err_irq(&rxmsg); + break; + default: + pr_debug("MCIAP invalid sub cmd\r\n"); + break; + } + break; + default: + pr_debug("MCIAP invalid cmd\r\n"); + break; + } + + /*write complete*/ + rvshmem[p]->status0 = complete; + } + return 1; +} + +int phytium_mci_set_debug_enable(struct phytium_mci_host *host, bool enable) +{ + static bool debug_enable; + u32 debug_reg; + + if (enable == debug_enable) + return 0; + + debug_enable = enable; + + debug_reg = readl(host->regf_base + MCI_DEBUG); + pr_debug("MCIAP %s debug_reg %x\n", __func__, debug_reg); + + if (!debug_enable && (debug_reg & 
MCI_DEBUG_ENABLE)) { + debug_reg &= ~MCI_DEBUG_ENABLE; + writel(debug_reg, host->regf_base + MCI_DEBUG); + } else if (debug_enable && !(debug_reg & MCI_DEBUG_ENABLE)) { + debug_reg |= MCI_DEBUG_ENABLE; + writel(debug_reg, host->regf_base + MCI_DEBUG); + } + + return 1; +} +EXPORT_SYMBOL(phytium_mci_set_debug_enable); + +int phytium_mci_set_alive_enable(struct phytium_mci_host *host, bool enable) +{ + static bool alive_enable; + u32 debug_reg; + + if (enable == alive_enable) + return 0; + + alive_enable = enable; + + debug_reg = readl(host->regf_base + MCI_DEBUG); + pr_debug("MCIAP %s debug_reg %x\n", __func__, debug_reg); + + if (!alive_enable && (debug_reg & MCI_ALIVE_ENABLE)) { + debug_reg &= ~MCI_ALIVE_ENABLE; + writel(debug_reg, host->regf_base + MCI_DEBUG); + del_timer(&host->alive_timer); + } else if (alive_enable && !(debug_reg & MCI_ALIVE_ENABLE)) { + debug_reg |= MCI_ALIVE_ENABLE | MCI_ALIVE; + writel(debug_reg, host->regf_base + MCI_DEBUG); + add_timer(&host->alive_timer); + } + + return 1; +} +EXPORT_SYMBOL(phytium_mci_set_alive_enable); + +static void alive_timer_func(struct timer_list *t) +{ + struct phytium_mci_host *host; + u32 debug_reg; + + host = from_timer(host, t, alive_timer); + if (!host) + return; + + debug_reg = readl(host->regf_base + MCI_DEBUG); + pr_debug("MCIAP %s debug_reg %x\n", __func__, debug_reg); + debug_reg |= MCI_ALIVE; + writel(debug_reg, host->regf_base + MCI_DEBUG); + mod_timer(&host->alive_timer, jiffies + msecs_to_jiffies(5000)); +} + +static struct mmc_host_ops phytium_mci_ops = { + .post_req = phytium_mci_post_req, + .pre_req = phytium_mci_pre_req, + .request = phytium_mci_ops_request, + .set_ios = phytium_mci_ops_set_ios, + .get_cd = phytium_mci_get_cd, + .get_ro = phytium_mci_get_ro, + .enable_sdio_irq = phytium_mci_enable_sdio_irq, + .ack_sdio_irq = phytium_mci_ack_sdio_irq, + .card_busy = phytium_mci_card_busy, + .start_signal_voltage_switch = phytium_mci_ops_switch_volt, + .card_hw_reset = phytium_mci_hw_reset, +}; + 
+int phyt_mci_common_probe(struct phytium_mci_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *dev = host->dev; + int ret; + + shost = host; + + dev_info(host->dev, "phytium_mci_common_probe start\n"); + + *shmem = host->base; + *rvshmem = (struct phyt_msg_info *)(*shmem + RING_MAX); + + dma_set_mask(dev, DMA_BIT_MASK(64)); + dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); + + host->alive_timer.expires = jiffies + msecs_to_jiffies(5000); + timer_setup(&host->alive_timer, alive_timer_func, 0); + + mmc->f_min = MCI_F_MIN; + if (!mmc->f_max) + mmc->f_max = MCI_F_MAX; + + mmc->ops = &phytium_mci_ops; + mmc->ocr_avail_sdio = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ocr_avail_mmc = MMC_VDD_165_195; + mmc->ocr_avail = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps |= host->caps; + + if (mmc->caps & MMC_CAP_SDIO_IRQ) { + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + dev_dbg(host->dev, "%s %d: MMC_CAP_SDIO_IRQ\n", __func__, __LINE__); + } + mmc->caps2 |= host->caps2; + + phytium_mci_set_int_mask(host); + + phytium_mci_init(host); + + phytium_mci_init_hw(host); + + if (host->is_use_dma) { + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + host->dma.adma_table = dma_alloc_coherent(host->dev, + MAX_BD_NUM * + sizeof(struct phytium_adma2_64_desc), + &host->dma.adma_addr, GFP_KERNEL); + if (!host->dma.adma_table) + return MCI_REALEASE_MEM; + + host->dma.desc_sz = ADMA2_64_DESC_SZ; + phytium_mci_init_adma_table(host, &host->dma); + } else { + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = 4 * 1024; + mmc->max_blk_size = 512; + mmc->max_req_size = 4 * 512; + mmc->max_blk_count = mmc->max_req_size / 512; + } + + spin_lock_init(&host->lock); + + ret = devm_request_irq(host->dev, host->irq, phytium_mci_irq, + host->irq_flags, 
"phytium-mci", host); + if (ret) + return ret; + + ret = mmc_add_host(mmc); + if (ret) { + dev_err(host->dev, "%s %d: mmc add host!\n", __func__, __LINE__); + return ret; + } + return 0; +} +EXPORT_SYMBOL(phyt_mci_common_probe); + +MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lai Xueyu "); +MODULE_VERSION(PHYTIUM_MMC_V2_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci-v2.h b/drivers/mmc/host/phytium-mci-v2.h new file mode 100644 index 0000000000000000000000000000000000000000..f02f9db33133260780577bc2891e03dccb712be2 --- /dev/null +++ b/drivers/mmc/host/phytium-mci-v2.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Driver for Phytium Multimedia Card Interface + * + * Copyright (C) 2024 Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_MCI_H +#define __PHYTIUM_MCI_H + +#include +#include +#include +#include +#include +#include +#include + +/*------------------------------------------------------*/ +/* Common Definition */ +/*------------------------------------------------------*/ +#define PHYTIUM_MMC_V2_DRIVER_VERSION "1.1.0" + +#define SD_BLOCK_SIZE 512 +#define MAX_BD_NUM 128 +#define MCI_CLK 1200000000 +#define MCI_REALEASE_MEM 0x1 + +#define MCI_PREPARE_FLAG (0x1 << 0) +#define MCI_ASYNC_FLAG (0x1 << 1) +#define MCI_MMAP_FLAG (0x1 << 2) + +#define MCI_F_MIN 400000 +#define MCI_F_MAX 50000000 + +/* MCI_RAW_INTS mask */ +#define MCI_RAW_INTS_CD (0x1 << 0) /* W1C */ +#define MCI_RAW_INTS_RE (0x1 << 1) /* W1C */ +#define MCI_RAW_INTS_CMD (0x1 << 2) /* W1C */ +#define MCI_RAW_INTS_DTO (0x1 << 3) /* W1C */ +#define MCI_RAW_INTS_TXDR (0x1 << 4) /* W1C */ +#define MCI_RAW_INTS_RXDR (0x1 << 5) /* W1C */ +#define MCI_RAW_INTS_RCRC (0x1 << 6) /* W1C */ +#define MCI_RAW_INTS_DCRC (0x1 << 7) /* W1C */ +#define MCI_RAW_INTS_RTO (0x1 << 8) /* W1C */ +#define MCI_RAW_INTS_DRTO (0x1 << 9) /* W1C */ +#define MCI_RAW_INTS_HTO (0x1 << 10) /* W1C */ +#define MCI_RAW_INTS_FRUN (0x1 << 11) 
/* W1C */ +#define MCI_RAW_INTS_HLE (0x1 << 12) /* W1C */ +#define MCI_RAW_INTS_SBE_BCI (0x1 << 13) /* W1C */ +#define MCI_RAW_INTS_ACD (0x1 << 14) /* W1C */ +#define MCI_RAW_INTS_EBE (0x1 << 15) /* W1C */ +#define MCI_RAW_INTS_SDIO (0x1 << 16) /* W1C */ + +/* MCI_MASKED_INTS mask */ +#define MCI_MASKED_INTS_CD (0x1 << 0) /* RO */ +#define MCI_MASKED_INTS_RE (0x1 << 1) /* RO */ +#define MCI_MASKED_INTS_CMD (0x1 << 2) /* RO */ +#define MCI_MASKED_INTS_DTO (0x1 << 3) /* RO */ +#define MCI_MASKED_INTS_TXDR (0x1 << 4) /* RO */ +#define MCI_MASKED_INTS_RXDR (0x1 << 5) /* RO */ +#define MCI_MASKED_INTS_RCRC (0x1 << 6) /* RO */ +#define MCI_MASKED_INTS_DCRC (0x1 << 7) /* RO */ +#define MCI_MASKED_INTS_RTO (0x1 << 8) /* RO */ +#define MCI_MASKED_INTS_DRTO (0x1 << 9) /* RO */ +#define MCI_MASKED_INTS_HTO (0x1 << 10) /* RO */ +#define MCI_MASKED_INTS_FRUN (0x1 << 11) /* RO */ +#define MCI_MASKED_INTS_HLE (0x1 << 12) /* RO */ +#define MCI_MASKED_INTS_SBE_BCI (0x1 << 13) /* RO */ +#define MCI_MASKED_INTS_ACD (0x1 << 14) /* RO */ +#define MCI_MASKED_INTS_EBE (0x1 << 15) /* RO */ +#define MCI_MASKED_INTS_SDIO (0x1 << 16) /* RO */ + +/* MCI_INT_MASK mask */ +#define MCI_INT_MASK_CD (0x1 << 0) /* RW */ +#define MCI_INT_MASK_RE (0x1 << 1) /* RW */ +#define MCI_INT_MASK_CMD (0x1 << 2) /* RW */ +#define MCI_INT_MASK_DTO (0x1 << 3) /* RW */ +#define MCI_INT_MASK_TXDR (0x1 << 4) /* RW */ +#define MCI_INT_MASK_RXDR (0x1 << 5) /* RW */ +#define MCI_INT_MASK_RCRC (0x1 << 6) /* RW */ +#define MCI_INT_MASK_DCRC (0x1 << 7) /* RW */ +#define MCI_INT_MASK_RTO (0x1 << 8) /* RW */ +#define MCI_INT_MASK_DRTO (0x1 << 9) /* RW */ +#define MCI_INT_MASK_HTO (0x1 << 10) /* RW */ +#define MCI_INT_MASK_FRUN (0x1 << 11) /* RW */ +#define MCI_INT_MASK_HLE (0x1 << 12) /* RW */ +#define MCI_INT_MASK_SBE_BCI (0x1 << 13) /* RW */ +#define MCI_INT_MASK_ACD (0x1 << 14) /* RW */ +#define MCI_INT_MASK_EBE (0x1 << 15) /* RW */ +#define MCI_INT_MASK_SDIO (0x1 << 16) /* RW */ + +#define MCI_CARD_DETECT 0x30 /* the 
card detect reg */ +#define MCI_DATA 0x34 /* the data FIFO access */ + +#define MCI_DEBUG 0x58 /* debug function */ + +/* MCI_DEBUG mask */ +#define MCI_DEBUG_ENABLE (0x1 << 0) /* RW */ +#define MCI_ALIVE_ENABLE (0x1 << 1) /* RW */ +#define MCI_ALIVE (0x1 << 2) /* RW */ +#define MCI_HAVE_LOG (0x1 << 3) /* RW */ +#define MCI_SIZE (0xf << 4) /* RW */ +#define MCI_ADDR (0x3fffff << 8) /* RW */ + +#define AP_TO_RV_MAX_DATA 64 +#define MCI_HW_RESET 0x00 +#define MCI_ADMA_RESET 0x01 +#define MCI_INIT 0x00 +#define MCI_INIT_HW 0x01 +#define MCI_DEINIT_HW 0x02 +#define MCI_START_CMD 0x03 +#define MCI_START_DATA 0x04 +#define MCI_OPS_SET_IOS 0x05 +#define MCI_SDIO_IRQ_EN 0x06 +#define MCI_OPS_SWITCH_VOLT 0x07 +#define MCI_GET_CD 0x00 +#define MCI_GET_CARD_BUSY 0x01 +#define MCI_GET_STATUS 0x02 +#define MCI_GET_RO 0x03 +#define MCI_CD_IRQ 0x00 +#define MCI_ERR_IRQ 0x01 +#define MCI_CMD_NEXT 0x02 +#define MCI_DATA_NEXT 0x03 + +#define RING_MAX 20 +#define RING_MAX_RV 10 + +#define COMPLETE_TIMEOUT 200 + +#define MMC_TX_HEAD 0x0 +#define MMC_TX_TAIL 0x4 +#define MMC_RX_HEAD 0x8 +#define MMC_RX_TAIL 0xc +#define MMC_TX_TAIL_INT (0x1 << 16) /* RW */ + +#define MMC_RV2AP_INT_MASK 0x028 +#define MMC_TXRING_HEAD_INT_MASK 0x1 /* RW */ +#define MMC_RXRING_TAIL_INT_MASK (0x1 << 1) /* RW */ + +#define MMC_RV2AP_INT 0x02c +#define MMC_TXRING_HEAD_INT 0x1 /* RW */ +#define MMC_RXRING_TAIL_INT (0x1 << 1) /* RW */ +/*--------------------------------------*/ +/* Structure Type */ +/*--------------------------------------*/ +/* Maximum segments assuming a 512KiB maximum requisition */ +/* size and a minimum4KiB page size. 
*/ +#define MCI_MAX_SEGS 128 +/* ADMA2 64-bit DMA descriptor size */ +#define ADMA2_64_DESC_SZ 32 + +/* Each descriptor can transfer up to 4KB of data in chained mode */ +/*ADMA2 64-bit descriptor.*/ +struct phytium_adma2_64_desc { + u32 attribute; +#define IDMAC_DES0_DIC BIT(1) +#define IDMAC_DES0_LD BIT(2) +#define IDMAC_DES0_FD BIT(3) +#define IDMAC_DES0_CH BIT(4) +#define IDMAC_DES0_ER BIT(5) +#define IDMAC_DES0_CES BIT(30) +#define IDMAC_DES0_OWN BIT(31) + u32 NON1; + u32 len; + u32 NON2; + u32 addr_lo; /* Lower 32-bits of Buffer Address Pointer 1*/ + u32 addr_hi; /* Upper 32-bits of Buffer Address Pointer 1*/ + u32 desc_lo; /* Lower 32-bits of Next Descriptor Address */ + u32 desc_hi; /* Upper 32-bits of Next Descriptor Address */ +} __packed __aligned(4); + +struct phytium_mci_dma { + struct scatterlist *sg; /* I/O scatter list */ + /* ADMA descriptor table, pointer to adma_table array */ + struct phytium_adma2_64_desc *adma_table; + /* Mapped ADMA descr. table, the physical address of adma_table array */ + dma_addr_t adma_addr; + unsigned int desc_sz; /* ADMA descriptor size */ +}; + +enum adtc_t { + COMMOM_ADTC = 0, + BLOCK_RW_ADTC = 1 +}; + +enum phytmmc_msg_cmd_id { + PHYTMMC_MSG_CMD_DEFAULT = 0, + PHYTMMC_MSG_CMD_SET, + PHYTMMC_MSG_CMD_GET, + PHYTMMC_MSG_CMD_DATA, + PHYTMMC_MSG_CMD_REPORT +}; + +struct phyt_msg_info { + u8 reserved; + u8 seq; + u8 cmd_type; + u8 cmd_subid; + u16 len; + u8 status1; + u8 status0; +#define NOT_READY 0x0 +#define SUCCESS 0x1 +#define GOING 0x2 +#define GENERIC_ERROR 0x10 +#define TYPE_NOT_SUPPORTED 0x11 +#define CMD_NOT_SUPPORTED 0x12 +#define INVALID_PARAMETERS 0x13 + u8 data[56]; +}; + +struct phytium_mci_host { + struct device *dev; + struct mmc_host *mmc; + u32 caps; + u32 caps2; + spinlock_t lock; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + void __iomem *base; /* host base address */ + void __iomem *regf_base; /* regfile base address */ + void *adma_table1; + dma_addr_t 
adma_addr1; + struct phytium_mci_dma dma_rx; /* dma channel */ + struct phytium_mci_dma dma_tx; /* dma channel */ + struct phytium_mci_dma dma; /* dma channel */ + u64 dma_mask; + bool vqmmc_enabled; + u32 *sg_virt_addr; + enum adtc_t adtc_type; /* 0:common adtc cmd; 1:block r/w adtc cmd;*/ + struct timer_list hotplug_timer; + struct delayed_work req_timeout; + int irq; /* host interrupt */ + u32 current_rca; /*the current rca value*/ + u32 current_ios_clk; + u32 is_use_dma; + u32 is_device_x100; + struct clk *src_clk; /* phytium_mci source clock */ + unsigned long clk_rate; + unsigned long clk_div; + unsigned long irq_flags; + unsigned long flags; +#define MCI_CARD_NEED_INIT 1 + bool cmd_cs; /*volt switch cmd cs status*/ + bool use_hold; /*use hold*/ + bool clk_set; /*clock set*/ + s32 clk_smpl_drv_25m; + s32 clk_smpl_drv_50m; + s32 clk_smpl_drv_66m; + s32 clk_smpl_drv_100m; + struct phyt_msg_info msg; + struct phyt_msg_info rxmsg; + bool debug_enable; + bool alive_enable; + struct timer_list alive_timer; +}; + +int phyt_mci_common_probe(struct phytium_mci_host *host); +void phyt_mci_deinit_hw(struct phytium_mci_host *host); +int phyt_mci_runtime_suspend(struct device *dev); +int phyt_mci_runtime_resume(struct device *dev); +int phyt_mci_resume(struct device *dev); +int phyt_mci_suspend(struct device *dev); +int phytium_mci_set_debug_enable(struct phytium_mci_host *host, bool enable); +int phytium_mci_set_alive_enable(struct phytium_mci_host *host, bool enable); + +struct init_data_t { + uint32_t caps; + uint32_t clk_rate; +}; + +struct start_command_data_t { + uint32_t cmd_arg; + uint32_t cmd_flags; + uint32_t cmd_opcode; + uint32_t rawcmd; +}; + +struct start_data_data_t { + uint32_t data_flags; + uint32_t adtc_type; + uint64_t adma_addr; + uint32_t mrq_data_blksz; + uint32_t mrq_data_blocks; + uint32_t cmd_arg; + uint32_t rawcmd; +}; + +struct set_ios_data_t { + uint32_t ios_clock; + uint8_t ios_timing; + uint8_t ios_bus_width; + uint8_t ios_power_mode; +}; + 
+struct cmd_next_data_t { + uint32_t events; + uint32_t response0; + uint32_t response1; + uint32_t response2; + uint32_t response3; +}; + +struct err_irq_data_t { + uint32_t raw_ints; + uint32_t ints_mask; + uint32_t dmac_status; + uint32_t dmac_mask; + uint32_t opcode; +}; +#endif /* __PHYTIUM_MCI_HW_H */ diff --git a/drivers/mmc/host/phytium-mci.c b/drivers/mmc/host/phytium-mci.c index 8be02664b49a111aa735631775fe6c8a5a80b020..49b0f277a2ab4da2316084e3e37aa2ab6d73a2ce 100644 --- a/drivers/mmc/host/phytium-mci.c +++ b/drivers/mmc/host/phytium-mci.c @@ -1623,3 +1623,4 @@ EXPORT_SYMBOL(phytium_mci_common_probe); MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cheng Quan "); +MODULE_VERSION(PHYTIUM_MMC_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci.h b/drivers/mmc/host/phytium-mci.h index 9d3e86a4d5dfd4b01250237ec0bb8f040e25c8d7..5bf6df4540c33c222916d8ae1e46d33d09c8da75 100644 --- a/drivers/mmc/host/phytium-mci.h +++ b/drivers/mmc/host/phytium-mci.h @@ -19,6 +19,8 @@ /*------------------------------------------------------*/ /* Common Definition */ /*------------------------------------------------------*/ +#define PHYTIUM_MMC_DRIVER_VERSION "1.0.0" + #define MAX_BD_NUM 128 #define SD_BLOCK_SIZE 512 diff --git a/drivers/mmc/host/phytium-sdci.c b/drivers/mmc/host/phytium-sdci.c index 44379f5589c682f2a2cc1860939f0e054b4cddec..7bb1de026f222da2479201bd3d1b2b2cba12c86a 100644 --- a/drivers/mmc/host/phytium-sdci.c +++ b/drivers/mmc/host/phytium-sdci.c @@ -39,6 +39,8 @@ #include "phytium-sdci.h" +#define PHYTIUM_SDCI_DRIVER_VERSION "1.0.0" + static const u32 cmd_ints_mask = SDCI_SDCI_NORMAL_ISER_ECC_EN | SDCI_SDCI_NORMAL_ISER_EEI_EN; static const u32 data_ints_mask = SDCI_BD_ISER_ETRS_EN; static const u32 err_ints_mask = SDCI_ERROR_ISER_ECTE_EN | SDCI_ERROR_ISR_CCRCE_EN | @@ -1437,3 +1439,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cheng Quan "); MODULE_AUTHOR("Chen Baozi "); MODULE_DESCRIPTION("Phytium SD 
Card Interface driver"); +MODULE_VERSION(PHYTIUM_SDCI_DRIVER_VERSION); diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index e910b432f7787ac5be6055f4bfe997b702050042..c494c1cc0b46e5bcd2771cd79b23d28e46e2f594 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -394,4 +394,10 @@ config MTD_PISMO When built as a module, it will be called pismo.ko +config MTD_PHYTIUM_LBC + tristate "phytium lbc Flash Chip Support" + depends on MTD_COMPLEX_MAPPINGS + help + This driver is support phytium localbus control,it is used for norflash devices. + If compiled as a module, it will be called phytium-lbc.ko. endmenu diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 094cfb244086516ba2ed6bb557191e4ca73b6976..1264665fdd5515a1c7c04fb01e6a89a6f78d4f4b 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -43,3 +43,4 @@ obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o obj-$(CONFIG_MTD_VMU) += vmu-flash.o obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o +obj-$(CONFIG_MTD_PHYTIUM_LBC) += phytium_lbc.o diff --git a/drivers/mtd/maps/phytium_lbc.c b/drivers/mtd/maps/phytium_lbc.c new file mode 100644 index 0000000000000000000000000000000000000000..3b39cbe3f164012a87121a5fa7f2514e1ea8f503 --- /dev/null +++ b/drivers/mtd/maps/phytium_lbc.c @@ -0,0 +1,563 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Localbus Controller driver. + * + * Copyright (c) 2023-2024, Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define LBC_CSAR(n) (0x018+8*n) +#define LBC_CSASR(n) (0x01c+8*n) +#define LBC_CSPR(n) (0x058+4*n) +#define LBC_CFG_SEQ_ADDR 0x130 +#define LBC_CFG_SEQ_DATA 0x134 +#define LBC_CFG_SEQ_STAT 0x138 +#define LBC_FTIM0_CS_NOR(n) (0x13c+0xc*n) +#define LBC_FTIM1_CS_NOR(n) (0x140+0xc*n) //read seq +#define LBC_FTIM2_CS_NOR(n) (0x144+0xc*n) //write seq +#define LBC_FTIM0_CS_GPCM(n) (0x19c+0xc*n) +#define LBC_FTIM1_CS_GPCM(n) (0x1a0+0xc*n) +#define LBC_FTIM2_CS_GPCM(n) (0x1a4+0xc*n) + +#define LBC_DEVICE_ADDR 0x10000000 +#define PHYTIUM_MAX_SRAM_BLOCK 8 + +#define PHYTIUM_LOCALBUS_DRIVER_VERSION "1.0.0" + +struct phytium_lbc_dev { + struct resource *res; + struct mtd_info *mtd; + struct map_info *map; + u32 device_type; // nor flash, sync-ram +}; + +struct phytium_lbc { + struct phytium_lbc_dev dev[PHYTIUM_MAX_SRAM_BLOCK]; + u8 dev_num; + void __iomem *io_base; + void __iomem *mm_base; + resource_size_t mm_size; + u32 total_device_size; +}; + +//functions for read, write, copy to, copy from, etc. 
+static map_word lbc_read(struct map_info *map, unsigned long adr) +{ + map_word temp; + unsigned long i = adr; + + temp.x[0] = readw_relaxed(map->virt + i); + return temp; +} + + +static void lbc_write(struct map_info *map, map_word d, unsigned long adr) +{ + u8 tmp[2] = {0}; + unsigned long i = adr; + + if ((adr & 0x1) == 0) + writew_relaxed(d.x[0], map->virt+i); + else { + tmp[1] = d.x[0]; + writew_relaxed(*tmp, map->virt + i-1); + } +} + +static void lbc_copy_from(struct map_info *map, void *to, + unsigned long from, ssize_t len) +{ + unsigned char *f = (unsigned char *)map->virt + from; + unsigned char *t = (unsigned char *)to; + + memcpy_fromio(t, f, len); +} + +static void lbc_copy_to(struct map_info *map, unsigned long to, + const void *from, ssize_t len) +{ + unsigned char *f = (unsigned char *)from; + unsigned char *t = (unsigned char *)map->virt + to; + + memcpy_toio(t, f, len); +} + +static int phytium_lbc_calac_device_size(int device_size) +{ + int ret = 0; + u32 value; + + if (device_size%SZ_16K == 0) { + value = device_size/SZ_16K - 1; + } else { + ret = -EINVAL; + return ret; + } + + return value; +} + +static int phytium_lbc_time_seq_init(struct phytium_lbc *lbc, u8 dev_num, u32 device_time_seq[]) +{ + switch (lbc->dev[dev_num].device_type) { + case 1: + writel_relaxed(device_time_seq[0], lbc->io_base + LBC_CSPR(dev_num)); + writel_relaxed(device_time_seq[1], lbc->io_base + LBC_FTIM0_CS_NOR(dev_num)); + writel_relaxed(device_time_seq[2], lbc->io_base + LBC_FTIM1_CS_NOR(dev_num)); + writel_relaxed(device_time_seq[3], lbc->io_base + LBC_FTIM2_CS_NOR(dev_num)); + break; + case 2: + writel_relaxed(device_time_seq[0], lbc->io_base + LBC_CSPR(dev_num)); + writel_relaxed(device_time_seq[1], lbc->io_base + LBC_FTIM0_CS_GPCM(dev_num)); + writel_relaxed(device_time_seq[2], lbc->io_base + LBC_FTIM1_CS_GPCM(dev_num)); + writel_relaxed(device_time_seq[3], lbc->io_base + LBC_FTIM2_CS_GPCM(dev_num)); + break; + case 3: + writel_relaxed(device_time_seq[0], 
lbc->io_base + LBC_CSPR(dev_num)); + writel_relaxed(device_time_seq[1], lbc->io_base + LBC_FTIM0_CS_NOR(dev_num)); + writel_relaxed(device_time_seq[2], lbc->io_base + LBC_FTIM1_CS_NOR(dev_num)); + writel_relaxed(device_time_seq[3], lbc->io_base + LBC_FTIM2_CS_NOR(dev_num)); + break; + case 4: + writel_relaxed(device_time_seq[0], lbc->io_base + LBC_CSPR(dev_num)); + writel_relaxed(device_time_seq[1], lbc->io_base + LBC_FTIM0_CS_GPCM(dev_num)); + writel_relaxed(device_time_seq[2], lbc->io_base + LBC_FTIM1_CS_GPCM(dev_num)); + writel_relaxed(device_time_seq[3], lbc->io_base + LBC_FTIM2_CS_GPCM(dev_num)); + break; + default: + return -EINVAL; + } + return 0; +} +static int phytium_lbc_dev_init(struct phytium_lbc *lbc, u8 dev_num, u32 device_time_seq[]) +{ + int ret = 0; + int err; + u32 tmp = lbc->dev[dev_num].map->phys >> 12; + + writel_relaxed(tmp, lbc->io_base+LBC_CSAR(dev_num)); + ret = phytium_lbc_calac_device_size(lbc->dev[dev_num].map->size); + if (ret >= 0) + writel_relaxed(ret, lbc->io_base+LBC_CSASR(dev_num)); + else + return ret; + //sequence + err = phytium_lbc_time_seq_init(lbc, dev_num, device_time_seq); + if (err) { + pr_err("lbc: time sequence init failed"); + return err; + } + return ret; +} + +static int phytium_lbc_of_setup(struct phytium_lbc *lbc, + struct platform_device *pdev, struct device_node *np, u8 dev_num) +{ + int err = 0; + int bankwidth; + int device_size; + int device_type; + u32 device_time_seq[4]; + struct device *dev = &pdev->dev; + int ret; + + of_property_read_u32(np, "bank-width", &bankwidth); + if (!bankwidth) + return -EINVAL; + + of_property_read_u32(np, "device-size", &device_size); + if (!device_size) + return -EINVAL; + if (device_size > SZ_256M) { + dev_err(dev, "device size is too large\n"); + return -EINVAL; + } + if (device_size < SZ_16K) { + dev_err(dev, "device size is too small\n"); + return -EINVAL; + } + + of_property_read_u32(np, "device-type", &device_type); + if (!device_type) + return -EINVAL; + 
lbc->dev[dev_num].device_type = device_type; + dev_dbg(dev, "device_type=%d", lbc->dev[dev_num].device_type); + + if (!lbc->dev[dev_num].device_type) { + dev_err(dev, "device type invalid or not find"); + return -EINVAL; + } + + ret = of_property_read_u32_array(np, "device-time-seq", device_time_seq, 4); + if (ret) { + dev_err(dev, "device time sequence invalid or not found\n"); + return -EINVAL; + } + + lbc->total_device_size = lbc->total_device_size + device_size; + if (lbc->total_device_size > lbc->mm_size) { + dev_err(dev, "total device size is too large\n"); + return -EINVAL; + } + + lbc->dev[dev_num].map = devm_kzalloc(&pdev->dev, sizeof(struct map_info), GFP_KERNEL); + if (!lbc->dev[dev_num].map) + return -ENOMEM; + + lbc->dev[dev_num].map->size = device_size; + if (dev_num == 0) { + lbc->dev[dev_num].map->phys = LBC_DEVICE_ADDR; + lbc->dev[dev_num].map->virt = lbc->mm_base; + } else { + lbc->dev[dev_num].map->phys = lbc->dev[dev_num-1].map->phys + device_size; + lbc->dev[dev_num].map->virt = lbc->dev[dev_num-1].map->virt + device_size; + } + + err = phytium_lbc_dev_init(lbc, dev_num, device_time_seq); + if (err < 0) { + dev_err(dev, "device size is invalid\n"); + return -EINVAL; + } + + lbc->dev[dev_num].map->name = np->name; + lbc->dev[dev_num].map->bankwidth = bankwidth; + lbc->dev[dev_num].map->read = lbc_read; + lbc->dev[dev_num].map->write = lbc_write; + lbc->dev[dev_num].map->copy_from = lbc_copy_from; + lbc->dev[dev_num].map->copy_to = lbc_copy_to; + + lbc->dev[dev_num].map->device_node = np; + + switch (lbc->dev[dev_num].device_type) { + case 1: + lbc->dev[dev_num].mtd = do_map_probe("cfi_probe", lbc->dev[dev_num].map); + break; + case 2: + lbc->dev[dev_num].mtd = do_map_probe("map_ram", lbc->dev[dev_num].map); + break; + case 3: + lbc->dev[dev_num].mtd = do_map_probe("cfi_probe", lbc->dev[dev_num].map); + break; + case 4: + lbc->dev[dev_num].mtd = do_map_probe("map_ram", lbc->dev[dev_num].map); + break; + default: + dev_err(dev, "failed to alloc 
probe func"); + return -EINVAL; + } + + if (!lbc->dev[dev_num].mtd) { + dev_err(&pdev->dev, "probing failed\n"); + return -ENXIO; + } + + lbc->dev[dev_num].mtd->dev.parent = &pdev->dev; + mtd_set_of_node(lbc->dev[dev_num].mtd, pdev->dev.of_node); + + err = mtd_device_register(lbc->dev[dev_num].mtd, NULL, 0); + if (err) + dev_err(&pdev->dev, "failed to add partitions\n"); + return err; +} + +static int phytium_lbc_acpi_setup(struct phytium_lbc *lbc, + struct platform_device *pdev, struct fwnode_handle *fh, + u8 dev_num) +{ + int err = 0; + int bankwidth; + int device_size; + int device_type; + u32 device_time_seq[4]; + struct device *dev = &pdev->dev; + int ret; + + fwnode_property_read_u32(fh, "bank-width", &bankwidth); + if (!bankwidth) + return -EINVAL; + + fwnode_property_read_u32(fh, "device-size", &device_size); + if (!device_size) + return -EINVAL; + if (device_size > SZ_256M) { + dev_err(dev, "device size is too large\n"); + return -EINVAL; + } + if (device_size < SZ_16K) { + dev_err(dev, "device size is too small\n"); + return -EINVAL; + } + + fwnode_property_read_u32(fh, "device-type", &device_type); + if (!device_type) + return -EINVAL; + lbc->dev[dev_num].device_type = device_type; + dev_dbg(dev, "device_type = %d", lbc->dev[dev_num].device_type); + + if (!lbc->dev[dev_num].device_type) { + dev_err(dev, "device type invalid or not find"); + return -EINVAL; + } + + ret = fwnode_property_read_u32_array(fh, "device-time-seq", device_time_seq, 4); + if (ret) { + dev_err(dev, "device time sequence invalid or not found\n"); + return -EINVAL; + } + + lbc->total_device_size = lbc->total_device_size + device_size; + if (lbc->total_device_size > lbc->mm_size) { + dev_err(dev, "total device size is too large\n"); + return -EINVAL; + } + + lbc->dev[dev_num].map = devm_kzalloc(&pdev->dev, sizeof(struct map_info), GFP_KERNEL); + if (!lbc->dev[dev_num].map) + return -ENOMEM; + + lbc->dev[dev_num].map->size = device_size; + if (dev_num == 0) { + 
lbc->dev[dev_num].map->phys = LBC_DEVICE_ADDR; + lbc->dev[dev_num].map->virt = lbc->mm_base; + } else { + lbc->dev[dev_num].map->phys = lbc->dev[dev_num-1].map->phys + device_size; + lbc->dev[dev_num].map->virt = lbc->dev[dev_num-1].map->virt + device_size; + } + + err = phytium_lbc_dev_init(lbc, dev_num, device_time_seq); + if (err < 0) { + dev_err(dev, "device size is invalid\n"); + return -EINVAL; + } + + //there is something wrong, because struct fwnode_handle has no variable + //called "name", need fixing + lbc->dev[dev_num].map->bankwidth = bankwidth; + lbc->dev[dev_num].map->read = lbc_read; + lbc->dev[dev_num].map->write = lbc_write; + lbc->dev[dev_num].map->copy_from = lbc_copy_from; + lbc->dev[dev_num].map->copy_to = lbc_copy_to; + + lbc->dev[dev_num].map->fwnode_handle = fh; + + switch (lbc->dev[dev_num].device_type) { + case 1: + lbc->dev[dev_num].mtd = do_map_probe("cfi_probe", lbc->dev[dev_num].map); + break; + case 2: + lbc->dev[dev_num].mtd = do_map_probe("map_ram", lbc->dev[dev_num].map); + break; + case 3: + lbc->dev[dev_num].mtd = do_map_probe("cfi_probe", lbc->dev[dev_num].map); + break; + case 4: + lbc->dev[dev_num].mtd = do_map_probe("map_ram", lbc->dev[dev_num].map); + break; + default: + dev_err(dev, "failed to alloc probe func"); + return -EINVAL; + } + + if (!lbc->dev[dev_num].mtd) { + dev_err(&pdev->dev, "probing failed\n"); + return -ENXIO; + } + + lbc->dev[dev_num].mtd->dev.parent = &pdev->dev; + lbc->dev[dev_num].mtd->dev.fwnode = pdev->dev.fwnode; + + err = mtd_device_register(lbc->dev[dev_num].mtd, NULL, 0); + if (err) + dev_err(&pdev->dev, "failed to add partitions\n"); + return err; +} + +static int phytium_lbc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phytium_lbc *lbc; + struct device_node *np; + struct fwnode_handle *child_handle; + u8 dev_num = 0; + int ret; + int i; + struct resource *res; + const char **reg_names_array; + + lbc = devm_kzalloc(dev, sizeof(struct phytium_lbc), 
GFP_KERNEL); + if (!lbc) + return -ENOMEM; + + device_for_each_child_node(dev, child_handle) + dev_num++; + lbc->dev_num = dev_num; + + if (!lbc->dev_num || lbc->dev_num > PHYTIUM_MAX_SRAM_BLOCK) { + dev_err(dev, "no device deceted, or device number is too large\n"); + kfree(lbc); + return -ENODEV; + } + dev_num = 0; + reg_names_array = kcalloc(4, sizeof(*reg_names_array), GFP_KERNEL); + if (dev->of_node) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "localbus"); + lbc->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(lbc->io_base)) { + ret = PTR_ERR(lbc->io_base); + kfree(lbc); + return ret; + } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lbc_mm"); + lbc->mm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(lbc->mm_base)) { + ret = PTR_ERR(lbc->mm_base); + kfree(lbc); + return ret; + } + } else if (has_acpi_companion(dev)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fwnode_property_read_string_array(dev->fwnode, "reg-names", reg_names_array, 2); + res->name = reg_names_array[0]; + lbc->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(lbc->io_base)) { + ret = PTR_ERR(lbc->io_base); + kfree(lbc); + return ret; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + res->name = reg_names_array[1]; + lbc->mm_base = devm_ioremap_resource(dev, res); + if (IS_ERR(lbc->mm_base)) { + ret = PTR_ERR(lbc->mm_base); + kfree(lbc); + return ret; + } + } + + lbc->mm_size = resource_size(res); + lbc->total_device_size = 0; + + platform_set_drvdata(pdev, lbc); + + if (dev->of_node) { + for_each_available_child_of_node(dev->of_node, np) { + ret = phytium_lbc_of_setup(lbc, pdev, np, dev_num); + if (ret) { + dev_err(dev, "unable to setup new device\n"); + goto err_destroy; + } + dev_num++; + lbc->dev_num = dev_num; + } + } else if (has_acpi_companion(dev)) { + device_for_each_child_node(dev, child_handle) { + ret = phytium_lbc_acpi_setup(lbc, pdev, child_handle, dev_num); + if (ret) { + dev_err(dev, "unable to 
setup new device\n"); + goto err_destroy; + } + dev_num++; + lbc->dev_num = dev_num; + } + } + + return 0; + +err_destroy: + for (i = 0; i < dev_num; i++) + map_destroy(lbc->dev[i].mtd); + return ret; +} + +static int phytium_lbc_remove(struct platform_device *pdev) +{ + + struct phytium_lbc *lbc = platform_get_drvdata(pdev); + int i = 0; + + for (i = 0; i < lbc->dev_num; i++) { + mtd_device_unregister(lbc->dev[i].mtd); + map_destroy(lbc->dev[i].mtd); + } + + return 0; +} + +static int __maybe_unused phytium_lbc_suspend(struct device *dev) +{ + return pm_runtime_force_suspend(dev); +} + +static int __maybe_unused phytium_lbc_resume(struct device *dev) +{ + return pm_runtime_force_resume(dev); +} + +static const struct dev_pm_ops phytium_lbc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_lbc_suspend, phytium_lbc_resume) +}; + +static const struct of_device_id phytium_lbc_of_match[] = { + {.compatible = "phytium,localbus"}, + { } +}; +MODULE_DEVICE_TABLE(of, phytium_lbc_of_match); +static const struct acpi_device_id phytium_lbc_acpi_match[] = { + {"PHYT0053", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, phytium_lbc_acpi_match); + +static struct platform_driver phytium_lbc_driver = { + .probe = phytium_lbc_probe, + .remove = phytium_lbc_remove, + .driver = { + .name = "phytium_lbc", + .of_match_table = phytium_lbc_of_match, + .acpi_match_table = phytium_lbc_acpi_match, + }, +}; + +static int __init phytium_lbc_init(void) +{ + int err; + + err = platform_driver_register(&phytium_lbc_driver); + return err; +} + +static void __exit phytium_lbc_exit(void) +{ + platform_driver_unregister(&phytium_lbc_driver); +} + +module_init(phytium_lbc_init); +module_exit(phytium_lbc_exit); + +MODULE_AUTHOR("Hanmo Wang "); +MODULE_DESCRIPTION("Phytium LBC driver for devices"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PHYTIUM_LOCALBUS_DRIVER_VERSION); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 
3a5441dcc7337744e9de84094c84b6e4fbbc4e0e..d8a84e28d63c2c493f43e29712e293e4ccab2c5b 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -1337,11 +1337,12 @@ struct macb { struct phylink_config phylink_config; struct phylink_pcs phylink_usx_pcs; struct phylink_pcs phylink_sgmii_pcs; - int link; int speed; int duplex; int use_ncsi; + int force_phy_mode; + u32 caps; unsigned int dma_burst_length; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 18e28ac80f5f613df533591890e1e0905cef44c8..0391a41ea5a1e79e77b3816080818eef6484f91d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -640,7 +640,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs, struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs); gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) | - GEM_BIT(SIGNAL_OK)); + GEM_BIT(SIGNAL_OK) | GEM_BIT(TX_EN)); return 0; } @@ -1080,33 +1080,26 @@ static const struct phylink_mac_ops macb_phylink_ops = { .mac_link_up = macb_mac_link_up, }; -static bool macb_phy_handle_exists(struct device_node *dn) -{ - dn = of_parse_phandle(dn, "phy-handle", 0); - of_node_put(dn); - return dn != NULL; -} - static int macb_phylink_connect(struct macb *bp) { - struct device_node *dn = bp->pdev->dev.of_node; + struct fwnode_handle *fwnode = dev_fwnode(&bp->pdev->dev); struct net_device *dev = bp->dev; struct phy_device *phydev; int ret = 0; - if (dn) - ret = phylink_of_phy_connect(bp->phylink, dn, 0); + if (fwnode) + ret = phylink_fwnode_phy_connect(bp->phylink, fwnode, 0); - if (!dn || (ret && !macb_phy_handle_exists(dn))) { + if (!fwnode || ret) { phydev = phy_find_first(bp->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); return -ENXIO; } + phydev->force_mode = bp->force_phy_mode; /* attach the mac to the phy */ - if (phylink_expects_phy(bp->phylink)) - ret = phylink_connect_phy(bp->phylink, phydev); + ret = 
phylink_connect_phy(bp->phylink, phydev); } if (ret) { @@ -2018,7 +2011,8 @@ static void macb_tx_restart(struct macb_queue *queue) if (queue->tx_head == queue->tx_tail) goto out_tx_ptr_unlock; - tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); + tbqp = queue_readl(queue, TBQP) - lower_32_bits(queue->tx_ring_dma); + tbqp = tbqp / macb_dma_desc_get_size(bp); tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head)); @@ -3248,9 +3242,6 @@ static void macb_init_hw(struct macb *bp) if (bp->caps & MACB_CAPS_SEL_CLK) bp->sel_clk_hw(bp, bp->speed); phytium_mac_config(bp); - if (bp->link) - macb_usx_pcs_link_up(&bp->phylink_usx_pcs, 0, - bp->phy_interface, bp->speed, bp->duplex); } else { bp->speed = SPEED_10; bp->duplex = DUPLEX_HALF; @@ -5799,6 +5790,10 @@ static int macb_probe(struct platform_device *pdev) if (err) goto err_out_free_netdev; + if (device_property_read_bool(&pdev->dev, "force-phy-mode")) { + bp->force_phy_mode = 1; + } + err = macb_mii_init(bp); if (err) goto err_out_phy_exit; @@ -5837,8 +5832,10 @@ static int macb_probe(struct platform_device *pdev) return 0; err_out_unregister_mdio: - mdiobus_unregister(bp->mii_bus); - mdiobus_free(bp->mii_bus); + if (bp->mii_bus) { + mdiobus_unregister(bp->mii_bus); + mdiobus_free(bp->mii_bus); + } err_out_phy_exit: phy_exit(bp->sgmii_phy); @@ -5866,8 +5863,11 @@ static int macb_remove(struct platform_device *pdev) bp = netdev_priv(dev); unregister_netdev(dev); phy_exit(bp->sgmii_phy); - mdiobus_unregister(bp->mii_bus); - mdiobus_free(bp->mii_bus); + + if (bp->mii_bus) { + mdiobus_unregister(bp->mii_bus); + mdiobus_free(bp->mii_bus); + } tasklet_kill(&bp->hresp_err_tasklet); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/phytium/Makefile b/drivers/net/ethernet/phytium/Makefile index 6e710d4d54b6641bc1679405d6909827cfd09eeb..debaed63e45388b11f529678e547166e753c5d10 100644 --- 
a/drivers/net/ethernet/phytium/Makefile +++ b/drivers/net/ethernet/phytium/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. # # Makefile for the Phytium network device drivers. # -# obj-$(CONFIG_PHYTMAC) += phytmac.o diff --git a/drivers/net/ethernet/phytium/phytmac.h b/drivers/net/ethernet/phytium/phytmac.h index df4ec49a0851222571c52642bc1d282bc8e4ce8d..ee22cda030e94a3548ffc3008bb6c4745dcf4d5d 100644 --- a/drivers/net/ethernet/phytium/phytmac.h +++ b/drivers/net/ethernet/phytium/phytmac.h @@ -1,4 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. */ + #ifndef _PHYTMAC_H #define _PHYTMAC_H @@ -10,10 +12,12 @@ #include #include #include +#include -#define PHYTMAC_DRV_NAME "phytium-mac" +#define PHYTMAC_PCI_DRV_NAME "phytmac_pci" +#define PHYTMAC_PLAT_DRV_NAME "phytmac_platform" #define PHYTMAC_DRV_DESC "PHYTIUM Ethernet Driver" -#define PHYTMAC_DRIVER_VERSION "1.0.5" +#define PHYTMAC_DRIVER_VERSION "1.0.47" #define PHYTMAC_DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ @@ -44,11 +48,15 @@ #define PHYTMAC_POWEROFF 1 #define PHYTMAC_POWERON 2 +#define PHYTMAC_PWCTL_GMAC_ID 6 +#define PHYTMAC_PWCTL_DEFAULT_VAL 0 #define PHYTMAC_WOL_MAGIC_PACKET 1 #define DEFAULT_MSG_RING_SIZE 16 +#define PHYTMAC_MDIO_TIMEOUT 1000000 /* in usecs */ + #define PHYTMAC_CAPS_JUMBO 0x00000001 #define PHYTMAC_CAPS_PTP 0x00000002 #define PHYTMAC_CAPS_BD_RD_PREFETCH 0x00000004 @@ -58,13 +66,18 @@ #define PHYTMAC_CAPS_TAILPTR 0x00000040 #define PHYTMAC_CAPS_START 0x00000080 #define PHYTMAC_CAPS_NO_WOL 0x0000100 -#define PHYTMAC_CAPS_LPI 0x0000400 +#define PHYTMAC_CAPS_PWCTRL 0x0000200 #define PHYTMAC_CAPS_MSG 0x0000800 +#define PHYTMAC_CAPS_RXPTR 0x0001000 + +#define VERSION_V0 0 +#define VERSION_V3 0x3 #define PHYTMAC_TX 0x1 #define PHYTMAC_RX 0x2 -#define PHYTMAC_GREGS_LEN 16 +#define PHYTMAC_ETHTOOLD_REGS_LEN 64 +#define PHYTMAC_STATIS_REG_NUM 45 #define 
PHYTMAC_MTU_MIN_SIZE ETH_MIN_MTU @@ -102,9 +115,6 @@ #define PHYTMAC_MSG_READ(_pdata, _reg) \ __raw_readl((_pdata)->mac_regs + (_reg)) -#define PHYTMAC_WRITE(_pdata, _reg, _val) \ - __raw_writel((_val), (_pdata)->mac_regs + (_reg)) - #define LSO_UFO 1 #define LSO_TSO 2 @@ -122,6 +132,47 @@ #define PHYTMAC_WAKE_UCAST 0x00000004 #define PHYTMAC_WAKE_MCAST 0x00000008 +/* XDP */ +#define PHYTMAC_XDP_PASS 0 +#define PHYTMAC_XDP_CONSUMED BIT(0) +#define PHYTMAC_XDP_TX BIT(1) +#define PHYTMAC_XDP_REDIR BIT(2) + +#define PHYTMAC_DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum phytmac_interface { + PHYTMAC_PHY_INTERFACE_MODE_NA, + PHYTMAC_PHY_INTERFACE_MODE_INTERNAL, + PHYTMAC_PHY_INTERFACE_MODE_MII, + PHYTMAC_PHY_INTERFACE_MODE_GMII, + PHYTMAC_PHY_INTERFACE_MODE_SGMII, + PHYTMAC_PHY_INTERFACE_MODE_TBI, + PHYTMAC_PHY_INTERFACE_MODE_REVMII, + PHYTMAC_PHY_INTERFACE_MODE_RMII, + PHYTMAC_PHY_INTERFACE_MODE_RGMII, + PHYTMAC_PHY_INTERFACE_MODE_RGMII_ID, + PHYTMAC_PHY_INTERFACE_MODE_RGMII_RXID, + PHYTMAC_PHY_INTERFACE_MODE_RGMII_TXID, + PHYTMAC_PHY_INTERFACE_MODE_RTBI, + PHYTMAC_PHY_INTERFACE_MODE_SMII, + PHYTMAC_PHY_INTERFACE_MODE_XGMII, + PHYTMAC_PHY_INTERFACE_MODE_MOCA, + PHYTMAC_PHY_INTERFACE_MODE_QSGMII, + PHYTMAC_PHY_INTERFACE_MODE_TRGMII, + PHYTMAC_PHY_INTERFACE_MODE_100BASEX, + PHYTMAC_PHY_INTERFACE_MODE_1000BASEX, + PHYTMAC_PHY_INTERFACE_MODE_2500BASEX, + PHYTMAC_PHY_INTERFACE_MODE_5GBASER, + PHYTMAC_PHY_INTERFACE_MODE_RXAUI, + PHYTMAC_PHY_INTERFACE_MODE_XAUI, + /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */ + PHYTMAC_PHY_INTERFACE_MODE_10GBASER, + PHYTMAC_PHY_INTERFACE_MODE_USXGMII, + /* 10GBASE-KR - with Clause 73 AN */ + PHYTMAC_PHY_INTERFACE_MODE_10GKR, + PHYTMAC_PHY_INTERFACE_MODE_MAX, +}; + struct packet_info { int lso; int desc_cnt; @@ -214,7 +265,6 @@ static const struct phytmac_statistics queue_statistics[] = { struct phytmac_config { struct phytmac_hw_if *hw_if; u32 caps; - u32 tsu_rate; u16 queue_num; }; @@ -294,8 +344,20 @@ struct phytmac_dma_desc { }; #endif +/* 
TX resources are shared between XDP and netstack + * and we need to tag the buffer type to distinguish them + */ +enum phytmac_tx_buf_type { + PHYTMAC_TYPE_SKB = 0, + PHYTMAC_TYPE_XDP, +}; + struct phytmac_tx_skb { - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + enum phytmac_tx_buf_type type; dma_addr_t addr; size_t length; bool mapped_as_page; @@ -318,6 +380,7 @@ struct phytmac_queue { struct phytmac *pdata; int irq; int index; + struct bpf_prog *xdp_prog; /* tx queue info */ unsigned int tx_head; @@ -340,6 +403,7 @@ struct phytmac_queue { struct phytmac_rx_buffer *rx_buffer_info; struct napi_struct rx_napi; struct phytmac_queue_stats stats; + struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_PHYTMAC_ENABLE_PTP struct work_struct tx_ts_task; @@ -364,11 +428,12 @@ struct phytmac_msg { u32 tx_msg_ring_size; u32 rx_msg_ring_size; u32 tx_msg_head; - u32 tx_msg_tail; + u32 tx_msg_wr_tail; + u32 tx_msg_rd_tail; u32 rx_msg_head; u32 rx_msg_tail; - /* Lock to protect msg */ - spinlock_t msg_lock; + /*use msg_lock to protect msg */ + spinlock_t msg_lock; }; struct ts_ctrl { @@ -385,6 +450,7 @@ struct phytmac { struct platform_device *platdev; struct net_device *ndev; struct device *dev; + struct bpf_prog *xdp_prog; struct ncsi_dev *ncsidev; struct fwnode_handle *fwnode; struct phytmac_hw_if *hw_if; @@ -401,13 +467,10 @@ struct phytmac { u32 min_tx_length; u32 jumbo_len; u32 wol; - u32 lpi; u32 power_state; struct work_struct restart_task; /* Lock to protect mac config */ spinlock_t lock; - /* Lock to protect msg tx */ - spinlock_t msg_lock; u32 rx_ring_size; u32 tx_ring_size; u32 dma_data_width; @@ -430,6 +493,7 @@ struct phytmac { struct phylink_pcs phylink_pcs; int pause; phy_interface_t phy_interface; + enum phytmac_interface phytmac_v2_interface; int speed; int duplex; int autoneg; @@ -446,8 +510,27 @@ struct phytmac { /* Lock to protect fs */ spinlock_t rx_fs_lock; unsigned int max_rx_fs; + u32 version; + char fw_version[32]; }; +/* 
phytmac_desc_unused - calculate if we have unused descriptors */ +static inline int phytmac_txdesc_unused(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + + if (queue->tx_head > queue->tx_tail) + return queue->tx_head - queue->tx_tail - 1; + + return pdata->tx_ring_size + queue->tx_head - queue->tx_tail - 1; +} + +static inline struct netdev_queue *phytmac_get_txq(const struct phytmac *pdata, + struct phytmac_queue *queue) +{ + return netdev_get_tx_queue(pdata->ndev, queue->index); +} + struct phytmac_hw_if { int (*init_msg_ring)(struct phytmac *pdata); int (*init_hw)(struct phytmac *pdata); @@ -493,6 +576,7 @@ struct phytmac_hw_if { int (*mdio_read)(struct phytmac *pdata, int mii_id, int regnum); int (*mdio_write)(struct phytmac *pdata, int mii_id, int regnum, u16 data); + int (*mdio_idle)(struct phytmac *pdata); int (*mdio_read_c45)(struct phytmac *pdata, int mii_id, int devad, int regnum); int (*mdio_write_c45)(struct phytmac *pdata, int mii_id, int devad, int regnum, u16 data); @@ -507,7 +591,7 @@ struct phytmac_hw_if { void (*init_rx_map)(struct phytmac_queue *queue, u32 index); unsigned int (*rx_map)(struct phytmac_queue *queue, u32 index, dma_addr_t addr); void (*transmit)(struct phytmac_queue *queue); - void (*restart)(struct phytmac *pdata); + void (*update_rx_tail)(struct phytmac_queue *queue); int (*tx_complete)(const struct phytmac_dma_desc *desc); bool (*rx_complete)(const struct phytmac_dma_desc *desc); int (*get_rx_pkt_len)(struct phytmac *pdata, const struct phytmac_dma_desc *desc); @@ -517,6 +601,7 @@ struct phytmac_hw_if { bool (*rx_pkt_start)(const struct phytmac_dma_desc *desc); bool (*rx_pkt_end)(const struct phytmac_dma_desc *desc); unsigned int (*zero_rx_desc_addr)(struct phytmac_dma_desc *desc); + unsigned int (*zero_tx_desc)(struct phytmac_dma_desc *desc); void (*clear_rx_desc)(struct phytmac_queue *queue, int begin, int end); void (*clear_tx_desc)(struct phytmac_queue *queue); /* ptp */ @@ -585,6 +670,7 @@ struct 
phytmac_hw_if { #define PHYTMAC_RX_DMA_ATTR \ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) #define PHYTMAC_SKB_PAD (NET_SKB_PAD) +#define PHYTMAC_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define PHYTMAC_RXBUFFER_2048 2048 #define PHYTMAC_MAX_FRAME_BUILD_SKB \ @@ -593,6 +679,8 @@ struct phytmac_hw_if { #define PHYTMAC_RX_PAGE_ORDER 0 #define PHYTMAC_RX_PAGE_SIZE (PAGE_SIZE << PHYTMAC_RX_PAGE_ORDER) +void __iomem * +phytmac_devm_ioremap_resource_np(struct device *dev, const struct resource *res); struct phytmac_tx_skb *phytmac_get_tx_skb(struct phytmac_queue *queue, unsigned int index); inline struct phytmac_dma_desc *phytmac_get_tx_desc(struct phytmac_queue *queue, @@ -604,6 +692,7 @@ int phytmac_drv_probe(struct phytmac *pdata); int phytmac_drv_remove(struct phytmac *pdata); int phytmac_drv_suspend(struct phytmac *pdata); int phytmac_drv_resume(struct phytmac *pdata); +void phytmac_drv_shutdown(struct phytmac *pdata); struct phytmac *phytmac_alloc_pdata(struct device *dev); void phytmac_free_pdata(struct phytmac *pdata); int phytmac_reset_ringsize(struct phytmac *pdata, u32 rx_size, u32 tx_size); diff --git a/drivers/net/ethernet/phytium/phytmac_ethtool.c b/drivers/net/ethernet/phytium/phytmac_ethtool.c index e1698fa10b0955c0983bdd861427b740f2ff0f2e..bed3d6e4d040cec4b5c1b85a4141073e8d87ff53 100644 --- a/drivers/net/ethernet/phytium/phytmac_ethtool.c +++ b/drivers/net/ethernet/phytium/phytmac_ethtool.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. 
*/ #include #include @@ -69,7 +70,7 @@ static void phytmac_get_ethtool_strings(struct net_device *ndev, u32 sset, u8 *p static inline int phytmac_get_regs_len(struct net_device *ndev) { - return PHYTMAC_GREGS_LEN; + return PHYTMAC_ETHTOOLD_REGS_LEN; } static void phytmac_get_regs(struct net_device *ndev, @@ -80,7 +81,7 @@ static void phytmac_get_regs(struct net_device *ndev, struct phytmac_hw_if *hw_if = pdata->hw_if; u32 *regs_buff = p; - memset(p, 0, PHYTMAC_GREGS_LEN * sizeof(u32)); + memset(p, 0, PHYTMAC_ETHTOOLD_REGS_LEN); hw_if->get_regs(pdata, regs_buff); } @@ -95,18 +96,14 @@ static void phytmac_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol wol->supported = WAKE_MAGIC | WAKE_ARP | WAKE_UCAST | WAKE_MCAST; - if (pdata->wol & PHYTMAC_WAKE_MAGIC) { + if (pdata->wol & PHYTMAC_WAKE_MAGIC) wol->wolopts |= WAKE_MAGIC; - } - if (pdata->wol & PHYTMAC_WAKE_ARP) { + if (pdata->wol & PHYTMAC_WAKE_ARP) wol->wolopts |= WAKE_ARP; - } - if (pdata->wol & PHYTMAC_WAKE_UCAST) { + if (pdata->wol & PHYTMAC_WAKE_UCAST) wol->wolopts |= WAKE_UCAST; - } - if (pdata->wol & PHYTMAC_WAKE_MCAST) { + if (pdata->wol & PHYTMAC_WAKE_MCAST) wol->wolopts |= WAKE_MCAST; - } } static int phytmac_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) @@ -510,13 +507,16 @@ static void phytmac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo { struct phytmac *pdata = netdev_priv(ndev); - strscpy(drvinfo->driver, PHYTMAC_DRV_NAME, sizeof(drvinfo->driver)); strscpy(drvinfo->version, PHYTMAC_DRIVER_VERSION, sizeof(drvinfo->version)); + strscpy(drvinfo->fw_version, pdata->fw_version, sizeof(drvinfo->fw_version)); - if (pdata->platdev) + if (pdata->platdev) { + strscpy(drvinfo->driver, PHYTMAC_PLAT_DRV_NAME, sizeof(drvinfo->driver)); strscpy(drvinfo->bus_info, pdata->platdev->name, sizeof(drvinfo->bus_info)); - else if (pdata->pcidev) + } else if (pdata->pcidev) { + strscpy(drvinfo->driver, PHYTMAC_PCI_DRV_NAME, sizeof(drvinfo->driver)); strscpy(drvinfo->bus_info, 
pci_name(pdata->pcidev), sizeof(drvinfo->bus_info)); + } } static const struct ethtool_ops phytmac_ethtool_ops = { diff --git a/drivers/net/ethernet/phytium/phytmac_main.c b/drivers/net/ethernet/phytium/phytmac_main.c index fd29c647e2775bc4b614ec8ceb48bc4afc60e15d..9ed5ec09523b3ee6ef99dbdae62e105c202bb911 100644 --- a/drivers/net/ethernet/phytium/phytmac_main.c +++ b/drivers/net/ethernet/phytium/phytmac_main.c @@ -2,6 +2,8 @@ /* * Phytium Ethernet Controller driver * + * Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -38,6 +40,8 @@ #include #include #include +#include +#include #include "phytmac.h" #include "phytmac_ptp.h" @@ -58,11 +62,21 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); /* Max length of transmit frame must be a multiple of 8 bytes */ #define PHYTMAC_TX_LEN_ALIGN 8 -/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a +/* Limit maximum TX length as per TSO errata. This is to avoid a * false amba_error in TX path from the DMA assuming there is not enough * space in the SRAM (16KB) even when there is. 
*/ +static int phytmac_xdp_xmit_back(struct phytmac *pdata, struct xdp_buff *xdp); + +static inline int phytmac_calc_rx_buf_len(void) +{ +#if (PAGE_SIZE < 8192) + return rounddown(PHYTMAC_MAX_FRAME_BUILD_SKB, RX_BUFFER_MULTIPLE); +#endif + return rounddown(PHYTMAC_RXBUFFER_2048, RX_BUFFER_MULTIPLE); +} + static int phytmac_queue_phyaddr_check(struct phytmac *pdata, dma_addr_t ring_base_addr, int offset) { @@ -82,9 +96,20 @@ static int phytmac_queue_phyaddr_check(struct phytmac *pdata, dma_addr_t ring_ba static int phytmac_change_mtu(struct net_device *ndev, int new_mtu) { + struct phytmac *pdata = netdev_priv(ndev); + int max_frame = new_mtu + PHYTMAC_ETH_PKT_HDR_PAD; + if (netif_running(ndev)) return -EBUSY; + if (pdata->xdp_prog) { + if (max_frame > phytmac_calc_rx_buf_len()) { + netdev_warn(pdata->ndev, + "Requested MTU size is not supported with XDP.\n"); + return -EINVAL; + } + } + if (new_mtu > MAX_MTU) { netdev_info(ndev, "Can not set MTU over %d.\n", MAX_MTU); return -EINVAL; @@ -125,10 +150,12 @@ static int phytmac_get_mac_address(struct phytmac *pdata) if (is_valid_ether_addr(addr)) { eth_hw_addr_set(pdata->ndev, addr); + ether_addr_copy(pdata->ndev->perm_addr, addr); return 0; } dev_info(pdata->dev, "invalid hw address, using random\n"); eth_hw_addr_random(pdata->ndev); + ether_addr_copy(pdata->ndev->perm_addr, pdata->ndev->dev_addr); return 0; } @@ -297,14 +324,6 @@ static struct net_device_stats *phytmac_get_stats(struct net_device *dev) return nstat; } -static inline int phytmac_calc_rx_buf_len(void) -{ -#if (PAGE_SIZE < 8192) - return rounddown(PHYTMAC_MAX_FRAME_BUILD_SKB, RX_BUFFER_MULTIPLE); -#endif - return rounddown(PHYTMAC_RXBUFFER_2048, RX_BUFFER_MULTIPLE); -} - inline struct phytmac_dma_desc *phytmac_get_rx_desc(struct phytmac_queue *queue, unsigned int index) { @@ -372,6 +391,7 @@ static int phytmac_free_tx_resource(struct phytmac *pdata) struct phytmac_dma_desc *tx_ring_base = NULL; dma_addr_t tx_ring_base_addr; unsigned int q; + int 
tx_offset; int size; queue = pdata->queues; @@ -389,8 +409,9 @@ static int phytmac_free_tx_resource(struct phytmac *pdata) } if (tx_ring_base) { - size = pdata->queues_num * (TX_RING_BYTES(pdata) + pdata->tx_bd_prefetch + - RING_ADDR_INTERVAL); + tx_offset = TX_RING_BYTES(pdata) + pdata->tx_bd_prefetch + RING_ADDR_INTERVAL; + tx_offset = ALIGN(tx_offset, 4096); + size = pdata->queues_num * tx_offset; dma_free_coherent(pdata->dev, size, tx_ring_base, tx_ring_base_addr); } @@ -403,6 +424,7 @@ static int phytmac_free_rx_resource(struct phytmac *pdata) struct phytmac_dma_desc *rx_ring_base = NULL; dma_addr_t rx_ring_base_addr; unsigned int q; + int rx_offset; int size; queue = pdata->queues; @@ -417,6 +439,11 @@ static int phytmac_free_rx_resource(struct phytmac *pdata) if (queue->rx_ring) queue->rx_ring = NULL; + if (queue->xdp_prog) { + queue->xdp_prog = NULL; + xdp_rxq_info_unreg(&queue->xdp_rxq); + } + if (queue->rx_buffer_info) { vfree(queue->rx_buffer_info); queue->rx_buffer_info = NULL; @@ -424,8 +451,9 @@ static int phytmac_free_rx_resource(struct phytmac *pdata) } if (rx_ring_base) { - size = pdata->queues_num * (RX_RING_BYTES(pdata) + pdata->rx_bd_prefetch + - RING_ADDR_INTERVAL); + rx_offset = RX_RING_BYTES(pdata) + pdata->rx_bd_prefetch + RING_ADDR_INTERVAL; + rx_offset = ALIGN(rx_offset, 4096); + size = pdata->queues_num * rx_offset; dma_free_coherent(pdata->dev, size, rx_ring_base, rx_ring_base_addr); } @@ -544,6 +572,20 @@ static int phytmac_alloc_rx_resource(struct phytmac *pdata) queue->rx_buffer_info = vzalloc(size); if (!queue->rx_buffer_info) goto err; + + memset(&queue->xdp_rxq, 0, sizeof(queue->xdp_rxq)); + WRITE_ONCE(queue->xdp_prog, pdata->xdp_prog); + + /* XDP RX-queue info */ + ret = xdp_rxq_info_reg(&queue->xdp_rxq, queue->pdata->ndev, q, 0); + if (ret < 0) { + netdev_err(pdata->ndev, "Failed to register xdp_rxq index %u\n", q); + goto err; + } + + xdp_rxq_info_unreg_mem_model(&queue->xdp_rxq); + 
WARN_ON(xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); } return 0; @@ -859,6 +901,115 @@ static struct sk_buff *phytmac_build_skb(struct phytmac_rx_buffer *rx_buffer, return skb; } +static void phytmac_rx_buffer_flip(struct phytmac_rx_buffer *rx_buffer, unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = PHYTMAC_RX_PAGE_SIZE / 2; + rx_buffer->page_offset ^= truesize; +#else + truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(PHYTMAC_SKB_PAD + size); + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *phytmac_run_xdp(struct phytmac *pdata, + struct xdp_buff *xdp) +{ + int err, result = PHYTMAC_XDP_PASS; + struct bpf_prog *xdp_prog; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(pdata->xdp_prog); + + if (!xdp_prog) + goto xdp_out; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + result = phytmac_xdp_xmit_back(pdata, xdp); + if (result == PHYTMAC_XDP_CONSUMED) + goto out_failure; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(pdata->ndev, xdp, xdp_prog); + if (err) + goto out_failure; + result = PHYTMAC_XDP_REDIR; + break; + default: + bpf_warn_invalid_xdp_action(pdata->ndev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(pdata->ndev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = PHYTMAC_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); + return ERR_PTR(-result); +} + +static struct sk_buff *phytmac_rx_xdp_single(struct phytmac_queue *queue, + struct phytmac_dma_desc *desc, + unsigned int *xdp_xmit) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_rx_buffer *rx_buffer; + struct sk_buff *skb = NULL; + unsigned int len; + struct xdp_buff xdp; + unsigned char *hard_start; + u32 frame_sz = 0; + + len = 
hw_if->get_rx_pkt_len(pdata, desc); + rx_buffer = phytmac_get_rx_buffer(queue, queue->rx_tail, len); + +#if (PAGE_SIZE < 8192) + frame_sz = PHYTMAC_RX_PAGE_SIZE / 2; +#else + frame_sz = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(PHYTMAC_SKB_PAD + len); +#endif + xdp_init_buff(&xdp, frame_sz, &queue->xdp_rxq); + + hard_start = page_address(rx_buffer->page) + rx_buffer->page_offset - PHYTMAC_SKB_PAD; + xdp_prepare_buff(&xdp, hard_start, PHYTMAC_SKB_PAD, len, true); + xdp_buff_clear_frags_flag(&xdp); + skb = phytmac_run_xdp(pdata, &xdp); + + if (IS_ERR(skb)) { + unsigned int xdp_res = -PTR_ERR(skb); + + if (xdp_res & (PHYTMAC_XDP_TX | PHYTMAC_XDP_REDIR)) { + *xdp_xmit |= xdp_res; + phytmac_rx_buffer_flip(rx_buffer, len); + } else { + rx_buffer->pagecnt_bias++; + } + hw_if->zero_rx_desc_addr(desc); + phytmac_put_rx_buffer(queue, rx_buffer); + pdata->ndev->stats.rx_bytes += len; + queue->stats.rx_bytes += len; + } else { + rx_buffer->pagecnt_bias++; + } + + return skb; +} + static struct sk_buff *phytmac_rx_single(struct phytmac_queue *queue, struct phytmac_dma_desc *desc) { struct phytmac *pdata = queue->pdata; @@ -869,7 +1020,6 @@ static struct sk_buff *phytmac_rx_single(struct phytmac_queue *queue, struct phy len = hw_if->get_rx_pkt_len(pdata, desc); rx_buffer = phytmac_get_rx_buffer(queue, queue->rx_tail, len); - hw_if->zero_rx_desc_addr(desc); skb = phytmac_build_skb(rx_buffer, len); if (unlikely(!skb)) { @@ -881,6 +1031,7 @@ static struct sk_buff *phytmac_rx_single(struct phytmac_queue *queue, struct phy return NULL; } + hw_if->zero_rx_desc_addr(desc); phytmac_put_rx_buffer(queue, rx_buffer); skb->protocol = eth_type_trans(skb, pdata->ndev); @@ -916,7 +1067,6 @@ static struct sk_buff *phytmac_rx_frame(struct phytmac_queue *queue, desc = phytmac_get_rx_desc(queue, first_frag); rx_buffer = phytmac_get_rx_buffer(queue, first_frag, frag_len); - hw_if->zero_rx_desc_addr(desc); skb = phytmac_build_skb(rx_buffer, frag_len); if (unlikely(!skb)) { @@ 
-927,6 +1077,7 @@ static struct sk_buff *phytmac_rx_frame(struct phytmac_queue *queue, return NULL; } + hw_if->zero_rx_desc_addr(desc); phytmac_put_rx_buffer(queue, rx_buffer); for (frag = first_frag + 1; ; frag++) { @@ -1033,6 +1184,8 @@ static void phytmac_rx_clean(struct phytmac_queue *queue) queue->rx_next_to_alloc = queue->rx_head; /* make newly descriptor to hardware */ wmb(); + + hw_if->update_rx_tail(queue); } static int phytmac_rx(struct phytmac_queue *queue, struct napi_struct *napi, @@ -1042,6 +1195,7 @@ static int phytmac_rx(struct phytmac_queue *queue, struct napi_struct *napi, struct phytmac_hw_if *hw_if = pdata->hw_if; struct sk_buff *skb; struct phytmac_dma_desc *desc; + unsigned int xdp_xmit = 0; int count = 0; while (count < budget) { @@ -1055,10 +1209,15 @@ static int phytmac_rx(struct phytmac_queue *queue, struct napi_struct *napi, /* Ensure ctrl is at least as up-to-date as rxused */ dma_rmb(); - if (hw_if->rx_single_buffer(desc)) - skb = phytmac_rx_single(queue, desc); - else + if (hw_if->rx_single_buffer(desc)) { + skb = phytmac_rx_xdp_single(queue, desc, &xdp_xmit); + if (!IS_ERR(skb)) + skb = phytmac_rx_single(queue, desc); + } else { + if (pdata->xdp_prog) + netdev_warn(pdata->ndev, "xdp does not support multiple buffers!!\n"); skb = phytmac_rx_mbuffer(queue); + } if (!skb) { netdev_warn(pdata->ndev, "phytmac rx skb is NULL\n"); @@ -1067,18 +1226,28 @@ static int phytmac_rx(struct phytmac_queue *queue, struct napi_struct *napi, pdata->ndev->stats.rx_packets++; queue->stats.rx_packets++; - pdata->ndev->stats.rx_bytes += skb->len; - queue->stats.rx_bytes += skb->len; + if (!IS_ERR(skb)) { + pdata->ndev->stats.rx_bytes += skb->len; + queue->stats.rx_bytes += skb->len; + } queue->rx_tail = (queue->rx_tail + 1) & (pdata->rx_ring_size - 1); count++; + if (IS_ERR(skb)) { + skb = NULL; + continue; + } + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) phytmac_ptp_rxstamp(pdata, skb, desc); napi_gro_receive(napi, skb); } + if (xdp_xmit & 
PHYTMAC_XDP_REDIR) + xdp_do_flush_map(); + phytmac_rx_clean(queue); return count; @@ -1096,8 +1265,13 @@ static void phytmac_tx_unmap(struct phytmac *pdata, struct phytmac_tx_skb *tx_sk tx_skb->addr = 0; } - if (tx_skb->skb) { - napi_consume_skb(tx_skb->skb, budget); + if (tx_skb->type == PHYTMAC_TYPE_XDP) { + if (tx_skb->xdpf) + xdp_return_frame(tx_skb->xdpf); + tx_skb->xdpf = NULL; + } else { + if (tx_skb->skb) + napi_consume_skb(tx_skb->skb, budget); tx_skb->skb = NULL; } } @@ -1131,6 +1305,19 @@ static int phytmac_maybe_wake_tx_queue(struct phytmac_queue *queue) return (space <= (3 * pdata->tx_ring_size / 4)) ? 1 : 0; } +static inline void phytmac_do_ptp_txstamp(struct phytmac_queue *queue, + struct sk_buff *skb, + struct phytmac_dma_desc *desc) +{ + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP) && + !phytmac_ptp_one_step(skb)) { + phytmac_ptp_txstamp(queue, skb, desc); + } + } +} + static int phytmac_tx_clean(struct phytmac_queue *queue, int budget) { struct phytmac *pdata = queue->pdata; @@ -1143,7 +1330,7 @@ static int phytmac_tx_clean(struct phytmac_queue *queue, int budget) unsigned int tail = queue->tx_tail; unsigned int head; - spin_lock(&pdata->lock); + spin_lock(&queue->tx_lock); for (head = queue->tx_head; head != tail && packet_count < budget; ) { struct sk_buff *skb; @@ -1157,30 +1344,35 @@ static int phytmac_tx_clean(struct phytmac_queue *queue, int budget) /* Process all buffers of the current transmitted frame */ for (;; head++) { tx_skb = phytmac_get_tx_skb(queue, head); - skb = tx_skb->skb; - - if (skb) { - complete = 1; - if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) { - if (unlikely(skb_shinfo(skb)->tx_flags & - SKBTX_HW_TSTAMP) && - !phytmac_ptp_one_step(skb)) { - phytmac_ptp_txstamp(queue, skb, desc); - } - } - if (netif_msg_drv(pdata)) - netdev_info(pdata->ndev, "desc %u (data %p) tx complete\n", - head, tx_skb->skb->data); - - pdata->ndev->stats.tx_packets++; - 
queue->stats.tx_packets++; - pdata->ndev->stats.tx_bytes += tx_skb->skb->len; - queue->stats.tx_bytes += tx_skb->skb->len; - packet_count++; + if (tx_skb->type == PHYTMAC_TYPE_SKB) { + skb = tx_skb->skb; + if (skb) { + complete = 1; + phytmac_do_ptp_txstamp(queue, skb, desc); + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "desc %u (data %p) tx complete\n", + head, tx_skb->skb->data); + + pdata->ndev->stats.tx_packets++; + queue->stats.tx_packets++; + pdata->ndev->stats.tx_bytes += tx_skb->skb->len; + queue->stats.tx_bytes += tx_skb->skb->len; + packet_count++; + } + } else if (tx_skb->type == PHYTMAC_TYPE_XDP) { + if (tx_skb->xdpf) { + complete = 1; + pdata->ndev->stats.tx_packets++; + queue->stats.tx_packets++; + pdata->ndev->stats.tx_bytes += tx_skb->xdpf->len; + queue->stats.tx_bytes += tx_skb->xdpf->len; + packet_count++; + } } - /* Now we can safely release resources */ + /* Now we can safely release resources */ phytmac_tx_unmap(pdata, tx_skb, budget); if (complete) { @@ -1198,7 +1390,7 @@ static int phytmac_tx_clean(struct phytmac_queue *queue, int budget) if (__netif_subqueue_stopped(pdata->ndev, queue_index) && (phytmac_maybe_wake_tx_queue(queue))) netif_wake_subqueue(pdata->ndev, queue_index); - spin_unlock(&pdata->lock); + spin_unlock(&queue->tx_lock); return packet_count; } @@ -1281,6 +1473,7 @@ static inline int phytmac_clear_csum(struct sk_buff *skb) static int phytmac_add_fcs(struct sk_buff **skb, struct net_device *ndev) { +#ifdef PHYTMAC_SW_FCS bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || skb_is_nonlinear(*skb); int padlen = ETH_ZLEN - (*skb)->len; @@ -1290,7 +1483,7 @@ static int phytmac_add_fcs(struct sk_buff **skb, struct net_device *ndev) u32 fcs; int i; - if ((ndev->features & NETIF_F_HW_CSUM) || + if (!(ndev->features & NETIF_F_HW_CSUM) || !((*skb)->ip_summed != CHECKSUM_PARTIAL) || skb_shinfo(*skb)->gso_size || phytmac_ptp_one_step(*skb)) return 0; @@ -1327,6 +1520,8 @@ static int phytmac_add_fcs(struct sk_buff 
**skb, struct net_device *ndev) for (i = 0; i < 4; ++i) skb_put_u8(*skb, (fcs >> (i * 8)) & 0xff); +#endif + return 0; } @@ -1376,13 +1571,13 @@ static int phytmac_packet_info(struct phytmac *pdata, desc_cnt += TXD_USE_COUNT(pdata, skb_frag_size(&skb_shinfo(skb)->frags[f])); packet->desc_cnt = desc_cnt; - if ((!(pdata->ndev->features & NETIF_F_HW_CSUM)) && +#ifdef PHYTMAC_SW_FCS + if ((pdata->ndev->features & NETIF_F_HW_CSUM) && skb->ip_summed != CHECKSUM_PARTIAL && !is_lso && !phytmac_ptp_one_step(skb)) packet->nocrc = 1; - else - packet->nocrc = 0; +#endif if (netif_msg_pktdata(pdata)) { netdev_info(pdata->ndev, "packet info: desc_cnt=%d, nocrc=%d,ip_summed=%d\n", @@ -1422,6 +1617,7 @@ static unsigned int phytmac_tx_map(struct phytmac *pdata, /* Save info to properly release resources */ tx_skb->skb = NULL; + tx_skb->type = PHYTMAC_TYPE_SKB; tx_skb->addr = mapping; tx_skb->length = size; tx_skb->mapped_as_page = false; @@ -1450,6 +1646,7 @@ static unsigned int phytmac_tx_map(struct phytmac *pdata, /* Save info to properly release resources */ tx_skb->skb = NULL; + tx_skb->type = PHYTMAC_TYPE_SKB; tx_skb->addr = mapping; tx_skb->length = size; tx_skb->mapped_as_page = true; @@ -1514,6 +1711,59 @@ static inline void phytmac_init_ring(struct phytmac *pdata) hw_if->init_ring_hw(pdata); } +static int phytmac_start_xmit_xdp(struct phytmac *pdata, + struct phytmac_queue *queue, + struct xdp_frame *xdpf) +{ + u32 len; + struct phytmac_tx_skb *tx_buffer; + struct packet_info packet; + dma_addr_t dma; + struct phytmac_hw_if *hw_if = pdata->hw_if; + u16 tx_tail; + + len = xdpf->len; + + memset(&packet, 0, sizeof(struct packet_info)); + + if (unlikely(!phytmac_txdesc_unused(queue))) + return PHYTMAC_XDP_CONSUMED; + + dma = dma_map_single(pdata->dev, xdpf->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, dma)) + return PHYTMAC_XDP_CONSUMED; + + /* record the location of the first descriptor for this packet */ + tx_buffer = phytmac_get_tx_skb(queue, 
queue->tx_tail); + tx_buffer->mapped_as_page = false; + + /* Temporarily set the tail pointer for the next package */ + tx_tail = queue->tx_tail + 1; + + dma_unmap_len_set(tx_buffer, length, len); + dma_unmap_addr_set(tx_buffer, addr, dma); + tx_buffer->type = PHYTMAC_TYPE_XDP; + tx_buffer->xdpf = xdpf; + + packet.lso = 0; + packet.mss = 0; + packet.nocrc = 0; + + /* Avoid any potential race with xdp_xmit and cleanup */ + smp_wmb(); + + hw_if->tx_map(queue, tx_tail, &packet); + + queue->tx_tail = tx_tail & (pdata->tx_ring_size - 1); + + hw_if->transmit(queue); + + /* Make sure there is space in the ring for the next send. */ + phytmac_maybe_stop_tx_queue(queue, PHYTMAC_DESC_NEEDED); + + return PHYTMAC_XDP_TX; +} + static netdev_tx_t phytmac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct phytmac *pdata = netdev_priv(ndev); @@ -1522,7 +1772,6 @@ static netdev_tx_t phytmac_start_xmit(struct sk_buff *skb, struct net_device *nd struct phytmac_queue *queue = &pdata->queues[queue_index]; netdev_tx_t ret = NETDEV_TX_OK; struct packet_info packet; - unsigned long flags; if (phytmac_clear_csum(skb)) { dev_kfree_skb_any(skb); @@ -1543,7 +1792,7 @@ static netdev_tx_t phytmac_start_xmit(struct sk_buff *skb, struct net_device *nd if (netif_msg_pktdata(pdata)) phytmac_dump_pkt(pdata, skb, true); - spin_lock_irqsave(&pdata->lock, flags); + spin_lock_bh(&queue->tx_lock); /* Check that there are enough descriptors available */ ret = phytmac_maybe_stop_tx_queue(queue, packet.desc_cnt); if (ret) @@ -1562,7 +1811,7 @@ static netdev_tx_t phytmac_start_xmit(struct sk_buff *skb, struct net_device *nd hw_if->transmit(queue); tx_return: - spin_unlock_irqrestore(&pdata->lock, flags); + spin_unlock_bh(&queue->tx_lock); return ret; } @@ -1668,6 +1917,7 @@ static void phytmac_mac_link_down(struct phylink_config *config, unsigned int mo unsigned int q; unsigned long flags; struct phytmac_tx_skb *tx_skb; + struct phytmac_dma_desc *tx_desc = NULL; int i; if 
(netif_msg_link(pdata)) { @@ -1686,18 +1936,22 @@ static void phytmac_mac_link_down(struct phylink_config *config, unsigned int mo /* Disable Rx and Tx */ hw_if->enable_network(pdata, false, PHYTMAC_RX | PHYTMAC_TX); + spin_unlock_irqrestore(&pdata->lock, flags); /* Tx clean */ for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + spin_lock_bh(&queue->tx_lock); for (i = 0; i < pdata->tx_ring_size; i++) { tx_skb = phytmac_get_tx_skb(queue, i); if (tx_skb) phytmac_tx_unmap(pdata, tx_skb, 0); + + tx_desc = phytmac_get_tx_desc(queue, i); + hw_if->zero_tx_desc(tx_desc); } + spin_unlock_bh(&queue->tx_lock); } - spin_unlock_irqrestore(&pdata->lock, flags); - netif_tx_stop_all_queues(ndev); } @@ -1763,6 +2017,12 @@ int phytmac_mdio_register(struct phytmac *pdata) goto err_out; } + if (hw_if->mdio_idle) { + ret = hw_if->mdio_idle(pdata); + if (ret) + goto free_mdio; + } + pdata->mii_bus->name = "phytmac_mii_bus"; pdata->mii_bus->read = &phytmac_mdio_read_c22; pdata->mii_bus->write = &phytmac_mdio_write_c22; @@ -1890,6 +2150,8 @@ static int phytmac_phylink_create(struct phytmac *pdata) pdata->phylink_config.dev = &pdata->ndev->dev; pdata->phylink_config.type = PHYLINK_NETDEV; + pdata->phylink_config.mac_managed_pm = true; + if (pdata->phy_interface == PHY_INTERFACE_MODE_SGMII || pdata->phy_interface == PHY_INTERFACE_MODE_1000BASEX || pdata->phy_interface == PHY_INTERFACE_MODE_2500BASEX || @@ -1923,10 +2185,6 @@ static int phytmac_open(struct net_device *ndev) if (netif_msg_probe(pdata)) dev_dbg(pdata->dev, "open\n"); - /* phytmac_powerup */ - if (pdata->power_state == PHYTMAC_POWEROFF) - hw_if->poweron(pdata, PHYTMAC_POWERON); - if (hw_if->init_msg_ring) hw_if->init_msg_ring(pdata); @@ -2033,10 +2291,6 @@ static int phytmac_close(struct net_device *ndev) if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) phytmac_ptp_unregister(pdata); - /* phytmac_powerup */ - if (pdata->power_state == PHYTMAC_POWERON) - hw_if->poweron(pdata, PHYTMAC_POWEROFF); - return 0; } 
@@ -2174,6 +2428,140 @@ int phytmac_reset_ringsize(struct phytmac *pdata, u32 rx_size, u32 tx_size) return ret; } +static int phytmac_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + int i, frame_size = dev->mtu + PHYTMAC_ETH_PKT_HDR_PAD; + struct phytmac *pdata = netdev_priv(dev); + struct bpf_prog *old_prog; + bool running = netif_running(dev); + bool need_reset; + + /* verify phytmac rx ring attributes are sufficient for XDP */ + if (frame_size > phytmac_calc_rx_buf_len()) { + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", + phytmac_calc_rx_buf_len(), frame_size); + return -EINVAL; + } + + old_prog = xchg(&pdata->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + /* device is up and bpf is added/removed, must setup the RX queues */ + if (need_reset && running) { + phytmac_close(dev); + } else { + for (i = 0; i < pdata->queues_num; i++) + (void)xchg(&pdata->queues[i].xdp_prog, + pdata->xdp_prog); + } + + if (old_prog) + bpf_prog_put(old_prog); + + /* bpf is just replaced, RXQ and MTU are already setup */ + if (!need_reset) + return 0; + + if (prog) + xdp_features_set_redirect_target(dev, false); + else + xdp_features_clear_redirect_target(dev); + + if (running) + phytmac_open(dev); + + return 0; +} + +static int phytmac_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return phytmac_xdp_setup(dev, xdp->prog); + default: + return -EINVAL; + } +} + +static struct phytmac_queue *phytmac_xdp_txq_mapping(struct phytmac *pdata) +{ + unsigned int r_idx = smp_processor_id(); + + if (r_idx >= pdata->queues_num) + r_idx = r_idx % pdata->queues_num; + + return &pdata->queues[r_idx]; +} + +static int phytmac_xdp_xmit_back(struct phytmac *pdata, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + int cpu = smp_processor_id(); + struct phytmac_queue *queue; + struct netdev_queue *nq; + u32 ret; + + if (unlikely(!xdpf)) + return 
PHYTMAC_XDP_CONSUMED; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + queue = pdata->xdp_prog ? phytmac_xdp_txq_mapping(pdata) : NULL; + if (unlikely(!queue)) + return PHYTMAC_XDP_CONSUMED; + + nq = phytmac_get_txq(pdata, queue); + __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + ret = phytmac_start_xmit_xdp(pdata, queue, xdpf); + __netif_tx_unlock(nq); + + return ret; +} + +static int phytmac_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct phytmac *pdata = netdev_priv(dev); + int cpu = smp_processor_id(); + struct phytmac_queue *queue; + struct netdev_queue *nq; + int nxmit = 0; + int i; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* During program transitions its possible pdata->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + queue = pdata->xdp_prog ? 
phytmac_xdp_txq_mapping(pdata) : NULL; + if (unlikely(!queue)) + return -ENXIO; + + nq = phytmac_get_txq(pdata, queue); + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = phytmac_start_xmit_xdp(pdata, queue, xdpf); + if (err != PHYTMAC_XDP_TX) + break; + nxmit++; + } + + __netif_tx_unlock(nq); + + return nxmit; +} + static const struct net_device_ops phytmac_netdev_ops = { .ndo_open = phytmac_open, .ndo_stop = phytmac_close, @@ -2188,6 +2576,8 @@ static const struct net_device_ops phytmac_netdev_ops = { .ndo_features_check = phytmac_features_check, .ndo_vlan_rx_add_vid = ncsi_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ncsi_vlan_rx_kill_vid, + .ndo_bpf = phytmac_xdp, + .ndo_xdp_xmit = phytmac_xdp_xmit, }; static int phytmac_init(struct phytmac *pdata) @@ -2294,6 +2684,17 @@ void phytmac_default_config(struct phytmac *pdata) ndev->max_mtu = ETH_DATA_LEN; ndev->features = ndev->hw_features; + ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; + + switch (pdata->version) { + case VERSION_V3: + strscpy(pdata->fw_version, "MAC_FTM300", sizeof(pdata->fw_version)); + break; + + default: + strscpy(pdata->fw_version, "", sizeof(pdata->fw_version)); + break; + } } static void phytmac_ncsi_handler(struct ncsi_dev *nd) @@ -2309,6 +2710,7 @@ int phytmac_drv_probe(struct phytmac *pdata) { struct net_device *ndev = pdata->ndev; struct device *dev = pdata->dev; + struct phytmac_hw_if *hw_if = pdata->hw_if; int ret = 0; if (netif_msg_probe(pdata)) @@ -2332,12 +2734,11 @@ int phytmac_drv_probe(struct phytmac *pdata) goto err_out; } - netif_carrier_off(ndev); - ret = register_netdev(ndev); - if (ret) { - dev_err(pdata->dev, "Cannot register net device, aborting.\n"); - goto err_out; - } + if (pdata->power_state == PHYTMAC_POWEROFF) + hw_if->poweron(pdata, PHYTMAC_POWERON); + + if 
(hw_if->init_msg_ring) + hw_if->init_msg_ring(pdata); if (pdata->use_mii && !pdata->mii_bus) { ret = phytmac_mdio_register(pdata); @@ -2359,6 +2760,13 @@ int phytmac_drv_probe(struct phytmac *pdata) goto err_phylink_init; } + ret = register_netdev(ndev); + if (ret) { + dev_err(pdata->dev, "Cannot register net device, aborting.\n"); + goto err_phylink_init; + } + netif_carrier_off(ndev); + if (netif_msg_probe(pdata)) dev_dbg(pdata->dev, "probe successfully! Phytium %s at 0x%08lx irq %d (%pM)\n", "MAC", ndev->base_addr, ndev->irq, ndev->dev_addr); @@ -2373,8 +2781,6 @@ int phytmac_drv_probe(struct phytmac *pdata) if (pdata->mii_bus) mdiobus_free(pdata->mii_bus); - unregister_netdev(ndev); - err_out: return ret; } @@ -2383,12 +2789,15 @@ EXPORT_SYMBOL_GPL(phytmac_drv_probe); int phytmac_drv_remove(struct phytmac *pdata) { struct net_device *ndev = pdata->ndev; + struct phytmac_hw_if *hw_if = pdata->hw_if; if (ndev) { if (pdata->use_ncsi && pdata->ncsidev) ncsi_unregister_dev(pdata->ncsidev); unregister_netdev(ndev); + if (pdata->power_state == PHYTMAC_POWERON) + hw_if->poweron(pdata, PHYTMAC_POWEROFF); if (pdata->use_mii && pdata->mii_bus) { mdiobus_unregister(pdata->mii_bus); @@ -2422,20 +2831,23 @@ int phytmac_drv_suspend(struct phytmac *pdata) /* napi_disable */ for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + hw_if->disable_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); + hw_if->clear_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); napi_disable(&queue->tx_napi); napi_disable(&queue->rx_napi); } if (pdata->wol) { hw_if->set_wol(pdata, pdata->wol); + pdata->power_state = PHYTMAC_POWEROFF; } else { rtnl_lock(); phylink_stop(pdata->phylink); rtnl_unlock(); spin_lock_irqsave(&pdata->lock, flags); hw_if->reset_hw(pdata); - hw_if->poweron(pdata, PHYTMAC_POWEROFF); spin_unlock_irqrestore(&pdata->lock, flags); + hw_if->poweron(pdata, PHYTMAC_POWEROFF); } return 0; @@ -2449,15 +2861,14 @@ int 
phytmac_drv_resume(struct phytmac *pdata) struct phytmac_hw_if *hw_if = pdata->hw_if; struct ethtool_rx_fs_item *item; - if (!netif_running(pdata->ndev)) - return 0; - - if (pdata->power_state == PHYTMAC_POWEROFF) - hw_if->poweron(pdata, PHYTMAC_POWERON); + hw_if->poweron(pdata, PHYTMAC_POWERON); if (hw_if->init_msg_ring) hw_if->init_msg_ring(pdata); + if (!netif_running(pdata->ndev)) + return 0; + if (pdata->wol) { hw_if->set_wol(pdata, 0); rtnl_lock(); @@ -2504,7 +2915,7 @@ struct phytmac *phytmac_alloc_pdata(struct device *dev) pdata->dev = dev; spin_lock_init(&pdata->lock); - spin_lock_init(&pdata->msg_lock); + spin_lock_init(&pdata->msg_ring.msg_lock); spin_lock_init(&pdata->ts_clk_lock); pdata->msg_enable = netif_msg_init(debug, PHYTMAC_DEFAULT_MSG_ENABLE); @@ -2520,6 +2931,89 @@ void phytmac_free_pdata(struct phytmac *pdata) } EXPORT_SYMBOL_GPL(phytmac_free_pdata); +void phytmac_drv_shutdown(struct phytmac *pdata) +{ + struct net_device *netdev = pdata->ndev; + struct phytmac_hw_if *hw_if = pdata->hw_if; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) + phytmac_close(netdev); + rtnl_unlock(); + + if (pdata->power_state == PHYTMAC_POWERON) + hw_if->poweron(pdata, PHYTMAC_POWEROFF); +} +EXPORT_SYMBOL_GPL(phytmac_drv_shutdown); + +static void phytmac_devm_iounmap_np(struct device *dev, void *res) +{ + iounmap(*(void __iomem **)res); +} + +static void __iomem *phytmac_devm_ioremap_np(struct device *dev, resource_size_t offset, + resource_size_t size) +{ + void __iomem **ptr, *addr = NULL; + + ptr = devres_alloc_node(phytmac_devm_iounmap_np, sizeof(*ptr), GFP_KERNEL, + dev_to_node(dev)); + if (!ptr) + return NULL; + + addr = ioremap_np(offset, size); + if (addr) { + *ptr = addr; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return addr; +} + +void __iomem * +phytmac_devm_ioremap_resource_np(struct device *dev, const struct resource *res) +{ + resource_size_t size; + void __iomem *dest_ptr; + char *pretty_name; + + 
if (!res || resource_type(res) != IORESOURCE_MEM) { + dev_err(dev, "invalid resource %pR\n", res); + return IOMEM_ERR_PTR(-EINVAL); + } + + size = resource_size(res); + + if (res->name) + pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", + dev_name(dev), res->name); + else + pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); + if (!pretty_name) { + dev_err(dev, "can't generate pretty name for resource %pR\n", res); + return IOMEM_ERR_PTR(-ENOMEM); + } + + if (!devm_request_mem_region(dev, res->start, size, pretty_name)) { + dev_err(dev, "can't request region for resource %pR\n", res); + return IOMEM_ERR_PTR(-EBUSY); + } + + dest_ptr = phytmac_devm_ioremap_np(dev, res->start, size); + if (!dest_ptr) { + dev_err(dev, "ioremap failed for resource %pR\n", res); + devm_release_mem_region(dev, res->start, size); + dest_ptr = IOMEM_ERR_PTR(-ENOMEM); + } + + return dest_ptr; +} +EXPORT_SYMBOL_GPL(phytmac_devm_ioremap_resource_np); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Phytium Ethernet driver"); MODULE_AUTHOR("Wenting Song"); diff --git a/drivers/net/ethernet/phytium/phytmac_pci.c b/drivers/net/ethernet/phytium/phytmac_pci.c index 60bd296d8f0b6c95c1cb3b210f719efb1241773f..fac5d717ad76695369ed01fa9c8ad3a346238fdd 100644 --- a/drivers/net/ethernet/phytium/phytmac_pci.c +++ b/drivers/net/ethernet/phytium/phytmac_pci.c @@ -2,6 +2,9 @@ /* * Phytium GMAC PCI wrapper. * + * Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. 
+ * + * Author: Wenting Song */ #include @@ -21,7 +24,6 @@ struct phytmac_data { struct phytmac_hw_if *hw_if; u32 caps; - u32 tsu_rate; u16 queue_num; int speed; bool duplex; @@ -72,7 +74,7 @@ static int phytmac_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i /* Obtain the mmio areas for the device */ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); - ret = pcim_iomap_regions(pdev, bar_mask, PHYTMAC_DRV_NAME); + ret = pcim_iomap_regions(pdev, bar_mask, PHYTMAC_PCI_DRV_NAME); if (ret) { dev_err(dev, "pcim_iomap_regions failed\n"); goto err_pci_enable; @@ -303,7 +305,7 @@ static const struct dev_pm_ops phytmac_pci_pm_ops = { }; static struct pci_driver phytmac_driver = { - .name = PHYTMAC_DRV_NAME, + .name = PHYTMAC_PCI_DRV_NAME, .id_table = phytmac_pci_table, .probe = phytmac_pci_probe, .remove = phytmac_pci_remove, diff --git a/drivers/net/ethernet/phytium/phytmac_platform.c b/drivers/net/ethernet/phytium/phytmac_platform.c index 9390056fdc7a95a86917608ba97561ca8e362c49..2f2170cf0775ae4bec70ed9721073df22aefacc1 100644 --- a/drivers/net/ethernet/phytium/phytmac_platform.c +++ b/drivers/net/ethernet/phytium/phytmac_platform.c @@ -2,6 +2,9 @@ /* * Phytium GMAC Platform wrapper. * + * Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. 
+ * + * Author: Wenting Song */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -21,18 +24,16 @@ static const struct phytmac_config phytium_1p0_config = { | PHYTMAC_CAPS_JUMBO | PHYTMAC_CAPS_LSO, .queue_num = 4, - .tsu_rate = 300000000, }; static const struct phytmac_config phytium_2p0_config = { .hw_if = &phytmac_2p0_hw, .caps = PHYTMAC_CAPS_TAILPTR - | PHYTMAC_CAPS_LPI + | PHYTMAC_CAPS_RXPTR | PHYTMAC_CAPS_LSO | PHYTMAC_CAPS_MSG | PHYTMAC_CAPS_JUMBO, .queue_num = 2, - .tsu_rate = 300000000, }; #if defined(CONFIG_OF) @@ -47,7 +48,8 @@ MODULE_DEVICE_TABLE(of, phytmac_dt_ids); #ifdef CONFIG_ACPI static const struct acpi_device_id phytmac_acpi_ids[] = { { .id = "PHYT0046", .driver_data = (kernel_ulong_t)&phytium_1p0_config }, - { } + { .id = "PHYT0056", .driver_data = (kernel_ulong_t)&phytium_2p0_config }, + {} }; MODULE_DEVICE_TABLE(acpi, phytmac_acpi_ids); @@ -55,6 +57,95 @@ MODULE_DEVICE_TABLE(acpi, phytmac_acpi_ids); #define phytmac_acpi_ids NULL #endif +static const char *phytmac_phy_modes(enum phytmac_interface interface) +{ + switch (interface) { + case PHYTMAC_PHY_INTERFACE_MODE_NA: + return ""; + case PHYTMAC_PHY_INTERFACE_MODE_INTERNAL: + return "internal"; + case PHYTMAC_PHY_INTERFACE_MODE_MII: + return "mii"; + case PHYTMAC_PHY_INTERFACE_MODE_GMII: + return "gmii"; + case PHYTMAC_PHY_INTERFACE_MODE_SGMII: + return "sgmii"; + case PHYTMAC_PHY_INTERFACE_MODE_TBI: + return "tbi"; + case PHYTMAC_PHY_INTERFACE_MODE_REVMII: + return "rev-mii"; + case PHYTMAC_PHY_INTERFACE_MODE_RMII: + return "rmii"; + case PHYTMAC_PHY_INTERFACE_MODE_RGMII: + return "rgmii"; + case PHYTMAC_PHY_INTERFACE_MODE_RGMII_ID: + return "rgmii-id"; + case PHYTMAC_PHY_INTERFACE_MODE_RGMII_RXID: + return "rgmii-rxid"; + case PHYTMAC_PHY_INTERFACE_MODE_RGMII_TXID: + return "rgmii-txid"; + case PHYTMAC_PHY_INTERFACE_MODE_RTBI: + return "rtbi"; + case PHYTMAC_PHY_INTERFACE_MODE_SMII: + return "smii"; + case PHYTMAC_PHY_INTERFACE_MODE_XGMII: + return "xgmii"; + case 
PHYTMAC_PHY_INTERFACE_MODE_MOCA: + return "moca"; + case PHYTMAC_PHY_INTERFACE_MODE_QSGMII: + return "qsgmii"; + case PHYTMAC_PHY_INTERFACE_MODE_TRGMII: + return "trgmii"; + case PHYTMAC_PHY_INTERFACE_MODE_100BASEX: + return "100base-x"; + case PHYTMAC_PHY_INTERFACE_MODE_1000BASEX: + return "1000base-x"; + case PHYTMAC_PHY_INTERFACE_MODE_2500BASEX: + return "2500base-x"; + case PHYTMAC_PHY_INTERFACE_MODE_5GBASER: + return "5gbase-r"; + case PHYTMAC_PHY_INTERFACE_MODE_RXAUI: + return "rxaui"; + case PHYTMAC_PHY_INTERFACE_MODE_XAUI: + return "xaui"; + case PHYTMAC_PHY_INTERFACE_MODE_10GBASER: + return "10gbase-r"; + case PHYTMAC_PHY_INTERFACE_MODE_USXGMII: + return "usxgmii"; + case PHYTMAC_PHY_INTERFACE_MODE_10GKR: + return "10gbase-kr"; + default: + return "unknown"; + } +} + +static int phytmac_v2_get_phy_mode(struct platform_device *pdev) +{ + const char *pm; + int err, i; + int phy_interface; + + err = device_property_read_string(&pdev->dev, "phy-mode", &pm); + if (err < 0) + return err; + + phy_interface = PHYTMAC_PHY_INTERFACE_MODE_MAX + 1; + for (i = 0; i < PHYTMAC_PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(pm, phytmac_phy_modes(i))) { + phy_interface = i; + dev_notice(&pdev->dev, "Phy mode is %s.\n", pm); + break; + } + } + + if (phy_interface > PHYTMAC_PHY_INTERFACE_MODE_MAX) { + dev_err(&pdev->dev, "Invalid phy mode value: %s!\n", pm); + return -EINVAL; + } + + return phy_interface; +} + static int phytmac_get_phy_mode(struct platform_device *pdev) { const char *pm; @@ -80,6 +171,8 @@ static int phytmac_plat_probe(struct platform_device *pdev) struct phytmac *pdata; int ret, i; u32 queue_num; + const struct of_device_id *match = NULL; + const struct acpi_device_id *match_acpi = NULL; pdata = phytmac_alloc_pdata(&pdev->dev); if (IS_ERR(pdata)) { @@ -92,8 +185,6 @@ static int phytmac_plat_probe(struct platform_device *pdev) pdata->platdev = pdev; if (pdev->dev.of_node) { - const struct of_device_id *match; - match = of_match_node(phytmac_dt_ids, np); 
if (match && match->data) { phytmac_config = match->data; @@ -102,11 +193,9 @@ static int phytmac_plat_probe(struct platform_device *pdev) pdata->queues_max_num = phytmac_config->queue_num; } } else if (has_acpi_companion(&pdev->dev)) { - const struct acpi_device_id *match; - - match = acpi_match_device(phytmac_acpi_ids, &pdev->dev); - if (match && match->driver_data) { - phytmac_config = (void *)match->driver_data; + match_acpi = acpi_match_device(phytmac_acpi_ids, &pdev->dev); + if (match_acpi && match_acpi->driver_data) { + phytmac_config = (void *)match_acpi->driver_data; pdata->hw_if = phytmac_config->hw_if; pdata->capacities = phytmac_config->caps; pdata->queues_max_num = phytmac_config->queue_num; @@ -114,7 +203,8 @@ static int phytmac_plat_probe(struct platform_device *pdev) } i = 0; - pdata->mac_regs = devm_platform_get_and_ioremap_resource(pdev, i, ®s); + regs = platform_get_resource(pdev, IORESOURCE_MEM, i); + pdata->mac_regs = phytmac_devm_ioremap_resource_np(&pdev->dev, regs); if (IS_ERR(pdata->mac_regs)) { dev_err(&pdev->dev, "mac_regs ioremap failed\n"); ret = PTR_ERR(pdata->mac_regs); @@ -122,6 +212,18 @@ static int phytmac_plat_probe(struct platform_device *pdev) } pdata->ndev->base_addr = regs->start; + if (pdev->dev.of_node && match) { + if (!strcmp(match->compatible, "phytium,gmac-1.0")) + pdata->version = PHYTMAC_READ(pdata, PHYTMAC_VERSION) & 0xff; + else + pdata->version = VERSION_V3; + } else if (has_acpi_companion(&pdev->dev) && match_acpi) { + if (!strcmp(match_acpi->id, "PHYT0046")) + pdata->version = PHYTMAC_READ(pdata, PHYTMAC_VERSION) & 0xff; + else + pdata->version = VERSION_V3; + } + if (pdata->capacities & PHYTMAC_CAPS_MSG) { ++i; regs = platform_get_resource(pdev, IORESOURCE_MEM, i); @@ -134,11 +236,10 @@ static int phytmac_plat_probe(struct platform_device *pdev) } } - if (device_property_read_bool(&pdev->dev, "lpi")) - pdata->capacities |= PHYTMAC_CAPS_LPI; + if (device_property_read_bool(&pdev->dev, "powerctrl")) + 
pdata->capacities |= PHYTMAC_CAPS_PWCTRL; - if (pdata->capacities & PHYTMAC_CAPS_LPI) { - /* lpi resource */ + if (pdata->version == VERSION_V3 && pdev->dev.of_node) { ++i; regs = platform_get_resource(pdev, IORESOURCE_MEM, i); if (regs) { @@ -181,6 +282,14 @@ static int phytmac_plat_probe(struct platform_device *pdev) else pdata->phy_interface = ret; + if (pdata->version == VERSION_V3) { + ret = phytmac_v2_get_phy_mode(pdev); + if (ret < 0) + pdata->phytmac_v2_interface = PHYTMAC_PHY_INTERFACE_MODE_USXGMII; + else + pdata->phytmac_v2_interface = ret; + } + ret = phytmac_drv_probe(pdata); if (ret) goto err_mem; @@ -212,6 +321,13 @@ static int phytmac_plat_remove(struct platform_device *pdev) return 0; } +static void phytmac_plat_shutdown(struct platform_device *pdev) +{ + struct phytmac *pdata = platform_get_drvdata(pdev); + + phytmac_drv_shutdown(pdata); +} + static int __maybe_unused phytmac_plat_suspend(struct device *dev) { struct phytmac *pdata = dev_get_drvdata(dev); @@ -240,11 +356,12 @@ static struct platform_driver phytmac_driver = { .probe = phytmac_plat_probe, .remove = phytmac_plat_remove, .driver = { - .name = PHYTMAC_DRV_NAME, + .name = PHYTMAC_PLAT_DRV_NAME, .of_match_table = of_match_ptr(phytmac_dt_ids), .acpi_match_table = phytmac_acpi_ids, .pm = &phytmac_plat_pm_ops, }, + .shutdown = phytmac_plat_shutdown, }; module_platform_driver(phytmac_driver); diff --git a/drivers/net/ethernet/phytium/phytmac_ptp.c b/drivers/net/ethernet/phytium/phytmac_ptp.c index 26b1b75edbde1ded359a5e6fbef51c3306dc7379..4803842bd7c17ccc6a3bb7163c8819155b473fc5 100644 --- a/drivers/net/ethernet/phytium/phytmac_ptp.c +++ b/drivers/net/ethernet/phytium/phytmac_ptp.c @@ -2,6 +2,9 @@ /** * 1588 PTP support for Phytium GMAC device. * + * Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. 
+ * + * Author: Wenting Song */ #include #include diff --git a/drivers/net/ethernet/phytium/phytmac_ptp.h b/drivers/net/ethernet/phytium/phytmac_ptp.h index 72c8b7c67413706496d44d7c7fac110a70513aa4..e9497a172d25e235a90e6cd2185abce85cdaf2d2 100644 --- a/drivers/net/ethernet/phytium/phytmac_ptp.h +++ b/drivers/net/ethernet/phytium/phytmac_ptp.h @@ -2,6 +2,7 @@ /* * Phytium Ethernet Controller driver * + * Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/net/ethernet/phytium/phytmac_v1.c b/drivers/net/ethernet/phytium/phytmac_v1.c index 72a6eeaec3563dd67c0cc9bb853ca65ee71ba95e..870f07edc58fc55b19f95f9d0191a0d8a084f720 100644 --- a/drivers/net/ethernet/phytium/phytmac_v1.c +++ b/drivers/net/ethernet/phytium/phytmac_v1.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. 
*/ #include #include @@ -14,6 +15,7 @@ #include #include #include +#include #include "phytmac.h" #include "phytmac_v1.h" @@ -211,6 +213,11 @@ static int phytmac_mac_linkup(struct phytmac *pdata, phy_interface_t interface, else PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_100M); + if (interface == PHY_INTERFACE_MODE_SGMII) { + if (speed == SPEED_2500) + phytmac_enable_autoneg(pdata, 0); + } + return 0; } @@ -289,7 +296,6 @@ static int phytmac_set_mac_addr(struct phytmac *pdata, const u8 *addr) static void phytmac_reset_hw(struct phytmac *pdata) { - struct phytmac_queue *queue; unsigned int q; u32 ctrl; @@ -300,7 +306,7 @@ static void phytmac_reset_hw(struct phytmac *pdata) PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); /* Disable and clear all interrupts and disable queues */ - for (q = 0, queue = pdata->queues; q < pdata->queues_max_num; ++q, ++queue) { + for (q = 0; q < pdata->queues_max_num; ++q) { if (q == 0) { PHYTMAC_WRITE(pdata, PHYTMAC_ID, -1); PHYTMAC_WRITE(pdata, PHYTMAC_IS, -1); @@ -317,7 +323,10 @@ static void phytmac_reset_hw(struct phytmac *pdata) PHYTMAC_WRITE(pdata, PHYTMAC_RXPTRH(q), 0); if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) - PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(q), 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_TAILPTR(q), 0); + + if (pdata->capacities & PHYTMAC_CAPS_RXPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_RX_TAILPTR(q), 0); } } @@ -352,6 +361,7 @@ static int phytmac_init_hw(struct phytmac *pdata) u32 config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); u32 dmaconfig; u32 nctrlconfig; + u32 ptrconfig = 0; nctrlconfig = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); nctrlconfig |= PHYTMAC_BIT(MPE); @@ -414,7 +424,14 @@ static int phytmac_init_hw(struct phytmac *pdata) PHYTMAC_WRITE(pdata, PHYTMAC_DCONFIG, dmaconfig); if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) - PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_ENABLE, PHYTMAC_BIT(TXTAIL_ENABLE)); + ptrconfig |= PHYTMAC_BIT(TXPTR_EN); + if (pdata->capacities & PHYTMAC_CAPS_RXPTR) + ptrconfig |= PHYTMAC_BIT(RXPTR_EN); + + 
PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_ENABLE, ptrconfig); + + if (pdata->capacities & PHYTMAC_CAPS_START) + PHYTMAC_WRITE(pdata, PHYTMAC_TXSTARTSEL, PHYTMAC_BIT(TSTARTSEL_ENABLE)); if (phy_interface_mode_is_8023z(pdata->phy_interface)) phytmac_pcs_software_reset(pdata, 1); @@ -426,8 +443,46 @@ static int phytmac_powerup_hw(struct phytmac *pdata, int on) { u32 status, data0, data1, rdata1; int ret; + acpi_handle handle; + union acpi_object args[3]; + struct acpi_object_list arg_list = { + .pointer = args, + .count = ARRAY_SIZE(args), + }; + acpi_status acpi_sts; + unsigned long long rv; + + if (!(pdata->capacities & PHYTMAC_CAPS_PWCTRL)) { + pdata->power_state = on; + return 0; + } - if (pdata->capacities & PHYTMAC_CAPS_LPI) { + if (has_acpi_companion(pdata->dev)) { + handle = ACPI_HANDLE(pdata->dev); + + netdev_info(pdata->ndev, "set gmac power %s\n", + on == PHYTMAC_POWERON ? "on" : "off"); + args[0].type = ACPI_TYPE_INTEGER; + args[0].integer.value = PHYTMAC_PWCTL_GMAC_ID; + args[1].type = ACPI_TYPE_INTEGER; + args[1].integer.value = PHYTMAC_PWCTL_DEFAULT_VAL; + args[2].type = ACPI_TYPE_INTEGER; + args[2].integer.value = PHYTMAC_PWCTL_DEFAULT_VAL; + + if (on == PHYTMAC_POWERON) { + acpi_sts = acpi_evaluate_integer(handle, "PPWO", &arg_list, &rv); + if (ACPI_FAILURE(acpi_sts)) + netdev_err(pdata->ndev, "NO PPWO Method\n"); + if (rv) + netdev_err(pdata->ndev, "Failed to power on\n"); + } else { + acpi_sts = acpi_evaluate_integer(handle, "PPWD", &arg_list, &rv); + if (ACPI_FAILURE(acpi_sts)) + netdev_err(pdata->ndev, "NO PPWD Method\n"); + if (rv) + netdev_err(pdata->ndev, "Failed to power off\n"); + } + } else { ret = readx_poll_timeout(PHYTMAC_READ_STAT, pdata, status, !status, 1, PHYTMAC_TIMEOUT); if (ret) @@ -460,16 +515,17 @@ static int phytmac_powerup_hw(struct phytmac *pdata, int on) data0 & PHYTMAC_BIT(DATA0_FREE), 1, PHYTMAC_TIMEOUT); if (ret) - netdev_err(pdata->ndev, "mnh data0 is busy"); + netdev_err(pdata->ndev, "mnh data0 is busy\n"); rdata1 = 
PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA1); if (rdata1 == data1) netdev_err(pdata->ndev, "gmac power %s success, data1 = %x, rdata1=%x\n", - on ? "up" : "down", data1, rdata1); + on == PHYTMAC_POWERON ? "up" : "down", data1, rdata1); else netdev_err(pdata->ndev, "gmac power %s failed, data1 = %x, rdata1=%x\n", - on ? "up" : "down", data1, rdata1); + on == PHYTMAC_POWERON ? "up" : "down", data1, rdata1); } + pdata->power_state = on; return 0; @@ -502,16 +558,18 @@ static int phytmac_set_wake(struct phytmac *pdata, int wake) return 0; } -static void phytmac_mdio_idle(struct phytmac *pdata) +static int phytmac_mdio_idle(struct phytmac *pdata) { u32 val; + int ret; /* wait for end of transfer */ - val = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); - while (!(val & PHYTMAC_BIT(MDIO_IDLE))) { - cpu_relax(); - val = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); - } + ret = readx_poll_timeout(PHYTMAC_READ_NSTATUS, pdata, val, val & PHYTMAC_BIT(MDIO_IDLE), + 1, PHYTMAC_MDIO_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mdio wait for idle time out!"); + + return ret; } static int phytmac_mdio_data_read_c22(struct phytmac *pdata, int mii_id, int regnum) @@ -626,6 +684,9 @@ static int phytmac_get_feature_all(struct phytmac *pdata) /* max rx fs */ pdata->max_rx_fs = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT3, SCR2CMP); + if (pdata->version == VERSION_V3) + pdata->capacities |= PHYTMAC_CAPS_RXPTR; + if (netif_msg_hw(pdata)) netdev_info(pdata->ndev, "get feature queue_num=%d, daw=%d, dbw=%d, rx_fs=%d, rx_bd=%d, tx_bd=%d\n", pdata->queues_num, pdata->dma_addr_width, pdata->dma_data_width, @@ -756,7 +817,7 @@ static int phytmac_init_ring_hw(struct phytmac *pdata) PHYTMAC_WRITE(pdata, PHYTMAC_RXPTRH(q), upper_32_bits(queue->rx_ring_addr)); if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) - PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(q), queue->tx_tail); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_TAILPTR(q), queue->tx_tail); } return 0; @@ -928,7 +989,8 @@ static unsigned int phytmac_rx_map_desc(struct 
phytmac_queue *queue, desc->desc1 = 0; desc->desc2 = upper_32_bits(addr); /* Make newly descriptor to hardware */ - dma_wmb(); + if (!(pdata->capacities & PHYTMAC_CAPS_RXPTR)) + dma_wmb(); desc->desc0 = lower_32_bits(addr); } else { desc->desc1 = 0; @@ -943,7 +1005,16 @@ static unsigned int phytmac_rx_map_desc(struct phytmac_queue *queue, static unsigned int phytmac_zero_rx_desc_addr(struct phytmac_dma_desc *desc) { desc->desc2 = 0; - desc->desc0 = PHYTMAC_BIT(RX_USED); + desc->desc0 = (desc->desc0 & PHYTMAC_BIT(RX_TS_VALID)) | PHYTMAC_BIT(RX_USED); + + return 0; +} + +static unsigned int phytmac_zero_tx_desc(struct phytmac_dma_desc *desc) +{ + desc->desc2 = 0; + desc->desc0 = 0; + desc->desc1 &= ~PHYTMAC_BIT(TX_USED); return 0; } @@ -953,27 +1024,19 @@ static void phytmac_tx_start(struct phytmac_queue *queue) struct phytmac *pdata = queue->pdata; if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) - PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(queue->index), + PHYTMAC_WRITE(pdata, PHYTMAC_TX_TAILPTR(queue->index), PHYTMAC_BIT(TXTAIL_UPDATE) | queue->tx_tail); - if (pdata->capacities & PHYTMAC_CAPS_START) - PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, - PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(TSTART)); + if (likely(pdata->capacities & PHYTMAC_CAPS_START)) + PHYTMAC_WRITE(pdata, PHYTMAC_TXSTART, PHYTMAC_BIT(TSTART)); } -static void phytmac_restart(struct phytmac *pdata) +static void phytmac_update_rx_tail(struct phytmac_queue *queue) { - int q; - struct phytmac_queue *queue; + struct phytmac *pdata = queue->pdata; - for (q = 0; q < pdata->queues_num; q++) { - queue = &pdata->queues[q]; - if (queue->tx_head != queue->tx_tail) { - PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, - PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(TSTART)); - break; - } - } + if (pdata->capacities & PHYTMAC_CAPS_RXPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_RX_TAILPTR(queue->index), queue->rx_head); } static int phytmac_tx_complete(const struct phytmac_dma_desc *desc) @@ -1116,7 +1179,7 @@ static void 
phytmac_mac_interface_config(struct phytmac *pdata, unsigned int mod /* Disable AN for SGMII fixed link or speed equal to 2.5G, enable otherwise.*/ if (state->interface == PHY_INTERFACE_MODE_SGMII) { - if (state->speed == SPEED_2500 || mode == MLO_AN_FIXED) + if (mode == MLO_AN_FIXED) phytmac_enable_autoneg(pdata, 0); else phytmac_enable_autoneg(pdata, 1); @@ -1158,20 +1221,20 @@ static void phytmac_clear_tx_desc(struct phytmac_queue *queue) desc->desc1 |= PHYTMAC_BIT(TX_WRAP); if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) - PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(queue->index), queue->tx_tail); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_TAILPTR(queue->index), queue->tx_tail); } static void phytmac_get_hw_stats(struct phytmac *pdata) { - u32 stats[45]; + u32 stats[PHYTMAC_STATIS_REG_NUM]; int i, j; u64 val; u64 *p = &pdata->stats.tx_octets; - for (i = 0 ; i < 45; i++) + for (i = 0 ; i < PHYTMAC_STATIS_REG_NUM; i++) stats[i] = PHYTMAC_READ(pdata, PHYTMAC_OCTTX + i * 4); - for (i = 0, j = 0; i < 45; i++) { + for (i = 0, j = 0; i < PHYTMAC_STATIS_REG_NUM; i++) { if (i == 0 || i == 20) { val = (u64)stats[i + 1] << 32 | stats[i]; *p += val; @@ -1355,6 +1418,7 @@ struct phytmac_hw_if phytmac_1p0_hw = { .get_stats = phytmac_get_hw_stats, .set_mac_address = phytmac_set_mac_addr, .get_mac_address = phytmac_get_mac_addr, + .mdio_idle = phytmac_mdio_idle, .mdio_read = phytmac_mdio_data_read_c22, .mdio_write = phytmac_mdio_data_write_c22, .mdio_read_c45 = phytmac_mdio_data_read_c45, @@ -1388,7 +1452,7 @@ struct phytmac_hw_if phytmac_1p0_hw = { /* tx and rx */ .tx_map = phytmac_tx_map_desc, .transmit = phytmac_tx_start, - .restart = phytmac_restart, + .update_rx_tail = phytmac_update_rx_tail, .tx_complete = phytmac_tx_complete, .rx_complete = phytmac_rx_complete, .get_rx_pkt_len = phytmac_rx_pkt_len, @@ -1401,6 +1465,7 @@ struct phytmac_hw_if phytmac_1p0_hw = { .clear_rx_desc = phytmac_clear_rx_desc, .clear_tx_desc = phytmac_clear_tx_desc, .zero_rx_desc_addr = phytmac_zero_rx_desc_addr, + 
.zero_tx_desc = phytmac_zero_tx_desc, /* ptp */ .init_ts_hw = phytmac_ptp_init_hw, .set_time = phytmac_set_time, diff --git a/drivers/net/ethernet/phytium/phytmac_v1.h b/drivers/net/ethernet/phytium/phytmac_v1.h index 32bb1294910963b71d34d0b2a354d8fbf179d99a..e0ca0ab8354749e85ad0d39159a994ec8bbd3482 100644 --- a/drivers/net/ethernet/phytium/phytmac_v1.h +++ b/drivers/net/ethernet/phytium/phytmac_v1.h @@ -1,4 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. */ + #ifndef _PHYTMAC_V1_H #define _PHYTMAC_V1_H @@ -53,6 +55,9 @@ extern struct phytmac_hw_if phytmac_1p0_hw; #define PHYTMAC_USXSTATUS 0x0A88 #define PHYTMAC_TXBDCTRL 0x04CC #define PHYTMAC_RXBDCTRL 0x04D0 +#define PHYTMAC_TXSTART 0x0E70 +#define PHYTMAC_TXSTARTSEL 0x0E74 +#define PHYTMAC_VERSION 0x0FF0 /* Fdir match registers */ #define PHYTMAC_FDIR(i) (0x0540 + ((i) << 2)) @@ -75,7 +80,8 @@ extern struct phytmac_hw_if phytmac_1p0_hw; #define PHYTMAC_IDR(i) (0x0620 + ((i) << 2)) #define PHYTMAC_IMR(i) (0x0640 + ((i) << 2)) #define PHYTMAC_TAIL_ENABLE (0x0e7c) -#define PHYTMAC_TAILPTR(i) (0x0e80 + ((i) << 2)) +#define PHYTMAC_TX_TAILPTR(i) (0x0e80 + ((i) << 2)) +#define PHYTMAC_RX_TAILPTR(i) (0x0ec0 + ((i) << 2)) #define PHYTMAC_PHY_INT_ENABLE 0x1C88 #define PHYTMAC_PHY_INT_CLEAR 0x1C8C @@ -363,8 +369,11 @@ extern struct phytmac_hw_if phytmac_1p0_hw; #define PHYTMAC_TXTAIL_UPDATE_WIDTH 1 /* Bitfields in TAIL_ENABLE */ -#define PHYTMAC_TXTAIL_ENABLE_INDEX 0 /* Enable tx tail */ -#define PHYTMAC_TXTAIL_ENABLE_WIDTH 1 +#define PHYTMAC_TXPTR_EN_INDEX 0 /* Enable tx tail */ +#define PHYTMAC_TXPTR_EN_WIDTH 1 + +#define PHYTMAC_RXPTR_EN_INDEX 16 /* Enable rx tail */ +#define PHYTMAC_RXPTR_EN_WIDTH 1 /* Bitfields in INT ENABLE */ #define PHYTMAC_WOL_RECEIVE_ENABLE_INDEX 28 /* Enable wol_event_recieve */ @@ -374,6 +383,10 @@ extern struct phytmac_hw_if phytmac_1p0_hw; #define PHYTMAC_WOL_RECEIVE_DISABLE_INDEX 28 /* Disable wol_event_recieve */ #define 
PHYTMAC_WOL_RECEIVE_DISABLE_WIDTH 1 +/* Bitfields in PHYTMAC_TXSTARTSEL */ +#define PHYTMAC_TSTARTSEL_ENABLE_INDEX 0 /* Enable Tstart_sel */ +#define PHYTMAC_TSTARTSEL_ENABLE_WIDTH 1 + #define PHYTMAC_TSEC_WIDTH (PHYTMAC_SECH_WIDTH + PHYTMAC_SECL_WIDTH) #define SEC_MAX_VAL (((u64)1 << PHYTMAC_TSEC_WIDTH) - 1) #define NSEC_MAX_VAL ((1 << PHYTMAC_NSEC_WIDTH) - 1) diff --git a/drivers/net/ethernet/phytium/phytmac_v2.c b/drivers/net/ethernet/phytium/phytmac_v2.c index 25f1ba6c1d6ef428f7b521386ebf0eb1f882118c..1afff67d625d99b450f1e4dcc63508f98a1d9db0 100644 --- a/drivers/net/ethernet/phytium/phytmac_v2.c +++ b/drivers/net/ethernet/phytium/phytmac_v2.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. */ + #include #include #include @@ -13,54 +15,91 @@ #include #include #include +#include #include "phytmac.h" #include "phytmac_v2.h" -static int phytmac_msg_send(struct phytmac *pdata, u16 cmd_id, +static int phytmac_v2_msg_send(struct phytmac *pdata, u16 cmd_id, u16 cmd_subid, void *data, int len, int wait) { - int index = 0; + u32 tx_head, tx_tail, ring_size; struct phytmac_msg_info msg; struct phytmac_msg_info msg_rx; + u32 retry = 0; int ret = 0; - ++pdata->msg_ring.tx_msg_tail; - if (pdata->msg_ring.tx_msg_tail > pdata->msg_ring.tx_msg_ring_size) - pdata->msg_ring.tx_msg_tail = 1; - index = pdata->msg_ring.tx_msg_tail; + spin_lock(&pdata->msg_ring.msg_lock); + tx_head = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_HEAD) & 0xff; + tx_tail = phytmac_v2_tx_ring_wrap(pdata, pdata->msg_ring.tx_msg_wr_tail); + pdata->msg_ring.tx_msg_rd_tail = tx_tail; + ring_size = pdata->msg_ring.tx_msg_ring_size; + + while ((tx_tail + 1) % ring_size == tx_head) { + udelay(1); + tx_head = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_HEAD) & 0xff; + retry++; + if (retry >= PHYTMAC_RETRY_TIMES) { + netdev_err(pdata->ndev, + "Time out waiting for Tx msg ring free, tx_tail:0x%x, tx_head:0x%x", + tx_tail, tx_head); + 
spin_unlock(&pdata->msg_ring.msg_lock); + return -EINVAL; + } + } + retry = 0; wait = 1; memset(&msg, 0, sizeof(msg)); memset(&msg_rx, 0, sizeof(msg_rx)); - msg.module_id = PHYTMAC_MODULE_ID_GMAC; - msg.cmd_id = cmd_id; + msg.cmd_type = cmd_id; msg.cmd_subid = cmd_subid; - msg.flags = PHYTMAC_FLAGS_MSG_NOINT; - - if (len) + if (len > 0 && len <= PHYTMAC_MSG_PARA_LEN) { memcpy(&msg.para[0], data, len); + } else if (len > PHYTMAC_MSG_PARA_LEN) { + netdev_err(pdata->ndev, "Tx msg para len %d is greater than the max len %d", + len, PHYTMAC_MSG_PARA_LEN); + spin_unlock(&pdata->msg_ring.msg_lock); + return -EINVAL; + } if (netif_msg_hw(pdata)) { - netdev_info(pdata->ndev, "tx msg: cmdid=%d, subid=%d, flags=%d, len=%d, tail=%d\n", - msg.cmd_id, msg.cmd_subid, msg.flags, len, pdata->msg_ring.tx_msg_tail); + netdev_info(pdata->ndev, "Tx msg: cmdid:%d, subid:%d, status0:%d, len:%d, tail:%d", + msg.cmd_type, msg.cmd_subid, msg.status0, len, tx_tail); } - memcpy(pdata->msg_regs + PHYTMAC_MSG(index), &msg, sizeof(msg)); - PHYTMAC_WRITE(pdata, PHYTMAC_TX_MSG_TAIL, - pdata->msg_ring.tx_msg_tail | PHYTMAC_BIT(TX_MSG_INT)); + memcpy(pdata->msg_regs + PHYTMAC_MSG(tx_tail), &msg, sizeof(msg)); + tx_tail = phytmac_v2_tx_ring_wrap(pdata, ++tx_tail); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_MSG_TAIL, tx_tail | PHYTMAC_BIT(TX_MSG_INT)); + pdata->msg_ring.tx_msg_wr_tail = tx_tail; if (wait) { - memcpy(&msg_rx, pdata->msg_regs + PHYTMAC_MSG(index), MSG_HDR_LEN); - while (!(msg_rx.flags & 0x1)) { - cpu_relax(); - memcpy(&msg_rx, pdata->msg_regs + PHYTMAC_MSG(index), MSG_HDR_LEN); + tx_head = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_HEAD) & 0xff; + while (tx_head != tx_tail) { + udelay(1); + tx_head = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_HEAD) & 0xff; + retry++; + if (retry >= PHYTMAC_RETRY_TIMES) { + netdev_err(pdata->ndev, "Msg process time out!"); + spin_unlock(&pdata->msg_ring.msg_lock); + return -EINVAL; + } + } + + memcpy(&msg_rx, pdata->msg_regs + PHYTMAC_MSG(pdata->msg_ring.tx_msg_rd_tail), + 
PHYTMAC_MSG_HDR_LEN); + if (!(msg_rx.status0 & PHYTMAC_CMD_PRC_SUCCESS)) { + netdev_err(pdata->ndev, "Msg process error, cmdid:%d, subid:%d, status0:%d, tail:%d", + msg.cmd_type, msg.cmd_subid, msg.status0, tx_tail); + spin_unlock(&pdata->msg_ring.msg_lock); + return -EINVAL; } } + spin_unlock(&pdata->msg_ring.msg_lock); return ret; } -void phytmac_reset_hw(struct phytmac *pdata) +static void phytmac_v2_reset_hw(struct phytmac *pdata) { int q; u16 cmd_id, cmd_subid; @@ -70,26 +109,27 @@ void phytmac_reset_hw(struct phytmac *pdata) for (q = 0; q < pdata->queues_max_num; ++q) { PHYTMAC_WRITE(pdata, PHYTMAC_INT_DR(q), -1); PHYTMAC_WRITE(pdata, PHYTMAC_INT_SR(q), -1); - PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(q), 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_PTR(q), 0); + PHYTMAC_WRITE(pdata, PHYTMAC_RX_PTR(q), 0); } /* reset hw rx/tx enable */ cmd_id = PHYTMAC_MSG_CMD_DEFAULT; cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_HW; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); /* reset tx ring */ memset(&ring, 0, sizeof(ring)); ring.queue_num = pdata->queues_max_num; cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_TX_QUEUE; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 0); /* reset rx ring */ cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_RX_QUEUE; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 1); } -static int phytmac_get_mac_addr(struct phytmac *pdata, u8 *addr) +static int phytmac_v2_get_mac_addr(struct phytmac *pdata, u8 *addr) { int index; u16 cmd_id, cmd_subid; @@ -97,12 +137,10 @@ static int phytmac_get_mac_addr(struct phytmac *pdata, u8 *addr) cmd_id = PHYTMAC_MSG_CMD_GET; cmd_subid = PHYTMAC_MSG_CMD_GET_ADDR; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, 
cmd_id, cmd_subid, NULL, 0, 1); - index = pdata->msg_ring.tx_msg_tail; - if (index <= 0) - index += pdata->msg_ring.tx_msg_ring_size; - memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, + index = phytmac_v2_tx_ring_wrap(pdata, pdata->msg_ring.tx_msg_rd_tail); + memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + PHYTMAC_MSG_HDR_LEN, sizeof(struct phytmac_mac)); addr[0] = para.addrl & 0xff; @@ -115,7 +153,7 @@ static int phytmac_get_mac_addr(struct phytmac *pdata, u8 *addr) return 0; } -int phytmac_set_mac_addr(struct phytmac *pdata, const u8 *addr) +static int phytmac_v2_set_mac_addr(struct phytmac *pdata, const u8 *addr) { u16 cmd_id; u16 cmd_subid; @@ -127,46 +165,78 @@ int phytmac_set_mac_addr(struct phytmac *pdata, const u8 *addr) para.addrl = cpu_to_le32(*((u32 *)addr)); para.addrh = cpu_to_le16(*((u16 *)(addr + 4))); - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + return 0; +} + +static int phytmac_v2_pcs_software_reset(struct phytmac *pdata, int reset) +{ + u16 cmd_id; + u16 cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (reset) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PCS_RESET; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_PCS_RESET; + + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); return 0; } -static int phytmac_init_hw(struct phytmac *pdata) +static int phytmac_v2_init_hw(struct phytmac *pdata) { u16 cmd_id, cmd_subid; struct phytmac_dma_info dma; struct phytmac_eth_info eth; + u32 ptrconfig = 0; + u8 mdc; + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + ptrconfig |= PHYTMAC_BIT(TXTAIL_EN); + if (pdata->capacities & PHYTMAC_CAPS_RXPTR) + ptrconfig |= PHYTMAC_BIT(RXTAIL_EN); - phytmac_set_mac_addr(pdata, pdata->ndev->dev_addr); + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR_ENABLE, ptrconfig); + + if (pdata->mii_bus) { + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_MDIO; + 
phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + } + + phytmac_v2_set_mac_addr(pdata, pdata->ndev->dev_addr); cmd_id = PHYTMAC_MSG_CMD_SET; if (pdata->capacities & PHYTMAC_CAPS_JUMBO) cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_JUMBO; else cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_1536_FRAMES; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); if (pdata->ndev->flags & IFF_PROMISC) { cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PROMISE; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); } if (pdata->ndev->features & NETIF_F_RXCSUM) { cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_RXCSUM; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); } - if (!(pdata->ndev->flags & IFF_BROADCAST)) { + if (pdata->ndev->flags & IFF_BROADCAST) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_BC; + else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_BC; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); - } + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PAUSE; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_STRIPCRC; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); memset(&dma, 0, sizeof(dma)); cmd_subid = PHYTMAC_MSG_CMD_SET_DMA; @@ -177,22 +247,31 @@ static int phytmac_init_hw(struct phytmac *pdata) dma.hw_dma_cap |= HW_DMA_CAP_CSUM; if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) dma.hw_dma_cap |= HW_DMA_CAP_PTP; + if (pdata->dma_data_width == PHYTMAC_DBW32) + dma.hw_dma_cap |= HW_DMA_CAP_DDW32; if (pdata->dma_data_width == PHYTMAC_DBW64) dma.hw_dma_cap |= HW_DMA_CAP_DDW64; if (pdata->dma_data_width == PHYTMAC_DBW128) dma.hw_dma_cap |= HW_DMA_CAP_DDW128; - 
phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&dma, sizeof(dma), 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)&dma, sizeof(dma), 0); + + cmd_subid = PHYTMAC_MSG_CMD_SET_MDC; + mdc = PHYTMAC_CLK_DIV96; + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&mdc), sizeof(mdc), 0); memset(ð, 0, sizeof(eth)); cmd_subid = PHYTMAC_MSG_CMD_SET_ETH_MATCH; eth.index = 0; eth.etype = (uint16_t)ETH_P_IP; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)ð, sizeof(eth), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)ð, sizeof(eth), 1); + + if (phy_interface_mode_is_8023z(pdata->phy_interface)) + phytmac_v2_pcs_software_reset(pdata, 1); return 0; } -static int phytmac_enable_multicast(struct phytmac *pdata, int enable) +static int phytmac_v2_enable_multicast(struct phytmac *pdata, int enable) { u16 cmd_id, cmd_subid; @@ -202,11 +281,11 @@ static int phytmac_enable_multicast(struct phytmac *pdata, int enable) else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_MC; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); return 0; } -static int phytmac_set_mc_hash(struct phytmac *pdata, unsigned long *mc_filter) +static int phytmac_v2_set_mc_hash(struct phytmac *pdata, unsigned long *mc_filter) { u16 cmd_id, cmd_subid; struct phytmac_mc_info para; @@ -216,12 +295,12 @@ static int phytmac_set_mc_hash(struct phytmac *pdata, unsigned long *mc_filter) cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_HASH_MC; para.mc_bottom = (u32)mc_filter[0]; para.mc_top = (u32)mc_filter[1]; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); return 0; } -static int phytmac_init_ring_hw(struct phytmac *pdata) +static int phytmac_v2_init_ring_hw(struct phytmac *pdata) { u16 cmd_id, cmd_subid; struct phytmac_ring_info rxring; @@ -240,45 +319,43 @@ static int phytmac_init_ring_hw(struct phytmac *pdata) txring.hw_dma_cap 
|= HW_DMA_CAP_64B; rxring.hw_dma_cap |= HW_DMA_CAP_64B; for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { - PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(q), queue->tx_head); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_PTR(q), queue->tx_head); txring.addr[q] = queue->tx_ring_addr; rxring.addr[q] = queue->rx_ring_addr; } - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&txring), sizeof(txring), 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&txring), sizeof(txring), 0); cmd_id = PHYTMAC_MSG_CMD_SET; cmd_subid = PHYTMAC_MSG_CMD_SET_DMA_RX_BUFSIZE; rxbuf.queue_num = pdata->queues_num; rxbuf.buffer_size = pdata->rx_buffer_len / 64; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxbuf), sizeof(rxbuf), 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxbuf), sizeof(rxbuf), 0); cmd_id = PHYTMAC_MSG_CMD_SET; cmd_subid = PHYTMAC_MSG_CMD_SET_INIT_RX_RING; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxring), sizeof(rxring), 0); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxring), sizeof(rxring), 1); return 0; } -int phytmac_init_msg_ring(struct phytmac *pdata) +static int phytmac_v2_init_msg_ring(struct phytmac *pdata) { - u32 size = 0; - - pdata->msg_ring.tx_msg_tail = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_TAIL) & 0xff; - size = PHYTMAC_READ_BITS(pdata, PHYTMAC_SIZE, TXRING_SIZE); - pdata->msg_ring.tx_msg_ring_size = size; - if (pdata->msg_ring.tx_msg_tail == size) - pdata->msg_ring.tx_msg_tail = 0; + u32 tx_msg_tail; + pdata->msg_ring.tx_msg_ring_size = PHYTMAC_READ_BITS(pdata, PHYTMAC_SIZE, TXRING_SIZE); + tx_msg_tail = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_TAIL) & 0xff; + pdata->msg_ring.tx_msg_wr_tail = phytmac_v2_tx_ring_wrap(pdata, tx_msg_tail); + pdata->msg_ring.tx_msg_rd_tail = pdata->msg_ring.tx_msg_wr_tail; PHYTMAC_WRITE(pdata, PHYTMAC_MSG_IMR, 0xfffffffe); if (netif_msg_hw(pdata)) - netdev_info(pdata->ndev, "mac msg ring: tx_msg_ring_size=%d, tx_msg_tail=%d\n", - size, pdata->msg_ring.tx_msg_tail); + 
netdev_info(pdata->ndev, "Msg ring size:%d, tx msg tail=%d\n", + pdata->msg_ring.tx_msg_ring_size, tx_msg_tail); return 0; } -static int phytmac_get_feature_all(struct phytmac *pdata) +static int phytmac_v2_get_feature_all(struct phytmac *pdata) { u16 cmd_id, cmd_subid; int index; @@ -287,13 +364,9 @@ static int phytmac_get_feature_all(struct phytmac *pdata) memset(¶, 0, sizeof(para)); cmd_id = PHYTMAC_MSG_CMD_GET; cmd_subid = PHYTMAC_MSG_CMD_GET_CAPS; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); - - index = pdata->msg_ring.tx_msg_tail; - if (index <= 0) - index += pdata->msg_ring.tx_msg_ring_size; - - memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + index = phytmac_v2_tx_ring_wrap(pdata, pdata->msg_ring.tx_msg_rd_tail); + memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + PHYTMAC_MSG_HDR_LEN, sizeof(struct phytmac_feature)); pdata->queues_max_num = para.queue_num; @@ -303,10 +376,13 @@ static int phytmac_get_feature_all(struct phytmac *pdata) pdata->dma_addr_width = 32; pdata->dma_data_width = para.dma_data_width; pdata->max_rx_fs = para.max_rx_fs; - pdata->tx_bd_prefetch = (2 << (para.tx_bd_prefetch - 1)) * - sizeof(struct phytmac_dma_desc); - pdata->rx_bd_prefetch = (2 << (para.rx_bd_prefetch - 1)) * - sizeof(struct phytmac_dma_desc); + + if (para.tx_bd_prefetch) + pdata->tx_bd_prefetch = (2 << (para.tx_bd_prefetch - 1)) * + sizeof(struct phytmac_dma_desc); + if (para.rx_bd_prefetch) + pdata->rx_bd_prefetch = (2 << (para.rx_bd_prefetch - 1)) * + sizeof(struct phytmac_dma_desc); if (netif_msg_hw(pdata)) { netdev_info(pdata->ndev, "feature qnum=%d, daw=%d, dbw=%d, rxfs=%d, rxbd=%d, txbd=%d\n", @@ -317,56 +393,42 @@ static int phytmac_get_feature_all(struct phytmac *pdata) return 0; } -void phytmac_get_regs(struct phytmac *pdata, u32 *reg_buff) +static void phytmac_v2_get_regs(struct phytmac *pdata, u32 *reg_buff) { u16 cmd_id, cmd_subid; - u16 reg_num; int index; + struct 
phytmac_ethtool_reg msg; + memset(&msg, 0, sizeof(msg)); cmd_id = PHYTMAC_MSG_CMD_GET; - cmd_subid = PHYTMAC_MSG_CMD_GET_REG_READ; - reg_num = 16; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)®_num, 2, 1); - - index = pdata->msg_ring.tx_msg_tail; - if (index <= 0) - index += pdata->msg_ring.tx_msg_ring_size; - - memcpy(reg_buff, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, 64); -} - -static void phytmac_get_hw_stats(struct phytmac *pdata) -{ - u16 cmd_id, cmd_subid; - u8 count; - int i, j, index; - u32 stats[48]; + cmd_subid = PHYTMAC_MSG_CMD_GET_REGS_FOR_ETHTOOL; + msg.interface = pdata->phytmac_v2_interface; + /* There are 16 regs in total, read 14 regs at first time, read 2 regs at last time */ + msg.cnt = 0; + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&msg), sizeof(msg), 1); + index = phytmac_v2_tx_ring_wrap(pdata, pdata->msg_ring.tx_msg_rd_tail); + memcpy(reg_buff, pdata->msg_regs + PHYTMAC_MSG(index) + PHYTMAC_MSG_HDR_LEN, + PHYTMAC_MSG_PARA_LEN); + + msg.cnt = 1; + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&msg), sizeof(msg), 1); + index = phytmac_v2_tx_ring_wrap(pdata, pdata->msg_ring.tx_msg_rd_tail); + memcpy(reg_buff + PHYTMAC_MSG_PARA_LEN / sizeof(u32), pdata->msg_regs + + PHYTMAC_MSG(index) + PHYTMAC_MSG_HDR_LEN, + PHYTMAC_ETHTOOLD_REGS_LEN - PHYTMAC_MSG_PARA_LEN); +} + +static void phytmac_v2_get_hw_stats(struct phytmac *pdata) +{ + u32 stats[PHYTMAC_STATIS_REG_NUM]; + int i, j; u64 val; u64 *p = &pdata->stats.tx_octets; - cmd_id = PHYTMAC_MSG_CMD_GET; - cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; - count = 1; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 0); - - cmd_id = PHYTMAC_MSG_CMD_GET; - cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; - count = 2; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 0); - - cmd_id = PHYTMAC_MSG_CMD_GET; - cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; - count = 3; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 
1); - - for (i = 0; i < 3; i++) { - index = pdata->msg_ring.tx_msg_tail + i - 2; - if (index <= 0) - index += pdata->msg_ring.tx_msg_ring_size; - memcpy(&stats[i * 16], pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, 64); - } + for (i = 0 ; i < PHYTMAC_STATIS_REG_NUM; i++) + stats[i] = PHYTMAC_READ(pdata, PHYTMAC_OCT_TX + i * 4); - for (i = 0, j = 0; i < 44; i++) { + for (i = 0, j = 0; i < PHYTMAC_STATIS_REG_NUM; i++) { if (i == 0 || i == 20) { val = (u64)stats[i + 1] << 32 | stats[i]; *p += val; @@ -385,19 +447,21 @@ static void phytmac_get_hw_stats(struct phytmac *pdata) } } -static void phytmac_mdio_idle(struct phytmac *pdata) +static int phytmac_v2_mdio_idle(struct phytmac *pdata) { u32 val; + int ret; /* wait for end of transfer */ - val = PHYTMAC_READ(pdata, PHYTMAC_NETWORK_STATUS); - while (!(val & PHYTMAC_BIT(MIDLE))) { - cpu_relax(); - val = PHYTMAC_READ(pdata, PHYTMAC_NETWORK_STATUS); - } + ret = readx_poll_timeout(PHYTMAC_READ_NSR, pdata, val, val & PHYTMAC_BIT(MIDLE), + 1, PHYTMAC_MDIO_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mdio wait for idle time out!"); + + return ret; } -static int phytmac_mdio_data_read_c22(struct phytmac *pdata, int mii_id, int regnum) +static int phytmac_v2_mdio_data_read_c22(struct phytmac *pdata, int mii_id, int regnum) { u16 data; @@ -406,14 +470,14 @@ static int phytmac_mdio_data_read_c22(struct phytmac *pdata, int mii_id, int reg | PHYTMAC_BITS(PHYADDR, mii_id) | PHYTMAC_BITS(REGADDR, regnum) | PHYTMAC_BITS(CONST, 2))); - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); data = PHYTMAC_READ(pdata, PHYTMAC_MDIO) & 0xffff; - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); return data; } -static int phytmac_mdio_data_write_c22(struct phytmac *pdata, int mii_id, - int regnum, u16 data) +static int phytmac_v2_mdio_data_write_c22(struct phytmac *pdata, int mii_id, + int regnum, u16 data) { PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C22) | PHYTMAC_BITS(MDCOPS, PHYTMAC_C22_WRITE) 
@@ -421,12 +485,12 @@ static int phytmac_mdio_data_write_c22(struct phytmac *pdata, int mii_id, | PHYTMAC_BITS(REGADDR, regnum) | PHYTMAC_BITS(VALUE, data) | PHYTMAC_BITS(CONST, 2))); - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); return 0; } -static int phytmac_mdio_data_read_c45(struct phytmac *pdata, int mii_id, int devad, int regnum) +static int phytmac_v2_mdio_data_read_c45(struct phytmac *pdata, int mii_id, int devad, int regnum) { u16 data; @@ -436,21 +500,21 @@ static int phytmac_mdio_data_read_c45(struct phytmac *pdata, int mii_id, int dev | PHYTMAC_BITS(REGADDR, devad & 0x1F) | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) | PHYTMAC_BITS(CONST, 2))); - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_READ) | PHYTMAC_BITS(PHYADDR, mii_id) | PHYTMAC_BITS(REGADDR, devad & 0x1F) | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) | PHYTMAC_BITS(CONST, 2))); - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); data = PHYTMAC_READ(pdata, PHYTMAC_MDIO) & 0xffff; - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); return data; } -static int phytmac_mdio_data_write_c45(struct phytmac *pdata, int mii_id, int devad, - int regnum, u16 data) +static int phytmac_v2_mdio_data_write_c45(struct phytmac *pdata, int mii_id, int devad, + int regnum, u16 data) { PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_ADDR) @@ -458,24 +522,62 @@ static int phytmac_mdio_data_write_c45(struct phytmac *pdata, int mii_id, int de | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) | PHYTMAC_BITS(CONST, 2))); - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_WRITE) | PHYTMAC_BITS(PHYADDR, mii_id) | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) | PHYTMAC_BITS(VALUE, 
data) | PHYTMAC_BITS(CONST, 2))); - phytmac_mdio_idle(pdata); + phytmac_v2_mdio_idle(pdata); return 0; } -static int phytmac_powerup_hw(struct phytmac *pdata, int on) +static int phytmac_v2_powerup_hw(struct phytmac *pdata, int on) { u32 status, data0, data1, rdata1; int ret; + acpi_handle handle; + union acpi_object args[3]; + struct acpi_object_list arg_list = { + .pointer = args, + .count = ARRAY_SIZE(args), + }; + acpi_status acpi_sts; + unsigned long long rv; + + if (!(pdata->capacities & PHYTMAC_CAPS_PWCTRL)) { + pdata->power_state = on; + return 0; + } + + if (has_acpi_companion(pdata->dev)) { + handle = ACPI_HANDLE(pdata->dev); + + netdev_info(pdata->ndev, "set gmac power %s\n", + on == PHYTMAC_POWERON ? "on" : "off"); + args[0].type = ACPI_TYPE_INTEGER; + args[0].integer.value = PHYTMAC_PWCTL_GMAC_ID; + args[1].type = ACPI_TYPE_INTEGER; + args[1].integer.value = PHYTMAC_PWCTL_DEFAULT_VAL; + args[2].type = ACPI_TYPE_INTEGER; + args[2].integer.value = PHYTMAC_PWCTL_DEFAULT_VAL; - if (pdata->capacities & PHYTMAC_CAPS_LPI) { + if (on == PHYTMAC_POWERON) { + acpi_sts = acpi_evaluate_integer(handle, "PPWO", &arg_list, &rv); + if (ACPI_FAILURE(acpi_sts)) + netdev_err(pdata->ndev, "NO PPWO Method\n"); + if (rv) + netdev_err(pdata->ndev, "Failed to power on\n"); + } else { + acpi_sts = acpi_evaluate_integer(handle, "PPWD", &arg_list, &rv); + if (ACPI_FAILURE(acpi_sts)) + netdev_err(pdata->ndev, "NO PPWD Method\n"); + if (rv) + netdev_err(pdata->ndev, "Failed to power off\n"); + } + } else { ret = readx_poll_timeout(PHYTMAC_READ_STAT, pdata, status, !status, 1, PHYTMAC_TIMEOUT); if (ret) @@ -513,10 +615,10 @@ static int phytmac_powerup_hw(struct phytmac *pdata, int on) rdata1 = PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA1); if (rdata1 == data1) netdev_err(pdata->ndev, "gmac power %s success, data1 = %x, rdata1=%x\n", - on ? "up" : "down", data1, rdata1); + on == PHYTMAC_POWERON ? 
"up" : "down", data1, rdata1); else netdev_err(pdata->ndev, "gmac power %s failed, data1 = %x, rdata1=%x\n", - on ? "up" : "down", data1, rdata1); + on == PHYTMAC_POWERON ? "up" : "down", data1, rdata1); } pdata->power_state = on; @@ -524,20 +626,32 @@ static int phytmac_powerup_hw(struct phytmac *pdata, int on) return 0; } -static int phytmac_set_wake(struct phytmac *pdata, int wake) +static int phytmac_v2_set_wake(struct phytmac *pdata, int wake) { u16 cmd_id, cmd_subid; - u8 wol = (u8)wake; + struct phytmac_wol para; + u32 wol_type = 0; + + if (wake & PHYTMAC_WAKE_MAGIC) + wol_type |= PHYTMAC_BIT(MAGIC); + if (wake & PHYTMAC_WAKE_ARP) + wol_type |= PHYTMAC_BIT(ARP); + if (wake & PHYTMAC_WAKE_UCAST) + wol_type |= PHYTMAC_BIT(UCAST); + if (wake & PHYTMAC_WAKE_MCAST) + wol_type |= PHYTMAC_BIT(MCAST); cmd_id = PHYTMAC_MSG_CMD_SET; cmd_subid = PHYTMAC_MSG_CMD_SET_WOL; - - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&wol), 1, 1); + memset(¶, 0, sizeof(para)); + para.wol_type = cpu_to_le32(wol_type); + para.wake = (u8)wake; + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); return 0; } -static int phytmac_enable_promise(struct phytmac *pdata, int enable) +static int phytmac_v2_enable_promise(struct phytmac *pdata, int enable) { u16 cmd_id, cmd_subid; u8 rxcsum = 0; @@ -551,12 +665,12 @@ static int phytmac_enable_promise(struct phytmac *pdata, int enable) rxcsum = 1; } - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxcsum), 1, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxcsum), 1, 1); return 0; } -static int phytmac_enable_rxcsum(struct phytmac *pdata, int enable) +static int phytmac_v2_enable_rxcsum(struct phytmac *pdata, int enable) { u16 cmd_id, cmd_subid; @@ -566,12 +680,12 @@ static int phytmac_enable_rxcsum(struct phytmac *pdata, int enable) else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_RXCSUM; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, 
NULL, 0, 1); return 0; } -static int phytmac_enable_txcsum(struct phytmac *pdata, int enable) +static int phytmac_v2_enable_txcsum(struct phytmac *pdata, int enable) { u16 cmd_id, cmd_subid; @@ -581,12 +695,12 @@ static int phytmac_enable_txcsum(struct phytmac *pdata, int enable) else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_TXCSUM; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); return 0; } -static int phytmac_enable_mdio(struct phytmac *pdata, int enable) +static int phytmac_v2_enable_mdio(struct phytmac *pdata, int enable) { u16 cmd_id, cmd_subid; @@ -596,12 +710,12 @@ static int phytmac_enable_mdio(struct phytmac *pdata, int enable) else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_MDIO; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); return 0; } -static int phytmac_enable_autoneg(struct phytmac *pdata, int enable) +static int phytmac_v2_enable_autoneg(struct phytmac *pdata, int enable) { u16 cmd_id, cmd_subid; @@ -611,13 +725,13 @@ static int phytmac_enable_autoneg(struct phytmac *pdata, int enable) else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_AUTONEG; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); pdata->autoneg = enable; return 0; } -static int phytmac_enable_pause(struct phytmac *pdata, int enable) +static int phytmac_v2_enable_pause(struct phytmac *pdata, int enable) { u16 cmd_id, cmd_subid; @@ -627,12 +741,12 @@ static int phytmac_enable_pause(struct phytmac *pdata, int enable) else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_PAUSE; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); return 0; } -static int phytmac_enable_network(struct phytmac *pdata, int enable, int rx_tx) +static int phytmac_v2_enable_network(struct phytmac *pdata, int enable, int rx_tx) { u16 cmd_id, cmd_subid; @@ 
-642,12 +756,12 @@ static int phytmac_enable_network(struct phytmac *pdata, int enable, int rx_tx) else cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_NETWORK; - phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); return 0; } -static int phytmac_add_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +static int phytmac_v2_add_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) { struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; struct phytmac_fdir_info fdir; @@ -675,19 +789,19 @@ static int phytmac_add_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_ fdir.srcport_mask = tp4sp_m->psrc; } - fdir.location = rx_flow->location; - fdir.queue = rx_flow->ring_cookie; + fdir.location = (u8)(rx_flow->location); + fdir.queue = (u8)(rx_flow->ring_cookie); if (fdir.ipsrc_en || fdir.ipdst_en || fdir.port_en) { cmd_id = PHYTMAC_MSG_CMD_SET; cmd_subid = PHYTMAC_MSG_CMD_SET_ADD_FDIR; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); } return 0; } -static int phytmac_del_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +static int phytmac_v2_del_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) { struct phytmac_fdir_info fdir; u16 cmd_id, cmd_subid; @@ -695,21 +809,21 @@ static int phytmac_del_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_ memset(&fdir, 0, sizeof(fdir)); cmd_id = PHYTMAC_MSG_CMD_SET; cmd_subid = PHYTMAC_MSG_CMD_SET_DEL_FDIR; - fdir.location = (u8)rx_flow->location; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); + fdir.location = (u8)(rx_flow->location); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); return 0; } -static void phytmac_tx_start(struct phytmac_queue *queue) +static void phytmac_v2_tx_start(struct phytmac_queue *queue) 
{ struct phytmac *pdata = queue->pdata; - PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(queue->index), queue->tx_tail); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_PTR(queue->index), queue->tx_tail); queue->tx_xmit_more = 0; } -static u32 phytmac_get_irq_mask(u32 mask) +static u32 phytmac_v2_get_irq_mask(u32 mask) { u32 value = 0; @@ -722,7 +836,7 @@ static u32 phytmac_get_irq_mask(u32 mask) return value; } -static u32 phytmac_get_irq_status(u32 value) +static u32 phytmac_v2_get_irq_status(u32 value) { u32 status = 0; @@ -735,111 +849,124 @@ static u32 phytmac_get_irq_status(u32 value) return status; } -static void phytmac_enable_irq(struct phytmac *pdata, +static void phytmac_v2_enable_irq(struct phytmac *pdata, int queue_index, u32 mask) { u32 value; - value = phytmac_get_irq_mask(mask); + value = phytmac_v2_get_irq_mask(mask); PHYTMAC_WRITE(pdata, PHYTMAC_INT_ER(queue_index), value); } -static void phytmac_disable_irq(struct phytmac *pdata, +static void phytmac_v2_disable_irq(struct phytmac *pdata, int queue_index, u32 mask) { u32 value; - value = phytmac_get_irq_mask(mask); + value = phytmac_v2_get_irq_mask(mask); PHYTMAC_WRITE(pdata, PHYTMAC_INT_DR(queue_index), value); } -static void phytmac_clear_irq(struct phytmac *pdata, +static void phytmac_v2_clear_irq(struct phytmac *pdata, int queue_index, u32 mask) { u32 value; - value = phytmac_get_irq_mask(mask); + value = phytmac_v2_get_irq_mask(mask); PHYTMAC_WRITE(pdata, PHYTMAC_INT_SR(queue_index), value); } -static unsigned int phytmac_get_irq(struct phytmac *pdata, int queue_index) +static unsigned int phytmac_v2_get_irq(struct phytmac *pdata, int queue_index) { u32 status; u32 value; value = PHYTMAC_READ(pdata, PHYTMAC_INT_SR(queue_index)); - status = phytmac_get_irq_status(value); + status = phytmac_v2_get_irq_status(value); return status; } -static void phytmac_interface_config(struct phytmac *pdata, unsigned int mode, +static void phytmac_v2_interface_config(struct phytmac *pdata, unsigned int mode, const struct 
phylink_link_state *state) { struct phytmac_interface_info para; u16 cmd_id, cmd_subid; + u8 autoneg = 0; + + if (state->interface == PHY_INTERFACE_MODE_SGMII) { + if (mode == MLO_AN_FIXED) + autoneg = 0; + else + autoneg = 1; + } + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + autoneg = 1; + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + autoneg = 0; memset(¶, 0, sizeof(para)); cmd_id = PHYTMAC_MSG_CMD_SET; - cmd_subid = PHYTMAC_MSG_CMD_SET_MAC_CONFIG; - para.interface = state->interface; - para.autoneg = (mode == MLO_AN_FIXED ? 0 : 1); + cmd_subid = PHYTMAC_MSG_CMD_SET_INIT_MAC_CONFIG; + para.interface = pdata->phytmac_v2_interface; + para.autoneg = autoneg; para.speed = state->speed; para.duplex = state->duplex; pdata->autoneg = para.autoneg; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); } -static int phytmac_interface_linkup(struct phytmac *pdata, phy_interface_t interface, +static int phytmac_v2_interface_linkup(struct phytmac *pdata, phy_interface_t interface, int speed, int duplex) { struct phytmac_interface_info para; u16 cmd_id, cmd_subid; + if (interface == PHY_INTERFACE_MODE_SGMII) { + if (speed == SPEED_2500) + pdata->autoneg = 0; + } + memset(¶, 0, sizeof(para)); cmd_id = PHYTMAC_MSG_CMD_SET; cmd_subid = PHYTMAC_MSG_CMD_SET_MAC_LINK_CONFIG; - para.interface = interface; + para.interface = pdata->phytmac_v2_interface; para.duplex = duplex; para.speed = speed; para.autoneg = pdata->autoneg; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); return 0; } -static int phytmac_interface_linkdown(struct phytmac *pdata) +static int phytmac_v2_interface_linkdown(struct phytmac *pdata) { return 0; } -static int phytmac_pcs_linkup(struct phytmac *pdata, phy_interface_t interface, +static int phytmac_v2_pcs_linkup(struct phytmac 
*pdata, phy_interface_t interface, int speed, int duplex) { - struct phytmac_interface_info para; u16 cmd_id, cmd_subid; if (interface == PHY_INTERFACE_MODE_USXGMII || interface == PHY_INTERFACE_MODE_10GBASER) { - memset(¶, 0, sizeof(para)); cmd_id = PHYTMAC_MSG_CMD_SET; cmd_subid = PHYTMAC_MSG_CMD_SET_PCS_LINK_UP; - para.interface = interface; - para.duplex = duplex; - para.speed = speed; - para.autoneg = 0; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); } return 0; } -static int phytmac_pcs_linkdown(struct phytmac *pdata) +static int phytmac_v2_pcs_linkdown(struct phytmac *pdata) { return 0; } -static unsigned int phytmac_pcs_get_link(struct phytmac *pdata, phy_interface_t interface) +static unsigned int phytmac_v2_pcs_get_link(struct phytmac *pdata, phy_interface_t interface) { if (interface == PHY_INTERFACE_MODE_SGMII || interface == PHY_INTERFACE_MODE_1000BASEX || @@ -852,7 +979,7 @@ static unsigned int phytmac_pcs_get_link(struct phytmac *pdata, phy_interface_t return 0; } -static unsigned int phytmac_tx_map_desc(struct phytmac_queue *queue, +static unsigned int phytmac_v2_tx_map_desc(struct phytmac_queue *queue, u32 tx_tail, struct packet_info *packet) { unsigned int i, ctrl; @@ -896,7 +1023,7 @@ static unsigned int phytmac_tx_map_desc(struct phytmac_queue *queue, return 0; } -static void phytmac_init_rx_map_desc(struct phytmac_queue *queue, +static void phytmac_v2_init_rx_map_desc(struct phytmac_queue *queue, u32 index) { struct phytmac_dma_desc *desc; @@ -909,7 +1036,7 @@ static void phytmac_init_rx_map_desc(struct phytmac_queue *queue, desc->desc0 |= PHYTMAC_BIT(RXUSED); } -static unsigned int phytmac_rx_map_desc(struct phytmac_queue *queue, u32 index, dma_addr_t addr) +static unsigned int phytmac_v2_rx_map_desc(struct phytmac_queue *queue, u32 index, dma_addr_t addr) { struct phytmac *pdata = queue->pdata; struct phytmac_dma_desc *desc; @@ -922,32 +1049,51 @@ 
static unsigned int phytmac_rx_map_desc(struct phytmac_queue *queue, u32 index, desc->desc1 = 0; desc->desc2 = upper_32_bits(addr); /* Make newly descriptor to hardware */ - dma_wmb(); + if (!(pdata->capacities & PHYTMAC_CAPS_RXPTR)) + dma_wmb(); desc->desc0 = lower_32_bits(addr); } else { desc->desc1 = 0; /* make newly descriptor to hardware */ - dma_wmb(); + if (!(pdata->capacities & PHYTMAC_CAPS_RXPTR)) + dma_wmb(); desc->desc0 &= ~PHYTMAC_BIT(RXUSED); } return 0; } -static unsigned int phytmac_zero_rx_desc_addr(struct phytmac_dma_desc *desc) +static unsigned int phytmac_v2_zero_rx_desc_addr(struct phytmac_dma_desc *desc) { desc->desc2 = 0; - desc->desc0 = PHYTMAC_BIT(RXUSED); + desc->desc0 = (desc->desc0 & PHYTMAC_BIT(RXTSVALID)) | PHYTMAC_BIT(RXUSED); return 0; } -static int phytmac_tx_complete(const struct phytmac_dma_desc *desc) +static unsigned int phytmac_v2_zero_tx_desc(struct phytmac_dma_desc *desc) +{ + desc->desc2 = 0; + desc->desc0 = 0; + desc->desc1 &= ~PHYTMAC_BIT(TXUSED); + + return 0; +} + +static void phytmac_v2_update_rx_tail(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + + if (pdata->capacities & PHYTMAC_CAPS_RXPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_RX_PTR(queue->index), queue->rx_head); +} + +static int phytmac_v2_tx_complete(const struct phytmac_dma_desc *desc) { return PHYTMAC_GET_BITS(desc->desc1, TXUSED); } -static bool phytmac_rx_complete(const struct phytmac_dma_desc *desc) +static bool phytmac_v2_rx_complete(const struct phytmac_dma_desc *desc) { dma_addr_t addr; bool used; @@ -962,7 +1108,7 @@ static bool phytmac_rx_complete(const struct phytmac_dma_desc *desc) return false; } -static int phytmac_rx_pkt_len(struct phytmac *pdata, const struct phytmac_dma_desc *desc) +static int phytmac_v2_rx_pkt_len(struct phytmac *pdata, const struct phytmac_dma_desc *desc) { if (pdata->capacities & PHYTMAC_CAPS_JUMBO) return desc->desc1 & PHYTMAC_RXJFRMLEN_MASK; @@ -970,7 +1116,7 @@ static int phytmac_rx_pkt_len(struct phytmac 
*pdata, const struct phytmac_dma_de return desc->desc1 & PHYTMAC_RXFRMLEN_MASK; } -static bool phytmac_rx_checksum(const struct phytmac_dma_desc *desc) +static bool phytmac_v2_rx_checksum(const struct phytmac_dma_desc *desc) { u32 value = desc->desc1; u32 check = value >> PHYTMAC_RXCSUM_INDEX & 0x3; @@ -978,28 +1124,28 @@ static bool phytmac_rx_checksum(const struct phytmac_dma_desc *desc) return (check == PHYTMAC_RXCSUM_IP_TCP || check == PHYTMAC_RXCSUM_IP_UDP); } -static bool phytmac_rx_single_buffer(const struct phytmac_dma_desc *desc) +static bool phytmac_v2_rx_single_buffer(const struct phytmac_dma_desc *desc) { u32 value = desc->desc1; return ((value & PHYTMAC_BIT(RXSOF)) && (value & PHYTMAC_BIT(RXEOF))); } -static bool phytmac_rx_sof(const struct phytmac_dma_desc *desc) +static bool phytmac_v2_rx_sof(const struct phytmac_dma_desc *desc) { u32 value = desc->desc1; return (value & PHYTMAC_BIT(RXSOF)); } -static bool phytmac_rx_eof(const struct phytmac_dma_desc *desc) +static bool phytmac_v2_rx_eof(const struct phytmac_dma_desc *desc) { u32 value = desc->desc1; return (value & PHYTMAC_BIT(RXEOF)); } -static void phytmac_clear_rx_desc(struct phytmac_queue *queue, int begin, int end) +static void phytmac_v2_clear_rx_desc(struct phytmac_queue *queue, int begin, int end) { unsigned int frag; unsigned int tmp = end; @@ -1014,7 +1160,7 @@ static void phytmac_clear_rx_desc(struct phytmac_queue *queue, int begin, int en } } -static void phytmac_clear_tx_desc(struct phytmac_queue *queue) +static void phytmac_v2_clear_tx_desc(struct phytmac_queue *queue) { struct phytmac *pdata = queue->pdata; struct phytmac_dma_desc *desc = NULL; @@ -1031,10 +1177,10 @@ static void phytmac_clear_tx_desc(struct phytmac_queue *queue) desc->desc1 = PHYTMAC_BIT(TXUSED); } desc->desc1 |= PHYTMAC_BIT(TXWRAP); - PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(queue->index), queue->tx_tail); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_PTR(queue->index), queue->tx_tail); } -static void phytmac_get_time(struct 
phytmac *pdata, struct timespec64 *ts) +static void phytmac_v2_get_time(struct phytmac *pdata, struct timespec64 *ts) { u32 ns, secl, sech; @@ -1046,7 +1192,7 @@ static void phytmac_get_time(struct phytmac *pdata, struct timespec64 *ts) ts->tv_sec = (((u64)sech << 32) | secl) & TIMER_SEC_MAX_VAL; } -void phytmac_set_time(struct phytmac *pdata, time64_t sec, long nsec) +static void phytmac_v2_set_time(struct phytmac *pdata, time64_t sec, long nsec) { u32 secl, sech; @@ -1059,7 +1205,7 @@ void phytmac_set_time(struct phytmac *pdata, time64_t sec, long nsec) PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_NSEC, nsec); } -void phytmac_clear_time(struct phytmac *pdata) +static void phytmac_v2_clear_time(struct phytmac *pdata) { u32 value; @@ -1077,7 +1223,7 @@ void phytmac_clear_time(struct phytmac *pdata) PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_ADJUST, 0); } -int phytmac_set_tsmode(struct phytmac *pdata, struct ts_ctrl *ctrl) +static int phytmac_v2_set_tsmode(struct phytmac *pdata, struct ts_ctrl *ctrl) { u16 cmd_id, cmd_subid; struct phytmac_ts_config para; @@ -1087,12 +1233,12 @@ int phytmac_set_tsmode(struct phytmac *pdata, struct ts_ctrl *ctrl) para.tx_mode = ctrl->tx_control; para.rx_mode = ctrl->rx_control; para.one_step = ctrl->one_step; - phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + phytmac_v2_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); return 0; } -static int phytmac_set_tsincr(struct phytmac *pdata, struct ts_incr *incr) +static int phytmac_v2_set_tsincr(struct phytmac *pdata, struct ts_incr *incr) { u32 value; @@ -1103,17 +1249,17 @@ static int phytmac_set_tsincr(struct phytmac *pdata, struct ts_incr *incr) return 0; } -static void phytmac_ptp_init_hw(struct phytmac *pdata) +static void phytmac_v2_ptp_init_hw(struct phytmac *pdata) { struct timespec64 ts; ts = ns_to_timespec64(ktime_to_ns(ktime_get_real())); - phytmac_set_time(pdata, ts.tv_sec, ts.tv_nsec); + phytmac_v2_set_time(pdata, ts.tv_sec, ts.tv_nsec); - 
phytmac_set_tsincr(pdata, &pdata->ts_incr); + phytmac_v2_set_tsincr(pdata, &pdata->ts_incr); } -static int phytmac_adjust_fine(struct phytmac *pdata, long ppm, bool negative) +static int phytmac_v2_adjust_fine(struct phytmac *pdata, long ppm, bool negative) { struct ts_incr ts_incr; u32 tmp; @@ -1133,22 +1279,25 @@ static int phytmac_adjust_fine(struct phytmac *pdata, long ppm, bool negative) & ((1 << PHYTMAC_INCR_NSEC_WIDTH) - 1); ts_incr.sub_ns = adj & ((1 << PHYTMAC_INCR_SNSEC_WIDTH) - 1); - phytmac_set_tsincr(pdata, &ts_incr); + phytmac_v2_set_tsincr(pdata, &ts_incr); return 0; } -int phytmac_adjust_time(struct phytmac *pdata, s64 delta, int neg) +static int phytmac_v2_adjust_time(struct phytmac *pdata, s64 delta, int neg) { u32 adj; if (delta > PHYTMAC_ASEC_MAX) { struct timespec64 now, then; - then = ns_to_timespec64(delta); - phytmac_get_time(pdata, &now); + if (neg) + then = ns_to_timespec64(-delta); + else + then = ns_to_timespec64(delta); + phytmac_v2_get_time(pdata, &now); now = timespec64_add(now, then); - phytmac_set_time(pdata, now.tv_sec, now.tv_nsec); + phytmac_v2_set_time(pdata, now.tv_sec, now.tv_nsec); } else { adj = (neg << PHYTMAC_AADD_INDEX) | delta; PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_ADJUST, adj); @@ -1157,7 +1306,7 @@ int phytmac_adjust_time(struct phytmac *pdata, s64 delta, int neg) return 0; } -static int phytmac_ts_valid(struct phytmac *pdata, struct phytmac_dma_desc *desc, int direction) +static int phytmac_v2_ts_valid(struct phytmac *pdata, struct phytmac_dma_desc *desc, int direction) { int ts_valid = 0; @@ -1168,7 +1317,7 @@ static int phytmac_ts_valid(struct phytmac *pdata, struct phytmac_dma_desc *desc return ts_valid; } -static void phytmac_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 ts_2, struct timespec64 *ts) +static void phytmac_v2_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 ts_2, struct timespec64 *ts) { struct timespec64 ts2; @@ -1176,7 +1325,7 @@ static void phytmac_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 
ts_2, struct PHYTMAC_GET_BITS(ts_1, TS_SECL); ts->tv_nsec = PHYTMAC_GET_BITS(ts_1, TS_NSEC); - phytmac_get_time(pdata, &ts2); + phytmac_v2_get_time(pdata, &ts2); if (((ts->tv_sec ^ ts2.tv_sec) & (PHYTMAC_TS_SEC_TOP >> 1)) != 0) ts->tv_sec -= PHYTMAC_TS_SEC_TOP; @@ -1184,80 +1333,83 @@ static void phytmac_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 ts_2, struct ts->tv_sec += (ts2.tv_sec & (~PHYTMAC_TS_SEC_MASK)); } -static unsigned int phytmac_get_ts_rate(struct phytmac *pdata) +static unsigned int phytmac_v2_get_ts_rate(struct phytmac *pdata) { return 300000000; } struct phytmac_hw_if phytmac_2p0_hw = { - .init_msg_ring = phytmac_init_msg_ring, - .reset_hw = phytmac_reset_hw, - .init_hw = phytmac_init_hw, - .init_ring_hw = phytmac_init_ring_hw, - .get_feature = phytmac_get_feature_all, - .get_regs = phytmac_get_regs, - .get_stats = phytmac_get_hw_stats, - .set_mac_address = phytmac_set_mac_addr, - .get_mac_address = phytmac_get_mac_addr, - .mdio_read = phytmac_mdio_data_read_c22, - .mdio_write = phytmac_mdio_data_write_c22, - .mdio_read_c45 = phytmac_mdio_data_read_c45, - .mdio_write_c45 = phytmac_mdio_data_write_c45, - .poweron = phytmac_powerup_hw, - .set_wol = phytmac_set_wake, - .enable_promise = phytmac_enable_promise, - .enable_multicast = phytmac_enable_multicast, - .set_hash_table = phytmac_set_mc_hash, - .enable_rx_csum = phytmac_enable_rxcsum, - .enable_tx_csum = phytmac_enable_txcsum, - .enable_mdio_control = phytmac_enable_mdio, - .enable_autoneg = phytmac_enable_autoneg, - .enable_pause = phytmac_enable_pause, - .enable_network = phytmac_enable_network, - .add_fdir_entry = phytmac_add_fdir_entry, - .del_fdir_entry = phytmac_del_fdir_entry, + .init_msg_ring = phytmac_v2_init_msg_ring, + .reset_hw = phytmac_v2_reset_hw, + .init_hw = phytmac_v2_init_hw, + .init_ring_hw = phytmac_v2_init_ring_hw, + .get_feature = phytmac_v2_get_feature_all, + .get_regs = phytmac_v2_get_regs, + .get_stats = phytmac_v2_get_hw_stats, + .set_mac_address = 
phytmac_v2_set_mac_addr, + .get_mac_address = phytmac_v2_get_mac_addr, + .mdio_idle = phytmac_v2_mdio_idle, + .mdio_read = phytmac_v2_mdio_data_read_c22, + .mdio_write = phytmac_v2_mdio_data_write_c22, + .mdio_read_c45 = phytmac_v2_mdio_data_read_c45, + .mdio_write_c45 = phytmac_v2_mdio_data_write_c45, + .poweron = phytmac_v2_powerup_hw, + .set_wol = phytmac_v2_set_wake, + .enable_promise = phytmac_v2_enable_promise, + .enable_multicast = phytmac_v2_enable_multicast, + .set_hash_table = phytmac_v2_set_mc_hash, + .enable_rx_csum = phytmac_v2_enable_rxcsum, + .enable_tx_csum = phytmac_v2_enable_txcsum, + .enable_mdio_control = phytmac_v2_enable_mdio, + .enable_autoneg = phytmac_v2_enable_autoneg, + .enable_pause = phytmac_v2_enable_pause, + .enable_network = phytmac_v2_enable_network, + .add_fdir_entry = phytmac_v2_add_fdir_entry, + .del_fdir_entry = phytmac_v2_del_fdir_entry, /* mac config */ - .mac_config = phytmac_interface_config, - .mac_linkup = phytmac_interface_linkup, - .mac_linkdown = phytmac_interface_linkdown, - .pcs_linkup = phytmac_pcs_linkup, - .pcs_linkdown = phytmac_pcs_linkdown, - .get_link = phytmac_pcs_get_link, + .mac_config = phytmac_v2_interface_config, + .mac_linkup = phytmac_v2_interface_linkup, + .mac_linkdown = phytmac_v2_interface_linkdown, + .pcs_linkup = phytmac_v2_pcs_linkup, + .pcs_linkdown = phytmac_v2_pcs_linkdown, + .get_link = phytmac_v2_pcs_get_link, /* irq */ - .enable_irq = phytmac_enable_irq, - .disable_irq = phytmac_disable_irq, - .clear_irq = phytmac_clear_irq, - .get_irq = phytmac_get_irq, + .enable_irq = phytmac_v2_enable_irq, + .disable_irq = phytmac_v2_disable_irq, + .clear_irq = phytmac_v2_clear_irq, + .get_irq = phytmac_v2_get_irq, /* tx and rx */ - .tx_map = phytmac_tx_map_desc, - .transmit = phytmac_tx_start, - .tx_complete = phytmac_tx_complete, - .rx_complete = phytmac_rx_complete, - .get_rx_pkt_len = phytmac_rx_pkt_len, - .init_rx_map = phytmac_init_rx_map_desc, - .rx_map = phytmac_rx_map_desc, - .rx_checksum = 
phytmac_rx_checksum, - .rx_single_buffer = phytmac_rx_single_buffer, - .rx_pkt_start = phytmac_rx_sof, - .rx_pkt_end = phytmac_rx_eof, - .clear_rx_desc = phytmac_clear_rx_desc, - .clear_tx_desc = phytmac_clear_tx_desc, - .zero_rx_desc_addr = phytmac_zero_rx_desc_addr, + .tx_map = phytmac_v2_tx_map_desc, + .transmit = phytmac_v2_tx_start, + .update_rx_tail = phytmac_v2_update_rx_tail, + .tx_complete = phytmac_v2_tx_complete, + .rx_complete = phytmac_v2_rx_complete, + .get_rx_pkt_len = phytmac_v2_rx_pkt_len, + .init_rx_map = phytmac_v2_init_rx_map_desc, + .rx_map = phytmac_v2_rx_map_desc, + .rx_checksum = phytmac_v2_rx_checksum, + .rx_single_buffer = phytmac_v2_rx_single_buffer, + .rx_pkt_start = phytmac_v2_rx_sof, + .rx_pkt_end = phytmac_v2_rx_eof, + .clear_rx_desc = phytmac_v2_clear_rx_desc, + .clear_tx_desc = phytmac_v2_clear_tx_desc, + .zero_rx_desc_addr = phytmac_v2_zero_rx_desc_addr, + .zero_tx_desc = phytmac_v2_zero_tx_desc, /* ptp */ - .init_ts_hw = phytmac_ptp_init_hw, - .set_time = phytmac_set_time, - .clear_time = phytmac_clear_time, - .get_time = phytmac_get_time, - .set_ts_config = phytmac_set_tsmode, - .set_incr = phytmac_set_tsincr, - .adjust_fine = phytmac_adjust_fine, - .adjust_time = phytmac_adjust_time, - .ts_valid = phytmac_ts_valid, - .get_timestamp = phytmac_get_dma_ts, - .get_ts_rate = phytmac_get_ts_rate, + .init_ts_hw = phytmac_v2_ptp_init_hw, + .set_time = phytmac_v2_set_time, + .clear_time = phytmac_v2_clear_time, + .get_time = phytmac_v2_get_time, + .set_ts_config = phytmac_v2_set_tsmode, + .set_incr = phytmac_v2_set_tsincr, + .adjust_fine = phytmac_v2_adjust_fine, + .adjust_time = phytmac_v2_adjust_time, + .ts_valid = phytmac_v2_ts_valid, + .get_timestamp = phytmac_v2_get_dma_ts, + .get_ts_rate = phytmac_v2_get_ts_rate, }; EXPORT_SYMBOL_GPL(phytmac_2p0_hw); diff --git a/drivers/net/ethernet/phytium/phytmac_v2.h b/drivers/net/ethernet/phytium/phytmac_v2.h index 
92e4806ac2c1f7f72be6f0f74d34b6624d23800c..c9e159b1eb9b64706961459729e50725c1e39517 100644 --- a/drivers/net/ethernet/phytium/phytmac_v2.h +++ b/drivers/net/ethernet/phytium/phytmac_v2.h @@ -1,11 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2022 - 2025 Phytium Technology Co., Ltd. */ + #ifndef _PHYTMAC_V2_H #define _PHYTMAC_V2_H extern struct phytmac_hw_if phytmac_2p0_hw; +#define PHYTMAC_CMD_PRC_SUCCESS 0x1 #define PHYTMAC_MSG_SRAM_SIZE 4096 -#define MSG_HDR_LEN 8 +#define PHYTMAC_MSG_HDR_LEN 8 +#define PHYTMAC_MSG_PARA_LEN 56 +#define PHYTMAC_READ_REG_NUM_MAX (PHYTMAC_MSG_PARA_LEN / sizeof(u32)) #define PHYTMAC_TX_MSG_HEAD 0x000 #define PHYTMAC_TX_MSG_TAIL 0x004 @@ -13,6 +18,7 @@ extern struct phytmac_hw_if phytmac_2p0_hw; #define PHYTMAC_RX_MSG_TAIL 0x00c #define PHYTMAC_MSG_IMR 0x020 #define PHYTMAC_MSG_ISR 0x02c +#define PHYTMAC_TAILPTR_ENABLE 0x038 #define PHYTMAC_SIZE 0x0048 #define PHYTMAC_NETWORK_STATUS 0x0240 @@ -25,7 +31,8 @@ extern struct phytmac_hw_if phytmac_2p0_hw; #define PHYTMAC_TIMER_SEC 0x0258 #define PHYTMAC_TIMER_NSEC 0x025c #define PHYTMAC_TIMER_ADJUST 0x0260 -#define PHYTMAC_MSG(i) (((i) - 1) * 0x48) +#define PHYTMAC_MSG(i) ((i) * sizeof(struct phytmac_msg_info)) +#define PHYTMAC_OCT_TX 0x400 #define PHYTMAC_MODULE_ID_GMAC 0x60 #define PHYTMAC_FLAGS_MSG_COMP 0x1 @@ -39,6 +46,12 @@ extern struct phytmac_hw_if phytmac_2p0_hw; #define PHYTMAC_MSG_COMPLETE_INDEX 0 #define PHYTMAC_MSG_COMPLETE_WIDTH 1 +/* Bitfields in PHYTMAC_TAILPTR_ENABLE */ +#define PHYTMAC_TXTAIL_EN_INDEX 0 /* Enable tx tail */ +#define PHYTMAC_TXTAIL_EN_WIDTH 1 +#define PHYTMAC_RXTAIL_EN_INDEX 16 /* Enable rx tail */ +#define PHYTMAC_RXTAIL_EN_WIDTH 1 + /* Bitfields in PHYTMAC_SIZE */ #define PHYTMAC_MEM_SIZE_INDEX 0 #define PHYTMAC_MEM_SIZE_WIDTH 4 @@ -83,7 +96,8 @@ extern struct phytmac_hw_if phytmac_2p0_hw; #define TIMER_SEC_MAX_VAL (((u64)1 << PHYTMAC_TIMER_SEC_WIDTH) - 1) #define TIMER_NSEC_MAX_VAL ((1 << PHYTMAC_TIMER_NSEC_WIDTH) - 1) -#define 
PHYTMAC_TAIL_PTR(i) (0x0100 + ((i) * 4)) +#define PHYTMAC_TX_PTR(i) (0x0100 + ((i) * 4)) +#define PHYTMAC_RX_PTR(i) (0x0030 + ((i) * 4)) #define PHYTMAC_INT_ER(i) (0x0140 + ((i) * 4)) #define PHYTMAC_INT_DR(i) (0x0180 + ((i) * 4)) #define PHYTMAC_INT_MR(i) (0x01c0 + ((i) * 4)) @@ -200,15 +214,42 @@ extern struct phytmac_hw_if phytmac_2p0_hw; #define PHYTMAC_TS_SEC_MASK 0x3f #define PHYTMAC_TS_SEC_TOP 0x40 +/* WOL register */ +#define PHYTMAC_ARP_IP_INDEX 0 +#define PHYTMAC_ARP_IP_WIDTH 16 +#define PHYTMAC_MAGIC_INDEX 16 +#define PHYTMAC_MAGIC_WIDTH 1 +#define PHYTMAC_ARP_INDEX 17 +#define PHYTMAC_ARP_WIDTH 1 +#define PHYTMAC_UCAST_INDEX 18 +#define PHYTMAC_UCAST_WIDTH 1 +#define PHYTMAC_MCAST_INDEX 19 +#define PHYTMAC_MCAST_WIDTH 1 + #define HW_DMA_CAP_64B 0x1 #define HW_DMA_CAP_CSUM 0x2 #define HW_DMA_CAP_PTP 0x4 -#define HW_DMA_CAP_DDW64 0x8 -#define HW_DMA_CAP_DDW128 0x10 +#define HW_DMA_CAP_DDW32 0x8 +#define HW_DMA_CAP_DDW64 0x10 +#define HW_DMA_CAP_DDW128 0x20 +#define PHYTMAC_DBW32 1 #define PHYTMAC_DBW64 2 #define PHYTMAC_DBW128 4 +#define PHYTMAC_CLK_DIV8 0 +#define PHYTMAC_CLK_DIV16 1 +#define PHYTMAC_CLK_DIV32 2 +#define PHYTMAC_CLK_DIV48 3 +#define PHYTMAC_CLK_DIV64 4 +#define PHYTMAC_CLK_DIV96 5 +#define PHYTMAC_CLK_DIV128 6 +#define PHYTMAC_CLK_DIV224 7 + +#define PHYTMAC_RETRY_TIMES 50000 + +#define PHYTMAC_READ_NSR(pdata) PHYTMAC_READ(pdata, PHYTMAC_NETWORK_STATUS) + enum phytmac_msg_cmd_id { PHYTMAC_MSG_CMD_DEFAULT = 0, PHYTMAC_MSG_CMD_SET, @@ -218,7 +259,7 @@ enum phytmac_msg_cmd_id { }; enum phytmac_default_subid { - PHYTMAC_MSG_CMD_DEFAULT_RESET_HW = 0, + PHYTMAC_MSG_CMD_DEFAULT_RESET_HW = 1, PHYTMAC_MSG_CMD_DEFAULT_RESET_TX_QUEUE, PHYTMAC_MSG_CMD_DEFAULT_RESET_RX_QUEUE, }; @@ -228,7 +269,7 @@ enum phytmac_set_subid { PHYTMAC_MSG_CMD_SET_INIT_RING = 1, PHYTMAC_MSG_CMD_SET_INIT_TX_RING = 2, PHYTMAC_MSG_CMD_SET_INIT_RX_RING = 3, - PHYTMAC_MSG_CMD_SET_MAC_CONFIG = 4, + PHYTMAC_MSG_CMD_SET_INIT_MAC_CONFIG = 4, PHYTMAC_MSG_CMD_SET_ADDR = 5, 
PHYTMAC_MSG_CMD_SET_DMA_RX_BUFSIZE = 6, PHYTMAC_MSG_CMD_SET_DMA = 7, @@ -269,6 +310,13 @@ enum phytmac_set_subid { PHYTMAC_MSG_CMD_SET_DISABLE_AUTONEG = 42, PHYTMAC_MSG_CMD_SET_RX_DATA_OFFSET = 43, PHYTMAC_MSG_CMD_SET_WOL = 44, + PHYTMAC_MSG_CMD_SET_ENABLE_RSC = 45, + PHYTMAC_MSG_CMD_SET_DISABLE_RSC = 46, + PHYTMAC_MSG_CMD_SET_ENABLE_TX_START = 47, + PHYTMAC_MSG_CMD_SET_ENABLE_PCS_RESET = 48, + PHYTMAC_MSG_CMD_SET_DISABLE_PCS_RESET = 49, + PHYTMAC_MSG_CMD_SET_MDC = 50, + PHYTMAC_MSG_CMD_SET_OUTSTANDING = 51, }; enum phytmac_get_subid { @@ -278,6 +326,8 @@ enum phytmac_get_subid { PHYTMAC_MSG_CMD_GET_BD_PREFETCH, PHYTMAC_MSG_CMD_GET_STATS, PHYTMAC_MSG_CMD_GET_REG_READ, + PHYTMAC_MSG_CMD_GET_RX_FLOW, + PHYTMAC_MSG_CMD_GET_REGS_FOR_ETHTOOL, }; struct phytmac_interface_info { @@ -292,6 +342,11 @@ struct phytmac_mc_info { u32 mc_top; } __packed; +struct phytmac_reg_info { + u32 offset; + u16 regnum; +} __packed; + struct phytmac_fdir_info { u32 ip4src; u32 ip4dst; @@ -351,12 +406,36 @@ struct phytmac_feature { u8 max_rx_fs; } __packed; +struct phytmac_wol { + u32 wol_type; + u8 wake; +} __packed; + struct phytmac_msg_info { - u16 module_id; - u16 cmd_id; - u16 cmd_subid; - u16 flags; - u8 para[64]; + u8 reserved; + u8 seq; + u8 cmd_type; + u8 cmd_subid; + u16 len; + u8 status1; + u8 status0; + u8 para[PHYTMAC_MSG_PARA_LEN]; +} __packed; + +struct phytmac_ots_config { + u32 axi_rd; + u32 axi_wr; + u8 queuenum; } __packed; +struct phytmac_ethtool_reg { + u8 interface; + u8 cnt; +} __packed; + +static inline unsigned int phytmac_v2_tx_ring_wrap(struct phytmac *pdata, unsigned int index) +{ + return index & (pdata->msg_ring.tx_msg_ring_size - 1); +} + #endif diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c index 7a11fdb687cc411d5144a75e212ec426cd327476..d0763fc364980484e092fec69b79609728fb2579 100644 --- a/drivers/net/phy/motorcomm.c +++ b/drivers/net/phy/motorcomm.c @@ -1,968 +1,543 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Motorcomm 
8511/8521/8531/8531S PHY driver. + * drivers/net/phy/motorcomm.c * - * Author: Peter Geis - * Author: Frank + * Driver for Motorcomm PHYs + * + * Author: yinghong.zhang + * + * Copyright (c) 2024 Motorcomm, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Support Motorcomm Phys: + * Giga phys: yt8511, yt8521, yt8531, yt8543, yt8614, yt8618 + * 100/10M Phys: yt8510, yt8512, yt8522 + * Automotive 100M Phys: yt8010 + * Automotive 1000M Phys: yt8011 */ -#include #include #include #include #include - -#define PHY_ID_YT8511 0x0000010a -#define PHY_ID_YT8521 0x0000011a -#define PHY_ID_YT8531 0x4f51e91b -#define PHY_ID_YT8531S 0x4f51e91a - -/* YT8521/YT8531S Register Overview - * UTP Register space | FIBER Register space - * ------------------------------------------------------------ - * | UTP MII | FIBER MII | - * | UTP MMD | | - * | UTP Extended | FIBER Extended | - * ------------------------------------------------------------ - * | Common Extended | - * ------------------------------------------------------------ - */ - -/* 0x10 ~ 0x15 , 0x1E and 0x1F are common MII registers of yt phy */ - -/* Specific Function Control Register */ -#define YTPHY_SPECIFIC_FUNCTION_CONTROL_REG 0x10 - -/* 2b00 Manual MDI configuration - * 2b01 Manual MDIX configuration - * 2b10 Reserved - * 2b11 Enable automatic crossover for all modes *default* - */ -#define YTPHY_SFCR_MDI_CROSSOVER_MODE_MASK (BIT(6) | BIT(5)) -#define YTPHY_SFCR_CROSSOVER_EN BIT(3) -#define YTPHY_SFCR_SQE_TEST_EN BIT(2) -#define YTPHY_SFCR_POLARITY_REVERSAL_EN BIT(1) -#define YTPHY_SFCR_JABBER_DIS BIT(0) - -/* Specific Status Register */ -#define YTPHY_SPECIFIC_STATUS_REG 0x11 -#define YTPHY_SSR_SPEED_MODE_OFFSET 14 - -#define YTPHY_SSR_SPEED_MODE_MASK (BIT(15) | BIT(14)) -#define 
YTPHY_SSR_SPEED_10M 0x0 -#define YTPHY_SSR_SPEED_100M 0x1 -#define YTPHY_SSR_SPEED_1000M 0x2 -#define YTPHY_SSR_DUPLEX_OFFSET 13 -#define YTPHY_SSR_DUPLEX BIT(13) -#define YTPHY_SSR_PAGE_RECEIVED BIT(12) -#define YTPHY_SSR_SPEED_DUPLEX_RESOLVED BIT(11) -#define YTPHY_SSR_LINK BIT(10) -#define YTPHY_SSR_MDIX_CROSSOVER BIT(6) -#define YTPHY_SSR_DOWNGRADE BIT(5) -#define YTPHY_SSR_TRANSMIT_PAUSE BIT(3) -#define YTPHY_SSR_RECEIVE_PAUSE BIT(2) -#define YTPHY_SSR_POLARITY BIT(1) -#define YTPHY_SSR_JABBER BIT(0) - -/* Interrupt enable Register */ -#define YTPHY_INTERRUPT_ENABLE_REG 0x12 -#define YTPHY_IER_WOL BIT(6) - -/* Interrupt Status Register */ -#define YTPHY_INTERRUPT_STATUS_REG 0x13 -#define YTPHY_ISR_AUTONEG_ERR BIT(15) -#define YTPHY_ISR_SPEED_CHANGED BIT(14) -#define YTPHY_ISR_DUPLEX_CHANGED BIT(13) -#define YTPHY_ISR_PAGE_RECEIVED BIT(12) -#define YTPHY_ISR_LINK_FAILED BIT(11) -#define YTPHY_ISR_LINK_SUCCESSED BIT(10) -#define YTPHY_ISR_WOL BIT(6) -#define YTPHY_ISR_WIRESPEED_DOWNGRADE BIT(5) -#define YTPHY_ISR_SERDES_LINK_FAILED BIT(3) -#define YTPHY_ISR_SERDES_LINK_SUCCESSED BIT(2) -#define YTPHY_ISR_POLARITY_CHANGED BIT(1) -#define YTPHY_ISR_JABBER_HAPPENED BIT(0) - -/* Speed Auto Downgrade Control Register */ -#define YTPHY_SPEED_AUTO_DOWNGRADE_CONTROL_REG 0x14 -#define YTPHY_SADCR_SPEED_DOWNGRADE_EN BIT(5) - -/* If these bits are set to 3, the PHY attempts five times ( 3(set value) + - * additional 2) before downgrading, default 0x3 +#include +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +/* for wol feature */ +#include + +#include +#define MODULE_NAME "yt" +#define PHYDRV_VER "phydrv_ver" +static struct dentry *dir; +#include +#define YTPHY_LINUX_VERSION "2.2.42319" + +/**************** configuration section begin ************/ +/* if system depends on ethernet packet to restore from sleep, + * please define this macro to 1 otherwise, define it to 0. 
*/ -#define YTPHY_SADCR_SPEED_RETRY_LIMIT (0x3 << 2) - -/* Rx Error Counter Register */ -#define YTPHY_RX_ERROR_COUNTER_REG 0x15 - -/* Extended Register's Address Offset Register */ -#define YTPHY_PAGE_SELECT 0x1E +#define SYS_WAKEUP_BASED_ON_ETH_PKT 1 -/* Extended Register's Data Register */ -#define YTPHY_PAGE_DATA 0x1F - -/* FIBER Auto-Negotiation link partner ability */ -#define YTPHY_FLPA_PAUSE (0x3 << 7) -#define YTPHY_FLPA_ASYM_PAUSE (0x2 << 7) - -#define YT8511_PAGE_SELECT 0x1e -#define YT8511_PAGE 0x1f -#define YT8511_EXT_CLK_GATE 0x0c -#define YT8511_EXT_DELAY_DRIVE 0x0d -#define YT8511_EXT_SLEEP_CTRL 0x27 - -/* 2b00 25m from pll - * 2b01 25m from xtl *default* - * 2b10 62.m from pll - * 2b11 125m from pll +/* to enable system WOL feature of phy, please define this macro to 1 + * otherwise, define it to 0. */ -#define YT8511_CLK_125M (BIT(2) | BIT(1)) -#define YT8511_PLLON_SLP BIT(14) - -/* RX Delay enabled = 1.8ns 1000T, 8ns 10/100T */ -#define YT8511_DELAY_RX BIT(0) +#define YTPHY_WOL_FEATURE_ENABLE 0 -/* TX Gig-E Delay is bits 7:4, default 0x5 - * TX Fast-E Delay is bits 15:12, default 0xf - * Delay = 150ps * N - 250ps - * On = 2000ps, off = 50ps - */ -#define YT8511_DELAY_GE_TX_EN (0xf << 4) -#define YT8511_DELAY_GE_TX_DIS (0x2 << 4) -#define YT8511_DELAY_FE_TX_EN (0xf << 12) -#define YT8511_DELAY_FE_TX_DIS (0x2 << 12) - -/* Extended register is different from MMD Register and MII Register. - * We can use ytphy_read_ext/ytphy_write_ext/ytphy_modify_ext function to - * operate extended register. 
- * Extended Register start +/* some GMAC need clock input from PHY, for eg., 125M, + * please enable this macro + * by degault, it is set to 0 + * NOTE: this macro will need macro SYS_WAKEUP_BASED_ON_ETH_PKT to set to 1 */ +#define GMAC_CLOCK_INPUT_NEEDED 0 + +/* for YT8531 package A xtal init config */ +#define YTPHY8531A_XTAL_INIT (0) + +/**** configuration section end ************/ + +/* no need to change below */ +#define MOTORCOMM_PHY_ID_MASK 0xffffffff + +#define PHY_ID_YT8010 0x00000309 +#define PHY_ID_YT8010AS 0x4f51eb19 +#define PHY_ID_YT8011 0x4f51eb01 +#define PHY_ID_YT8510 0x00000109 +#define PHY_ID_YT8511 0x0000010a +#define PHY_ID_YT8512 0x00000128 +#define PHY_ID_YT8522 0x4f51e928 +#define PHY_ID_YT8521 0x0000011a +#define PHY_ID_YT8531S 0x4f51e91a +#define PHY_ID_YT8531 0x4f51e91b +/* YT8543 phy driver disable(default) */ +/* #define YTPHY_YT8543_ENABLE */ +#ifdef YTPHY_YT8543_ENABLE +#define PHY_ID_YT8543 0x0008011b +#endif +#define PHY_ID_YT8614 0x4f51e899 +#define PHY_ID_YT8614Q 0x4f51e8a9 +#define PHY_ID_YT8618 0x4f51e889 +#define PHY_ID_YT8821 0x4f51ea19 + +#define REG_PHY_SPEC_STATUS 0x11 +#define REG_DEBUG_ADDR_OFFSET 0x1e +#define REG_DEBUG_DATA 0x1f +#define REG_MII_MMD_CTRL 0x0D +#define REG_MII_MMD_DATA 0x0E + +#define YT8512_EXTREG_LED0 0x40c0 +#define YT8512_EXTREG_LED1 0x40c3 + +#define YT8512_EXTREG_SLEEP_CONTROL1 0x2027 +#define YT8512_EXTENDED_COMBO_CONTROL1 0x4000 +#define YT8512_10BT_DEBUG_LPBKS 0x200A + +#define YT_SOFTWARE_RESET 0x8000 + +#define YT8512_LED0_ACT_BLK_IND 0x1000 +#define YT8512_LED0_DIS_LED_AN_TRY 0x0001 +#define YT8512_LED0_BT_BLK_EN 0x0002 +#define YT8512_LED0_HT_BLK_EN 0x0004 +#define YT8512_LED0_COL_BLK_EN 0x0008 +#define YT8512_LED0_BT_ON_EN 0x0010 +#define YT8512_LED1_BT_ON_EN 0x0010 +#define YT8512_LED1_TXACT_BLK_EN 0x0100 +#define YT8512_LED1_RXACT_BLK_EN 0x0200 +#define YT8512_EN_SLEEP_SW_BIT 15 + +#define YT8522_TX_CLK_DELAY 0x4210 +#define YT8522_ANAGLOG_IF_CTRL 0x4008 +#define YT8522_DAC_CTRL 0x2057 
+#define YT8522_INTERPOLATOR_FILTER_1 0x14 +#define YT8522_INTERPOLATOR_FILTER_2 0x15 +#define YT8522_EXTENDED_COMBO_CTRL_1 0x4000 +#define YT8522_TX_DELAY_CONTROL 0x19 +#define YT8522_EXTENDED_PAD_CONTROL 0x4001 + +#define YT8521_EXTREG_SLEEP_CONTROL1 0x27 +#define YT8521_EN_SLEEP_SW_BIT 15 + +#define YTXXXX_SPEED_MODE 0xc000 +#define YTXXXX_DUPLEX 0x2000 +#define YTXXXX_SPEED_MODE_BIT 14 +#define YTXXXX_DUPLEX_BIT 13 +#define YTXXXX_AUTO_NEGOTIATION_BIT 12 +#define YTXXXX_ASYMMETRIC_PAUSE_BIT 11 +#define YTXXXX_PAUSE_BIT 10 +#define YTXXXX_LINK_STATUS_BIT 10 + +#define YT8821_SDS_ASYMMETRIC_PAUSE_BIT 8 +#define YT8821_SDS_PAUSE_BIT 7 + +/* based on yt8521 wol feature config register */ +#define YTPHY_UTP_INTR_REG 0x12 +/* WOL Feature Event Interrupt Enable */ +#define YTPHY_WOL_FEATURE_INTR BIT(6) + +/* Magic Packet MAC address registers */ +#define YTPHY_WOL_FEATURE_MACADDR2_4_MAGIC_PACKET 0xa007 +#define YTPHY_WOL_FEATURE_MACADDR1_4_MAGIC_PACKET 0xa008 +#define YTPHY_WOL_FEATURE_MACADDR0_4_MAGIC_PACKET 0xa009 + +#define YTPHY_WOL_FEATURE_REG_CFG 0xa00a +#define YTPHY_WOL_FEATURE_TYPE_CFG BIT(0) +#define YTPHY_WOL_FEATURE_ENABLE_CFG BIT(3) +#define YTPHY_WOL_FEATURE_INTR_SEL_CFG BIT(6) +#define YTPHY_WOL_FEATURE_WIDTH1_CFG BIT(1) +#define YTPHY_WOL_FEATURE_WIDTH2_CFG BIT(2) + +#define YTPHY_REG_SPACE_UTP 0 +#define YTPHY_REG_SPACE_FIBER 2 + +#define YTPHY_REG_SMI_MUX 0xa000 +#define YT8614_REG_SPACE_UTP 0 +#define YT8614_REG_SPACE_QSGMII 2 +#define YT8614_REG_SPACE_SGMII 3 + +enum ytphy_wol_feature_trigger_type_e { + YTPHY_WOL_FEATURE_PULSE_TRIGGER, + YTPHY_WOL_FEATURE_LEVEL_TRIGGER, + YTPHY_WOL_FEATURE_TRIGGER_TYPE_MAX +}; -/* Phy gmii clock gating Register */ -#define YT8521_CLOCK_GATING_REG 0xC -#define YT8521_CGR_RX_CLK_EN BIT(12) +enum ytphy_wol_feature_pulse_width_e { + YTPHY_WOL_FEATURE_672MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_336MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_168MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_84MS_PULSE_WIDTH, + YTPHY_WOL_FEATURE_PULSE_WIDTH_MAX 
+}; -#define YT8521_EXTREG_SLEEP_CONTROL1_REG 0x27 -#define YT8521_ESC1R_SLEEP_SW BIT(15) -#define YT8521_ESC1R_PLLON_SLP BIT(14) +struct ytphy_wol_feature_cfg { + bool enable; + int type; + int width; +}; -/* Phy fiber Link timer cfg2 Register */ -#define YT8521_LINK_TIMER_CFG2_REG 0xA5 -#define YT8521_LTCR_EN_AUTOSEN BIT(15) +#if (YTPHY_WOL_FEATURE_ENABLE) +#undef SYS_WAKEUP_BASED_ON_ETH_PKT +#define SYS_WAKEUP_BASED_ON_ETH_PKT 1 +#endif -/* 0xA000, 0xA001, 0xA003, 0xA006 ~ 0xA00A and 0xA012 are common ext registers - * of yt8521 phy. There is no need to switch reg space when operating these - * registers. - */ +struct yt8xxx_priv { + u8 polling_mode; + u8 chip_mode; +}; -#define YT8521_REG_SPACE_SELECT_REG 0xA000 -#define YT8521_RSSR_SPACE_MASK BIT(1) -#define YT8521_RSSR_FIBER_SPACE (0x1 << 1) -#define YT8521_RSSR_UTP_SPACE (0x0 << 1) -#define YT8521_RSSR_TO_BE_ARBITRATED (0xFF) +/* polling mode */ +#define YT_PHY_MODE_FIBER 1 /* fiber mode only */ +#define YT_PHY_MODE_UTP 2 /* utp mode only */ +#define YT_PHY_MODE_POLL (YT_PHY_MODE_FIBER | YT_PHY_MODE_UTP) -#define YT8521_CHIP_CONFIG_REG 0xA001 -#define YT8521_CCR_SW_RST BIT(15) -#define YT8531_RGMII_LDO_VOL_MASK GENMASK(5, 4) -#define YT8531_LDO_VOL_3V3 0x0 -#define YT8531_LDO_VOL_1V8 0x2 +static int ytxxxx_soft_reset(struct phy_device *phydev); +static int yt861x_soft_reset_paged(struct phy_device *phydev, int reg_space); +static int yt861x_aneg_done_paged(struct phy_device *phydev, int reg_space); +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) +static int ytphy_config_init(struct phy_device *phydev) +{ + int val; -/* 1b0 disable 1.9ns rxc clock delay *default* - * 1b1 enable 1.9ns rxc clock delay - */ -#define YT8521_CCR_RXC_DLY_EN BIT(8) -#define YT8521_CCR_RXC_DLY_1_900_NS 1900 - -#define YT8521_CCR_MODE_SEL_MASK (BIT(2) | BIT(1) | BIT(0)) -#define YT8521_CCR_MODE_UTP_TO_RGMII 0 -#define YT8521_CCR_MODE_FIBER_TO_RGMII 1 -#define 
YT8521_CCR_MODE_UTP_FIBER_TO_RGMII 2 -#define YT8521_CCR_MODE_UTP_TO_SGMII 3 -#define YT8521_CCR_MODE_SGPHY_TO_RGMAC 4 -#define YT8521_CCR_MODE_SGMAC_TO_RGPHY 5 -#define YT8521_CCR_MODE_UTP_TO_FIBER_AUTO 6 -#define YT8521_CCR_MODE_UTP_TO_FIBER_FORCE 7 - -/* 3 phy polling modes,poll mode combines utp and fiber mode*/ -#define YT8521_MODE_FIBER 0x1 -#define YT8521_MODE_UTP 0x2 -#define YT8521_MODE_POLL 0x3 - -#define YT8521_RGMII_CONFIG1_REG 0xA003 -/* 1b0 use original tx_clk_rgmii *default* - * 1b1 use inverted tx_clk_rgmii. - */ -#define YT8521_RC1R_TX_CLK_SEL_INVERTED BIT(14) -#define YT8521_RC1R_RX_DELAY_MASK GENMASK(13, 10) -#define YT8521_RC1R_FE_TX_DELAY_MASK GENMASK(7, 4) -#define YT8521_RC1R_GE_TX_DELAY_MASK GENMASK(3, 0) -#define YT8521_RC1R_RGMII_0_000_NS 0 -#define YT8521_RC1R_RGMII_0_150_NS 1 -#define YT8521_RC1R_RGMII_0_300_NS 2 -#define YT8521_RC1R_RGMII_0_450_NS 3 -#define YT8521_RC1R_RGMII_0_600_NS 4 -#define YT8521_RC1R_RGMII_0_750_NS 5 -#define YT8521_RC1R_RGMII_0_900_NS 6 -#define YT8521_RC1R_RGMII_1_050_NS 7 -#define YT8521_RC1R_RGMII_1_200_NS 8 -#define YT8521_RC1R_RGMII_1_350_NS 9 -#define YT8521_RC1R_RGMII_1_500_NS 10 -#define YT8521_RC1R_RGMII_1_650_NS 11 -#define YT8521_RC1R_RGMII_1_800_NS 12 -#define YT8521_RC1R_RGMII_1_950_NS 13 -#define YT8521_RC1R_RGMII_2_100_NS 14 -#define YT8521_RC1R_RGMII_2_250_NS 15 - -#define YTPHY_MISC_CONFIG_REG 0xA006 -#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0) -#define YTPHY_MCR_FIBER_1000BX (0x1 << 0) -#define YTPHY_MCR_FIBER_100FX (0x0 << 0) - -/* WOL MAC ADDR: MACADDR2(highest), MACADDR1(middle), MACADDR0(lowest) */ -#define YTPHY_WOL_MACADDR2_REG 0xA007 -#define YTPHY_WOL_MACADDR1_REG 0xA008 -#define YTPHY_WOL_MACADDR0_REG 0xA009 - -#define YTPHY_WOL_CONFIG_REG 0xA00A -#define YTPHY_WCR_INTR_SEL BIT(6) -#define YTPHY_WCR_ENABLE BIT(3) - -/* 2b00 84ms - * 2b01 168ms *default* - * 2b10 336ms - * 2b11 672ms - */ -#define YTPHY_WCR_PULSE_WIDTH_MASK (BIT(2) | BIT(1)) -#define YTPHY_WCR_PULSE_WIDTH_672MS (BIT(2) | 
BIT(1)) + val = phy_read(phydev, 3); -/* 1b0 Interrupt and WOL events is level triggered and active LOW *default* - * 1b1 Interrupt and WOL events is pulse triggered and active LOW - */ -#define YTPHY_WCR_TYPE_PULSE BIT(0) - -#define YTPHY_PAD_DRIVE_STRENGTH_REG 0xA010 -#define YT8531_RGMII_RXC_DS_MASK GENMASK(15, 13) -#define YT8531_RGMII_RXD_DS_HI_MASK BIT(12) /* Bit 2 of rxd_ds */ -#define YT8531_RGMII_RXD_DS_LOW_MASK GENMASK(5, 4) /* Bit 1/0 of rxd_ds */ -#define YT8531_RGMII_RX_DS_DEFAULT 0x3 - -#define YTPHY_SYNCE_CFG_REG 0xA012 -#define YT8521_SCR_SYNCE_ENABLE BIT(5) -/* 1b0 output 25m clock - * 1b1 output 125m clock *default* - */ -#define YT8521_SCR_CLK_FRE_SEL_125M BIT(3) -#define YT8521_SCR_CLK_SRC_MASK GENMASK(2, 1) -#define YT8521_SCR_CLK_SRC_PLL_125M 0 -#define YT8521_SCR_CLK_SRC_UTP_RX 1 -#define YT8521_SCR_CLK_SRC_SDS_RX 2 -#define YT8521_SCR_CLK_SRC_REF_25M 3 -#define YT8531_SCR_SYNCE_ENABLE BIT(6) -/* 1b0 output 25m clock *default* - * 1b1 output 125m clock - */ -#define YT8531_SCR_CLK_FRE_SEL_125M BIT(4) -#define YT8531_SCR_CLK_SRC_MASK GENMASK(3, 1) -#define YT8531_SCR_CLK_SRC_PLL_125M 0 -#define YT8531_SCR_CLK_SRC_UTP_RX 1 -#define YT8531_SCR_CLK_SRC_SDS_RX 2 -#define YT8531_SCR_CLK_SRC_CLOCK_FROM_DIGITAL 3 -#define YT8531_SCR_CLK_SRC_REF_25M 4 -#define YT8531_SCR_CLK_SRC_SSC_25M 5 - -/* Extended Register end */ - -#define YTPHY_DTS_OUTPUT_CLK_DIS 0 -#define YTPHY_DTS_OUTPUT_CLK_25M 25000000 -#define YTPHY_DTS_OUTPUT_CLK_125M 125000000 - -struct yt8521_priv { - /* combo_advertising is used for case of YT8521 in combo mode, - * this means that yt8521 may work in utp or fiber mode which depends - * on which media is connected (YT8521_RSSR_TO_BE_ARBITRATED). 
- */ - __ETHTOOL_DECLARE_LINK_MODE_MASK(combo_advertising); + return 0; +} +#endif - /* YT8521_MODE_FIBER / YT8521_MODE_UTP / YT8521_MODE_POLL*/ - u8 polling_mode; - u8 strap_mode; /* 8 working modes */ - /* current reg page of yt8521 phy: - * YT8521_RSSR_UTP_SPACE - * YT8521_RSSR_FIBER_SPACE - * YT8521_RSSR_TO_BE_ARBITRATED - */ - u8 reg_page; -}; -/** - * ytphy_read_ext() - read a PHY's extended register - * @phydev: a pointer to a &struct phy_device - * @regnum: register number to read - * - * NOTE:The caller must have taken the MDIO bus lock. - * - * returns the value of regnum reg or negative error code - */ -static int ytphy_read_ext(struct phy_device *phydev, u16 regnum) +#if (KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE) +static inline void phy_lock_mdio_bus(struct phy_device *phydev) { - int ret; - - ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum); - if (ret < 0) - return ret; - - return __phy_read(phydev, YTPHY_PAGE_DATA); +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->bus->mdio_lock); +#else + mutex_lock(&phydev->mdio.bus->mdio_lock); +#endif } -/** - * ytphy_read_ext_with_lock() - read a PHY's extended register - * @phydev: a pointer to a &struct phy_device - * @regnum: register number to read - * - * returns the value of regnum reg or negative error code - */ -static int ytphy_read_ext_with_lock(struct phy_device *phydev, u16 regnum) +static inline void phy_unlock_mdio_bus(struct phy_device *phydev) { - int ret; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->bus->mdio_lock); +#else + mutex_unlock(&phydev->mdio.bus->mdio_lock); +#endif +} +#endif - phy_lock_mdio_bus(phydev); - ret = ytphy_read_ext(phydev, regnum); - phy_unlock_mdio_bus(phydev); +#if (KERNEL_VERSION(4, 16, 0) > LINUX_VERSION_CODE) +static inline int __phy_read(struct phy_device *phydev, u32 regnum) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct mii_bus *bus = phydev->bus; + int addr = phydev->addr; + return 
bus->read(bus, phydev->addr, regnum); +#else + struct mii_bus *bus = phydev->mdio.bus; + int addr = phydev->mdio.addr; +#endif + return bus->read(bus, addr, regnum); +} - return ret; +static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct mii_bus *bus = phydev->bus; + int addr = phydev->addr; +#else + struct mii_bus *bus = phydev->mdio.bus; + int addr = phydev->mdio.addr; +#endif + return bus->write(bus, addr, regnum, val); } +#endif -/** - * ytphy_write_ext() - write a PHY's extended register - * @phydev: a pointer to a &struct phy_device - * @regnum: register number to write - * @val: value to write to @regnum - * - * NOTE:The caller must have taken the MDIO bus lock. - * - * returns 0 or negative error code - */ -static int ytphy_write_ext(struct phy_device *phydev, u16 regnum, u16 val) +static int __ytphy_read_ext(struct phy_device *phydev, u32 regnum) { int ret; - ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum); + ret = __phy_write(phydev, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + return ret; + + ret = __phy_read(phydev, REG_DEBUG_DATA); if (ret < 0) return ret; - return __phy_write(phydev, YTPHY_PAGE_DATA, val); + return 0; } -/** - * ytphy_write_ext_with_lock() - write a PHY's extended register - * @phydev: a pointer to a &struct phy_device - * @regnum: register number to write - * @val: value to write to @regnum - * - * returns 0 or negative error code - */ -static int ytphy_write_ext_with_lock(struct phy_device *phydev, u16 regnum, - u16 val) +static int ytphy_read_ext(struct phy_device *phydev, u32 regnum) { int ret; phy_lock_mdio_bus(phydev); - ret = ytphy_write_ext(phydev, regnum, val); + ret = __phy_write(phydev, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + goto err_handle; + + ret = __phy_read(phydev, REG_DEBUG_DATA); + if (ret < 0) + goto err_handle; + +err_handle: phy_unlock_mdio_bus(phydev); return ret; } -/** - * ytphy_modify_ext() - bits modify 
a PHY's extended register - * @phydev: a pointer to a &struct phy_device - * @regnum: register number to write - * @mask: bit mask of bits to clear - * @set: bit mask of bits to set - * - * NOTE: Convenience function which allows a PHY's extended register to be - * modified as new register value = (old register value & ~mask) | set. - * The caller must have taken the MDIO bus lock. - * - * returns 0 or negative error code - */ -static int ytphy_modify_ext(struct phy_device *phydev, u16 regnum, u16 mask, - u16 set) +static int ytphy_write_ext(struct phy_device *phydev, u32 regnum, u16 val) { int ret; - ret = __phy_write(phydev, YTPHY_PAGE_SELECT, regnum); + phy_lock_mdio_bus(phydev); + ret = __phy_write(phydev, REG_DEBUG_ADDR_OFFSET, regnum); if (ret < 0) - return ret; - - return __phy_modify(phydev, YTPHY_PAGE_DATA, mask, set); -} + goto err_handle; -/** - * ytphy_modify_ext_with_lock() - bits modify a PHY's extended register - * @phydev: a pointer to a &struct phy_device - * @regnum: register number to write - * @mask: bit mask of bits to clear - * @set: bit mask of bits to set - * - * NOTE: Convenience function which allows a PHY's extended register to be - * modified as new register value = (old register value & ~mask) | set. - * - * returns 0 or negative error code - */ -static int ytphy_modify_ext_with_lock(struct phy_device *phydev, u16 regnum, - u16 mask, u16 set) -{ - int ret; + ret = __phy_write(phydev, REG_DEBUG_DATA, val); + if (ret < 0) + goto err_handle; - phy_lock_mdio_bus(phydev); - ret = ytphy_modify_ext(phydev, regnum, mask, set); +err_handle: phy_unlock_mdio_bus(phydev); return ret; } -/** - * ytphy_get_wol() - report whether wake-on-lan is enabled - * @phydev: a pointer to a &struct phy_device - * @wol: a pointer to a &struct ethtool_wolinfo - * - * NOTE: YTPHY_WOL_CONFIG_REG is common ext reg. 
- */ -static void ytphy_get_wol(struct phy_device *phydev, - struct ethtool_wolinfo *wol) +static int __ytphy_write_ext(struct phy_device *phydev, u32 regnum, u16 val) { - int wol_config; + int ret; - wol->supported = WAKE_MAGIC; - wol->wolopts = 0; + ret = __phy_write(phydev, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + return ret; - wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG); - if (wol_config < 0) - return; + ret = __phy_write(phydev, REG_DEBUG_DATA, val); + if (ret < 0) + return ret; - if (wol_config & YTPHY_WCR_ENABLE) - wol->wolopts |= WAKE_MAGIC; + return 0; } -/** - * ytphy_set_wol() - turn wake-on-lan on or off - * @phydev: a pointer to a &struct phy_device - * @wol: a pointer to a &struct ethtool_wolinfo - * - * NOTE: YTPHY_WOL_CONFIG_REG, YTPHY_WOL_MACADDR2_REG, YTPHY_WOL_MACADDR1_REG - * and YTPHY_WOL_MACADDR0_REG are common ext reg. The - * YTPHY_INTERRUPT_ENABLE_REG of UTP is special, fiber also use this register. - * - * returns 0 or negative errno code - */ -static int ytphy_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) -{ - struct net_device *p_attached_dev; - const u16 mac_addr_reg[] = { - YTPHY_WOL_MACADDR2_REG, - YTPHY_WOL_MACADDR1_REG, - YTPHY_WOL_MACADDR0_REG, - }; - const u8 *mac_addr; - int old_page; - int ret = 0; - u16 mask; - u16 val; - u8 i; - - if (wol->wolopts & WAKE_MAGIC) { - p_attached_dev = phydev->attached_dev; - if (!p_attached_dev) - return -ENODEV; - - mac_addr = (const u8 *)p_attached_dev->dev_addr; - if (!is_valid_ether_addr(mac_addr)) - return -EINVAL; - - /* lock mdio bus then switch to utp reg space */ - old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); - if (old_page < 0) - goto err_restore_page; - - /* Store the device address for the magic packet */ - for (i = 0; i < 3; i++) { - ret = ytphy_write_ext(phydev, mac_addr_reg[i], - ((mac_addr[i * 2] << 8)) | - (mac_addr[i * 2 + 1])); - if (ret < 0) - goto err_restore_page; - } - - /* Enable WOL feature */ - mask = 
YTPHY_WCR_PULSE_WIDTH_MASK | YTPHY_WCR_INTR_SEL; - val = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL; - val |= YTPHY_WCR_TYPE_PULSE | YTPHY_WCR_PULSE_WIDTH_672MS; - ret = ytphy_modify_ext(phydev, YTPHY_WOL_CONFIG_REG, mask, val); - if (ret < 0) - goto err_restore_page; - - /* Enable WOL interrupt */ - ret = __phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, 0, - YTPHY_IER_WOL); - if (ret < 0) - goto err_restore_page; - - } else { - old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); - if (old_page < 0) - goto err_restore_page; +__attribute__((unused)) static int ytphy_read_mmd(struct phy_device* phydev, + u16 device, u16 reg) +{ + int val; - /* Disable WOL feature */ - mask = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL; - ret = ytphy_modify_ext(phydev, YTPHY_WOL_CONFIG_REG, mask, 0); + phy_lock_mdio_bus(phydev); - /* Disable WOL interrupt */ - ret = __phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, - YTPHY_IER_WOL, 0); - if (ret < 0) - goto err_restore_page; + __phy_write(phydev, REG_MII_MMD_CTRL, device); + __phy_write(phydev, REG_MII_MMD_DATA, reg); + __phy_write(phydev, REG_MII_MMD_CTRL, device | 0x4000); + val = __phy_read(phydev, REG_MII_MMD_DATA); + if (val < 0) { +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + dev_err(&phydev->dev, "error read mmd device(%u) reg (%u)\n", + device, reg); +#else + dev_err(&phydev->mdio.dev, + "error read mmd device(%u) reg (%u)\n", device, reg); +#endif + + goto err_handle; } -err_restore_page: - return phy_restore_page(phydev, old_page, ret); +err_handle: + phy_unlock_mdio_bus(phydev); + + return val; } -static int yt8531_set_wol(struct phy_device *phydev, - struct ethtool_wolinfo *wol) +__attribute__((unused)) static int ytphy_write_mmd(struct phy_device* phydev, + u16 device, u16 reg, + u16 value) { - const u16 mac_addr_reg[] = { - YTPHY_WOL_MACADDR2_REG, - YTPHY_WOL_MACADDR1_REG, - YTPHY_WOL_MACADDR0_REG, - }; - const u8 *mac_addr; - u16 mask, val; - int ret; - u8 i; - - if (wol->wolopts & WAKE_MAGIC) { - mac_addr = 
phydev->attached_dev->dev_addr; + int ret = 0; - /* Store the device address for the magic packet */ - for (i = 0; i < 3; i++) { - ret = ytphy_write_ext_with_lock(phydev, mac_addr_reg[i], - ((mac_addr[i * 2] << 8)) | - (mac_addr[i * 2 + 1])); - if (ret < 0) - return ret; - } + phy_lock_mdio_bus(phydev); - /* Enable WOL feature */ - mask = YTPHY_WCR_PULSE_WIDTH_MASK | YTPHY_WCR_INTR_SEL; - val = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL; - val |= YTPHY_WCR_TYPE_PULSE | YTPHY_WCR_PULSE_WIDTH_672MS; - ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG, - mask, val); - if (ret < 0) - return ret; + __phy_write(phydev, REG_MII_MMD_CTRL, device); + __phy_write(phydev, REG_MII_MMD_DATA, reg); + __phy_write(phydev, REG_MII_MMD_CTRL, device | 0x4000); + __phy_write(phydev, REG_MII_MMD_DATA, value); - /* Enable WOL interrupt */ - ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, 0, - YTPHY_IER_WOL); - if (ret < 0) - return ret; - } else { - /* Disable WOL feature */ - mask = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL; - ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG, - mask, 0); - - /* Disable WOL interrupt */ - ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, - YTPHY_IER_WOL, 0); - if (ret < 0) - return ret; - } + phy_unlock_mdio_bus(phydev); - return 0; + return ret; } -static int yt8511_read_page(struct phy_device *phydev) +static int __ytphy_soft_reset(struct phy_device *phydev) { - return __phy_read(phydev, YT8511_PAGE_SELECT); -}; - -static int yt8511_write_page(struct phy_device *phydev, int page) -{ - return __phy_write(phydev, YT8511_PAGE_SELECT, page); -}; - -static int yt8511_config_init(struct phy_device *phydev) -{ - int oldpage, ret = 0; - unsigned int ge, fe; - - oldpage = phy_select_page(phydev, YT8511_EXT_CLK_GATE); - if (oldpage < 0) - goto err_restore_page; - - /* set rgmii delay mode */ - switch (phydev->interface) { - case PHY_INTERFACE_MODE_RGMII: - ge = YT8511_DELAY_GE_TX_DIS; - fe = YT8511_DELAY_FE_TX_DIS; - break; - case 
PHY_INTERFACE_MODE_RGMII_RXID: - ge = YT8511_DELAY_RX | YT8511_DELAY_GE_TX_DIS; - fe = YT8511_DELAY_FE_TX_DIS; - break; - case PHY_INTERFACE_MODE_RGMII_TXID: - ge = YT8511_DELAY_GE_TX_EN; - fe = YT8511_DELAY_FE_TX_EN; - break; - case PHY_INTERFACE_MODE_RGMII_ID: - ge = YT8511_DELAY_RX | YT8511_DELAY_GE_TX_EN; - fe = YT8511_DELAY_FE_TX_EN; - break; - default: /* do not support other modes */ - ret = -EOPNOTSUPP; - goto err_restore_page; - } + int ret = 0, val = 0; - ret = __phy_modify(phydev, YT8511_PAGE, (YT8511_DELAY_RX | YT8511_DELAY_GE_TX_EN), ge); - if (ret < 0) - goto err_restore_page; + val = __phy_read(phydev, MII_BMCR); + if (val < 0) + return val; - /* set clock mode to 125mhz */ - ret = __phy_modify(phydev, YT8511_PAGE, 0, YT8511_CLK_125M); + ret = __phy_write(phydev, MII_BMCR, val | BMCR_RESET); if (ret < 0) - goto err_restore_page; + return ret; - /* fast ethernet delay is in a separate page */ - ret = __phy_write(phydev, YT8511_PAGE_SELECT, YT8511_EXT_DELAY_DRIVE); - if (ret < 0) - goto err_restore_page; + return ret; +} - ret = __phy_modify(phydev, YT8511_PAGE, YT8511_DELAY_FE_TX_EN, fe); - if (ret < 0) - goto err_restore_page; +static int ytphy_soft_reset(struct phy_device *phydev) +{ + int ret = 0, val = 0; - /* leave pll enabled in sleep */ - ret = __phy_write(phydev, YT8511_PAGE_SELECT, YT8511_EXT_SLEEP_CTRL); - if (ret < 0) - goto err_restore_page; + val = phy_read(phydev, MII_BMCR); + if (val < 0) + return val; - ret = __phy_modify(phydev, YT8511_PAGE, 0, YT8511_PLLON_SLP); + ret = phy_write(phydev, MII_BMCR, val | BMCR_RESET); if (ret < 0) - goto err_restore_page; + return ret; -err_restore_page: - return phy_restore_page(phydev, oldpage, ret); + return ret; } -/** - * yt8521_read_page() - read reg page - * @phydev: a pointer to a &struct phy_device - * - * returns current reg space of yt8521 (YT8521_RSSR_FIBER_SPACE/ - * YT8521_RSSR_UTP_SPACE) or negative errno code - */ -static int yt8521_read_page(struct phy_device *phydev) +#if 
(YTPHY8531A_XTAL_INIT) +static int yt8531a_xtal_init(struct phy_device *phydev) { - int old_page; - - old_page = ytphy_read_ext(phydev, YT8521_REG_SPACE_SELECT_REG); - if (old_page < 0) - return old_page; + int ret = 0; + int val = 0; + bool state = false; - if ((old_page & YT8521_RSSR_SPACE_MASK) == YT8521_RSSR_FIBER_SPACE) - return YT8521_RSSR_FIBER_SPACE; + msleep(50); - return YT8521_RSSR_UTP_SPACE; -}; + do { + ret = ytphy_write_ext(phydev, 0xa012, 0x88); + if (ret < 0) + return ret; -/** - * yt8521_write_page() - write reg page - * @phydev: a pointer to a &struct phy_device - * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to write. - * - * returns 0 or negative errno code - */ -static int yt8521_write_page(struct phy_device *phydev, int page) -{ - int mask = YT8521_RSSR_SPACE_MASK; - int set; + msleep(100); - if ((page & YT8521_RSSR_SPACE_MASK) == YT8521_RSSR_FIBER_SPACE) - set = YT8521_RSSR_FIBER_SPACE; - else - set = YT8521_RSSR_UTP_SPACE; + val = ytphy_read_ext(phydev, 0xa012); + if (val < 0) + return val; - return ytphy_modify_ext(phydev, YT8521_REG_SPACE_SELECT_REG, mask, set); -}; + usleep_range(10000, 20000); + } while (val != 0x88); -/** - * struct ytphy_cfg_reg_map - map a config value to a register value - * @cfg: value in device configuration - * @reg: value in the register - */ -struct ytphy_cfg_reg_map { - u32 cfg; - u32 reg; -}; + ret = ytphy_write_ext(phydev, 0xa012, 0xc8); + if (ret < 0) + return ret; -static const struct ytphy_cfg_reg_map ytphy_rgmii_delays[] = { - /* for tx delay / rx delay with YT8521_CCR_RXC_DLY_EN is not set. 
*/ - { 0, YT8521_RC1R_RGMII_0_000_NS }, - { 150, YT8521_RC1R_RGMII_0_150_NS }, - { 300, YT8521_RC1R_RGMII_0_300_NS }, - { 450, YT8521_RC1R_RGMII_0_450_NS }, - { 600, YT8521_RC1R_RGMII_0_600_NS }, - { 750, YT8521_RC1R_RGMII_0_750_NS }, - { 900, YT8521_RC1R_RGMII_0_900_NS }, - { 1050, YT8521_RC1R_RGMII_1_050_NS }, - { 1200, YT8521_RC1R_RGMII_1_200_NS }, - { 1350, YT8521_RC1R_RGMII_1_350_NS }, - { 1500, YT8521_RC1R_RGMII_1_500_NS }, - { 1650, YT8521_RC1R_RGMII_1_650_NS }, - { 1800, YT8521_RC1R_RGMII_1_800_NS }, - { 1950, YT8521_RC1R_RGMII_1_950_NS }, /* default tx/rx delay */ - { 2100, YT8521_RC1R_RGMII_2_100_NS }, - { 2250, YT8521_RC1R_RGMII_2_250_NS }, - - /* only for rx delay with YT8521_CCR_RXC_DLY_EN is set. */ - { 0 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_000_NS }, - { 150 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_150_NS }, - { 300 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_300_NS }, - { 450 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_450_NS }, - { 600 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_600_NS }, - { 750 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_750_NS }, - { 900 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_900_NS }, - { 1050 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_050_NS }, - { 1200 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_200_NS }, - { 1350 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_350_NS }, - { 1500 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_500_NS }, - { 1650 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_650_NS }, - { 1800 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_800_NS }, - { 1950 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_950_NS }, - { 2100 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_100_NS }, - { 2250 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_250_NS } -}; + return ret; +} +#endif -static u32 ytphy_get_delay_reg_value(struct phy_device *phydev, - const char *prop_name, - const struct ytphy_cfg_reg_map *tbl, - int 
tb_size, - u16 *rxc_dly_en, - u32 dflt) +static int yt8010_soft_reset(struct phy_device *phydev) { - struct device_node *node = phydev->mdio.dev.of_node; - int tb_size_half = tb_size / 2; - u32 val; - int i; - - if (of_property_read_u32(node, prop_name, &val)) - goto err_dts_val; - - /* when rxc_dly_en is NULL, it is get the delay for tx, only half of - * tb_size is valid. - */ - if (!rxc_dly_en) - tb_size = tb_size_half; - - for (i = 0; i < tb_size; i++) { - if (tbl[i].cfg == val) { - if (rxc_dly_en && i < tb_size_half) - *rxc_dly_en = 0; - return tbl[i].reg; - } - } - - phydev_warn(phydev, "Unsupported value %d for %s using default (%u)\n", - val, prop_name, dflt); - -err_dts_val: - /* when rxc_dly_en is not NULL, it is get the delay for rx. - * The rx default in dts and ytphy_rgmii_clk_delay_config is 1950 ps, - * so YT8521_CCR_RXC_DLY_EN should not be set. - */ - if (rxc_dly_en) - *rxc_dly_en = 0; + ytphy_soft_reset(phydev); - return dflt; + return 0; } -static int ytphy_rgmii_clk_delay_config(struct phy_device *phydev) +static int yt8010AS_soft_reset(struct phy_device *phydev) { - int tb_size = ARRAY_SIZE(ytphy_rgmii_delays); - u16 rxc_dly_en = YT8521_CCR_RXC_DLY_EN; - u32 rx_reg, tx_reg; - u16 mask, val = 0; - int ret; + int ret = 0; - rx_reg = ytphy_get_delay_reg_value(phydev, "rx-internal-delay-ps", - ytphy_rgmii_delays, tb_size, - &rxc_dly_en, - YT8521_RC1R_RGMII_1_950_NS); - tx_reg = ytphy_get_delay_reg_value(phydev, "tx-internal-delay-ps", - ytphy_rgmii_delays, tb_size, NULL, - YT8521_RC1R_RGMII_1_950_NS); - - switch (phydev->interface) { - case PHY_INTERFACE_MODE_RGMII: - rxc_dly_en = 0; - break; - case PHY_INTERFACE_MODE_RGMII_RXID: - val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg); - break; - case PHY_INTERFACE_MODE_RGMII_TXID: - rxc_dly_en = 0; - val |= FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg); - break; - case PHY_INTERFACE_MODE_RGMII_ID: - val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg) | - FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, 
tx_reg); - break; - default: /* do not support other modes */ - return -EOPNOTSUPP; + /* sgmii */ + ytphy_write_ext(phydev, 0xe, 1); + ret = ytphy_soft_reset(phydev); + if (ret < 0) { + ytphy_write_ext(phydev, 0xe, 0); + return ret; } - ret = ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG, - YT8521_CCR_RXC_DLY_EN, rxc_dly_en); + /* utp */ + ytphy_write_ext(phydev, 0xe, 0); + ret = ytphy_soft_reset(phydev); if (ret < 0) return ret; - /* Generally, it is not necessary to adjust YT8521_RC1R_FE_TX_DELAY */ - mask = YT8521_RC1R_RX_DELAY_MASK | YT8521_RC1R_GE_TX_DELAY_MASK; - return ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG, mask, val); + return 0; } -static int ytphy_rgmii_clk_delay_config_with_lock(struct phy_device *phydev) +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) +static int yt8010_aneg_done(struct phy_device *phydev) { - int ret; + int val = 0; - phy_lock_mdio_bus(phydev); - ret = ytphy_rgmii_clk_delay_config(phydev); - phy_unlock_mdio_bus(phydev); + val = phy_read(phydev, 0x1); + val = phy_read(phydev, 0x1); - return ret; + return (val < 0) ? 
val : (val & BMSR_LSTATUS); } +#endif -/** - * struct ytphy_ldo_vol_map - map a current value to a register value - * @vol: ldo voltage - * @ds: value in the register - * @cur: value in device configuration - */ -struct ytphy_ldo_vol_map { - u32 vol; - u32 ds; - u32 cur; -}; - -static const struct ytphy_ldo_vol_map yt8531_ldo_vol[] = { - {.vol = YT8531_LDO_VOL_1V8, .ds = 0, .cur = 1200}, - {.vol = YT8531_LDO_VOL_1V8, .ds = 1, .cur = 2100}, - {.vol = YT8531_LDO_VOL_1V8, .ds = 2, .cur = 2700}, - {.vol = YT8531_LDO_VOL_1V8, .ds = 3, .cur = 2910}, - {.vol = YT8531_LDO_VOL_1V8, .ds = 4, .cur = 3110}, - {.vol = YT8531_LDO_VOL_1V8, .ds = 5, .cur = 3600}, - {.vol = YT8531_LDO_VOL_1V8, .ds = 6, .cur = 3970}, - {.vol = YT8531_LDO_VOL_1V8, .ds = 7, .cur = 4350}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 0, .cur = 3070}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 1, .cur = 4080}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 2, .cur = 4370}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 3, .cur = 4680}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 4, .cur = 5020}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 5, .cur = 5450}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 6, .cur = 5740}, - {.vol = YT8531_LDO_VOL_3V3, .ds = 7, .cur = 6140}, -}; - -static u32 yt8531_get_ldo_vol(struct phy_device *phydev) +static int yt8010_config_init(struct phy_device *phydev) { - u32 val; + int val; + + phydev->autoneg = AUTONEG_DISABLE; - val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG); - val = FIELD_GET(YT8531_RGMII_LDO_VOL_MASK, val); + ytphy_write_ext(phydev, 0x1023, 0xfc00); + ytphy_write_ext(phydev, 0x101d, 0x12c0); + val = ytphy_read_ext(phydev, 0x1000); + val &= ~(1 << 7); + ytphy_write_ext(phydev, 0x1000, val); + ytphy_write_ext(phydev, 0x101d, 0x12c0); + ytphy_write_ext(phydev, 0x101e, 0x1900); + ytphy_write_ext(phydev, 0x101f, 0x1900); + ytphy_write_ext(phydev, 0x4083, 0x4327); + ytphy_write_ext(phydev, 0x4082, 0xc20); + ytphy_soft_reset(phydev); - return val <= YT8531_LDO_VOL_1V8 ? 
val : YT8531_LDO_VOL_1V8; + return 0; } -static int yt8531_get_ds_map(struct phy_device *phydev, u32 cur) +static int yt8010_config_aneg(struct phy_device *phydev) { - u32 vol; - int i; - - vol = yt8531_get_ldo_vol(phydev); - for (i = 0; i < ARRAY_SIZE(yt8531_ldo_vol); i++) { - if (yt8531_ldo_vol[i].vol == vol && yt8531_ldo_vol[i].cur == cur) - return yt8531_ldo_vol[i].ds; - } + phydev->speed = SPEED_100; - return -EINVAL; + return 0; } -static int yt8531_set_ds(struct phy_device *phydev) +static int yt8010_read_status(struct phy_device *phydev) { - struct device_node *node = phydev->mdio.dev.of_node; - u32 ds_field_low, ds_field_hi, val; - int ret, ds; - - /* set rgmii rx clk driver strength */ - if (!of_property_read_u32(node, "motorcomm,rx-clk-drv-microamp", &val)) { - ds = yt8531_get_ds_map(phydev, val); - if (ds < 0) - return dev_err_probe(&phydev->mdio.dev, ds, - "No matching current value was found.\n"); - } else { - ds = YT8531_RGMII_RX_DS_DEFAULT; - } + int ret = 0; - ret = ytphy_modify_ext_with_lock(phydev, - YTPHY_PAD_DRIVE_STRENGTH_REG, - YT8531_RGMII_RXC_DS_MASK, - FIELD_PREP(YT8531_RGMII_RXC_DS_MASK, ds)); - if (ret < 0) + ret = genphy_update_link(phydev); + if (ret) return ret; - /* set rgmii rx data driver strength */ - if (!of_property_read_u32(node, "motorcomm,rx-data-drv-microamp", &val)) { - ds = yt8531_get_ds_map(phydev, val); - if (ds < 0) - return dev_err_probe(&phydev->mdio.dev, ds, - "No matching current value was found.\n"); - } else { - ds = YT8531_RGMII_RX_DS_DEFAULT; - } + /* for 8010, no definition mii reg 0x04, 0x11, here force 100/full */ + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_FULL; - ds_field_hi = FIELD_GET(BIT(2), ds); - ds_field_hi = FIELD_PREP(YT8531_RGMII_RXD_DS_HI_MASK, ds_field_hi); + return 0; +} - ds_field_low = FIELD_GET(GENMASK(1, 0), ds); - ds_field_low = FIELD_PREP(YT8531_RGMII_RXD_DS_LOW_MASK, ds_field_low); +static int yt8010AS_config_init(struct phy_device *phydev) +{ + phydev->autoneg = 
AUTONEG_DISABLE; - ret = ytphy_modify_ext_with_lock(phydev, - YTPHY_PAD_DRIVE_STRENGTH_REG, - YT8531_RGMII_RXD_DS_LOW_MASK | YT8531_RGMII_RXD_DS_HI_MASK, - ds_field_low | ds_field_hi); - if (ret < 0) - return ret; + ytphy_write_ext(phydev, 0x1009, 0x0); + + yt8010AS_soft_reset(phydev); return 0; } -/** - * yt8521_probe() - read chip config then set suitable polling_mode - * @phydev: a pointer to a &struct phy_device - * - * returns 0 or negative errno code - */ -static int yt8521_probe(struct phy_device *phydev) +static int yt8011_probe(struct phy_device *phydev) { - struct device_node *node = phydev->mdio.dev.of_node; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct device *dev = &phydev->dev; +#else struct device *dev = &phydev->mdio.dev; - struct yt8521_priv *priv; +#endif + struct yt8xxx_priv *priv; int chip_config; - u16 mask, val; - u32 freq; - int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -970,1358 +545,3201 @@ static int yt8521_probe(struct phy_device *phydev) phydev->priv = priv; - chip_config = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG); - if (chip_config < 0) - return chip_config; - - priv->strap_mode = chip_config & YT8521_CCR_MODE_SEL_MASK; - switch (priv->strap_mode) { - case YT8521_CCR_MODE_FIBER_TO_RGMII: - case YT8521_CCR_MODE_SGPHY_TO_RGMAC: - case YT8521_CCR_MODE_SGMAC_TO_RGPHY: - priv->polling_mode = YT8521_MODE_FIBER; - priv->reg_page = YT8521_RSSR_FIBER_SPACE; - phydev->port = PORT_FIBRE; - break; - case YT8521_CCR_MODE_UTP_FIBER_TO_RGMII: - case YT8521_CCR_MODE_UTP_TO_FIBER_AUTO: - case YT8521_CCR_MODE_UTP_TO_FIBER_FORCE: - priv->polling_mode = YT8521_MODE_POLL; - priv->reg_page = YT8521_RSSR_TO_BE_ARBITRATED; - phydev->port = PORT_NONE; - break; - case YT8521_CCR_MODE_UTP_TO_SGMII: - case YT8521_CCR_MODE_UTP_TO_RGMII: - priv->polling_mode = YT8521_MODE_UTP; - priv->reg_page = YT8521_RSSR_UTP_SPACE; - phydev->port = PORT_TP; - break; - } - /* set default reg space */ - if (priv->reg_page 
!= YT8521_RSSR_TO_BE_ARBITRATED) { - ret = ytphy_write_ext_with_lock(phydev, - YT8521_REG_SPACE_SELECT_REG, - priv->reg_page); - if (ret < 0) - return ret; - } - - if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq)) - freq = YTPHY_DTS_OUTPUT_CLK_DIS; - - if (phydev->drv->phy_id == PHY_ID_YT8521) { - switch (freq) { - case YTPHY_DTS_OUTPUT_CLK_DIS: - mask = YT8521_SCR_SYNCE_ENABLE; - val = 0; - break; - case YTPHY_DTS_OUTPUT_CLK_25M: - mask = YT8521_SCR_SYNCE_ENABLE | - YT8521_SCR_CLK_SRC_MASK | - YT8521_SCR_CLK_FRE_SEL_125M; - val = YT8521_SCR_SYNCE_ENABLE | - FIELD_PREP(YT8521_SCR_CLK_SRC_MASK, - YT8521_SCR_CLK_SRC_REF_25M); - break; - case YTPHY_DTS_OUTPUT_CLK_125M: - mask = YT8521_SCR_SYNCE_ENABLE | - YT8521_SCR_CLK_SRC_MASK | - YT8521_SCR_CLK_FRE_SEL_125M; - val = YT8521_SCR_SYNCE_ENABLE | - YT8521_SCR_CLK_FRE_SEL_125M | - FIELD_PREP(YT8521_SCR_CLK_SRC_MASK, - YT8521_SCR_CLK_SRC_PLL_125M); - break; - default: - phydev_warn(phydev, "Freq err:%u\n", freq); - return -EINVAL; - } - } else if (phydev->drv->phy_id == PHY_ID_YT8531S) { - switch (freq) { - case YTPHY_DTS_OUTPUT_CLK_DIS: - mask = YT8531_SCR_SYNCE_ENABLE; - val = 0; - break; - case YTPHY_DTS_OUTPUT_CLK_25M: - mask = YT8531_SCR_SYNCE_ENABLE | - YT8531_SCR_CLK_SRC_MASK | - YT8531_SCR_CLK_FRE_SEL_125M; - val = YT8531_SCR_SYNCE_ENABLE | - FIELD_PREP(YT8531_SCR_CLK_SRC_MASK, - YT8531_SCR_CLK_SRC_REF_25M); - break; - case YTPHY_DTS_OUTPUT_CLK_125M: - mask = YT8531_SCR_SYNCE_ENABLE | - YT8531_SCR_CLK_SRC_MASK | - YT8531_SCR_CLK_FRE_SEL_125M; - val = YT8531_SCR_SYNCE_ENABLE | - YT8531_SCR_CLK_FRE_SEL_125M | - FIELD_PREP(YT8531_SCR_CLK_SRC_MASK, - YT8531_SCR_CLK_SRC_PLL_125M); - break; - default: - phydev_warn(phydev, "Freq err:%u\n", freq); - return -EINVAL; - } - } else { - phydev_warn(phydev, "PHY id err\n"); - return -EINVAL; - } + /* ext reg 0x9030 bit0 + * 0 = chip works in RGMII mode; 1 = chip works in SGMII mode + */ + chip_config = ytphy_read_ext(phydev, 0x9030); + priv->chip_mode = 
chip_config & 0x1; - return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask, - val); + return 0; } -static int yt8531_probe(struct phy_device *phydev) +static int yt8011_soft_reset(struct phy_device *phydev) { - struct device_node *node = phydev->mdio.dev.of_node; - u16 mask, val; - u32 freq; + struct yt8xxx_priv *priv = phydev->priv; - if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq)) - freq = YTPHY_DTS_OUTPUT_CLK_DIS; + /* utp */ + ytphy_write_ext(phydev, 0x9000, 0x0); + ytphy_soft_reset(phydev); - switch (freq) { - case YTPHY_DTS_OUTPUT_CLK_DIS: - mask = YT8531_SCR_SYNCE_ENABLE; - val = 0; - break; - case YTPHY_DTS_OUTPUT_CLK_25M: - mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK | - YT8531_SCR_CLK_FRE_SEL_125M; - val = YT8531_SCR_SYNCE_ENABLE | - FIELD_PREP(YT8531_SCR_CLK_SRC_MASK, - YT8531_SCR_CLK_SRC_REF_25M); - break; - case YTPHY_DTS_OUTPUT_CLK_125M: - mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK | - YT8531_SCR_CLK_FRE_SEL_125M; - val = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_FRE_SEL_125M | - FIELD_PREP(YT8531_SCR_CLK_SRC_MASK, - YT8531_SCR_CLK_SRC_PLL_125M); - break; - default: - phydev_warn(phydev, "Freq err:%u\n", freq); - return -EINVAL; + if (priv->chip_mode) { /* sgmii */ + ytphy_write_ext(phydev, 0x9000, 0x8000); + ytphy_soft_reset(phydev); + + /* restore utp space */ + ytphy_write_ext(phydev, 0x9000, 0x0); } - return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask, - val); + return 0; } -/** - * ytphy_utp_read_lpa() - read LPA then setup lp_advertising for utp - * @phydev: a pointer to a &struct phy_device - * - * NOTE:The caller must have taken the MDIO bus lock. 
- * - * returns 0 or negative errno code - */ -static int ytphy_utp_read_lpa(struct phy_device *phydev) +static int yt8011_config_aneg(struct phy_device *phydev) { - int lpa, lpagb; - - if (phydev->autoneg == AUTONEG_ENABLE) { - if (!phydev->autoneg_complete) { - mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, - 0); - mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0); - return 0; - } + phydev->speed = SPEED_1000; - if (phydev->is_gigabit_capable) { - lpagb = __phy_read(phydev, MII_STAT1000); - if (lpagb < 0) - return lpagb; - - if (lpagb & LPA_1000MSFAIL) { - int adv = __phy_read(phydev, MII_CTRL1000); - - if (adv < 0) - return adv; + return 0; +} - if (adv & CTL1000_ENABLE_MASTER) - phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n"); - else - phydev_err(phydev, "Master/Slave resolution failed\n"); - return -ENOLINK; - } +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) +static int yt8011_aneg_done(struct phy_device *phydev) +{ + int link_utp = 0; + + /* UTP */ + ytphy_write_ext(phydev, 0x9000, 0); + link_utp = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & + (BIT(YTXXXX_LINK_STATUS_BIT))); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link_utp: %d\n", + __func__, phydev->addr, link_utp); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link_utp: %d\n", + __func__, phydev->mdio.addr, link_utp); +#endif + + return !!(link_utp); +} +#endif - mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, - lpagb); - } +/* #define YT8011_RGMII_DVDDIO_3V3 */ +/* #define YT8011_RGMII_DVDDIO_2V5 */ +/* #define YT8011_RGMII_DVDDIO_1V8 */ +static int yt8011_config_init(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + + phydev->autoneg = AUTONEG_DISABLE; + + /* UTP */ + ytphy_write_ext(phydev, 0x9000, 0x0); + + ytphy_write_ext(phydev, 0x1008, 0x2119); + ytphy_write_ext(phydev, 0x1092, 0x712); + ytphy_write_ext(phydev, 0x90bc, 
0x6661); + ytphy_write_ext(phydev, 0x90b9, 0x620b); + ytphy_write_ext(phydev, 0x2001, 0x6418); + ytphy_write_ext(phydev, 0x1019, 0x3712); + ytphy_write_ext(phydev, 0x101a, 0x3713); + ytphy_write_ext(phydev, 0x2015, 0x1012); + ytphy_write_ext(phydev, 0x2005, 0x810); + ytphy_write_ext(phydev, 0x2013, 0xff06); + ytphy_write_ext(phydev, 0x1053, 0xf); + ytphy_write_ext(phydev, 0x105e, 0xa46c); + ytphy_write_ext(phydev, 0x1088, 0x002b); + ytphy_write_ext(phydev, 0x1088, 0x002b); + ytphy_write_ext(phydev, 0x1088, 0xb); + ytphy_write_ext(phydev, 0x3008, 0x143); + ytphy_write_ext(phydev, 0x3009, 0x1918); + ytphy_write_ext(phydev, 0x9095, 0x1a1a); + ytphy_write_ext(phydev, 0x9096, 0x1a10); + ytphy_write_ext(phydev, 0x9097, 0x101a); + ytphy_write_ext(phydev, 0x9098, 0x01ff); + if (!(priv->chip_mode)) { /* rgmii config */ +#if defined (YT8011_RGMII_DVDDIO_3V3) + ytphy_write_ext(phydev, 0x9000, 0x8000); + ytphy_write_ext(phydev, 0x0062, 0x0000); + ytphy_write_ext(phydev, 0x9000, 0x0000); + ytphy_write_ext(phydev, 0x9031, 0xb200); + ytphy_write_ext(phydev, 0x903b, 0x0040); + ytphy_write_ext(phydev, 0x903e, 0x3b3b); + ytphy_write_ext(phydev, 0x903c, 0xf); + ytphy_write_ext(phydev, 0x903d, 0x1000); + ytphy_write_ext(phydev, 0x9038, 0x0000); +#elif defined (YT8011_RGMII_DVDDIO_2V5) + ytphy_write_ext(phydev, 0x9000, 0x8000); + ytphy_write_ext(phydev, 0x0062, 0x0000); + ytphy_write_ext(phydev, 0x9000, 0x0000); + ytphy_write_ext(phydev, 0x9031, 0xb200); + ytphy_write_ext(phydev, 0x9111, 0x5); + ytphy_write_ext(phydev, 0x9114, 0x3939); + ytphy_write_ext(phydev, 0x9112, 0xf); + ytphy_write_ext(phydev, 0x9110, 0x0); + ytphy_write_ext(phydev, 0x9113, 0x10); + ytphy_write_ext(phydev, 0x903d, 0x2); +#elif defined (YT8011_RGMII_DVDDIO_1V8) + ytphy_write_ext(phydev, 0x9000, 0x8000); + ytphy_write_ext(phydev, 0x0062, 0x0000); + ytphy_write_ext(phydev, 0x9000, 0x0000); + ytphy_write_ext(phydev, 0x9031, 0xb200); + ytphy_write_ext(phydev, 0x9116, 0x6); + ytphy_write_ext(phydev, 0x9119, 0x3939); + 
ytphy_write_ext(phydev, 0x9117, 0xf); + ytphy_write_ext(phydev, 0x9115, 0x0); + ytphy_write_ext(phydev, 0x9118, 0x20); + ytphy_write_ext(phydev, 0x903d, 0x3); +#endif + } - lpa = __phy_read(phydev, MII_LPA); - if (lpa < 0) - return lpa; + ytphy_soft_reset(phydev); - mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); - } else { - linkmode_zero(phydev->lp_advertising); - } +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", + __func__, phydev->mdio.addr); +#endif return 0; } -/** - * yt8521_adjust_status() - update speed and duplex to phydev. when in fiber - * mode, adjust speed and duplex. - * @phydev: a pointer to a &struct phy_device - * @status: yt8521 status read from YTPHY_SPECIFIC_STATUS_REG - * @is_utp: false(yt8521 work in fiber mode) or true(yt8521 work in utp mode) - * - * NOTE:The caller must have taken the MDIO bus lock. 
- * - * returns 0 - */ -static int yt8521_adjust_status(struct phy_device *phydev, int status, - bool is_utp) +static int ytxxxx_automotive_adjust_status(struct phy_device *phydev, int val) { - int speed_mode, duplex; - int speed; - int err; - int lpa; - - if (is_utp) - duplex = (status & YTPHY_SSR_DUPLEX) >> YTPHY_SSR_DUPLEX_OFFSET; - else - duplex = DUPLEX_FULL; /* for fiber, it always DUPLEX_FULL */ - - speed_mode = (status & YTPHY_SSR_SPEED_MODE_MASK) >> - YTPHY_SSR_SPEED_MODE_OFFSET; - + int speed_mode; +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + int speed = -1; +#else + int speed = SPEED_UNKNOWN; +#endif + + speed_mode = (val & YTXXXX_SPEED_MODE) >> YTXXXX_SPEED_MODE_BIT; switch (speed_mode) { - case YTPHY_SSR_SPEED_10M: - if (is_utp) - speed = SPEED_10; - else - /* for fiber, it will never run here, default to - * SPEED_UNKNOWN - */ - speed = SPEED_UNKNOWN; - break; - case YTPHY_SSR_SPEED_100M: + case 1: speed = SPEED_100; break; - case YTPHY_SSR_SPEED_1000M: + case 2: speed = SPEED_1000; break; default: +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + speed = -1; +#else speed = SPEED_UNKNOWN; +#endif break; } phydev->speed = speed; - phydev->duplex = duplex; - - if (is_utp) { - err = ytphy_utp_read_lpa(phydev); - if (err < 0) - return err; - - phy_resolve_aneg_pause(phydev); - } else { - lpa = __phy_read(phydev, MII_LPA); - if (lpa < 0) - return lpa; - - /* only support 1000baseX Full */ - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - phydev->lp_advertising, lpa & LPA_1000XFULL); - - if (!(lpa & YTPHY_FLPA_PAUSE)) { - phydev->pause = 0; - phydev->asym_pause = 0; - } else if ((lpa & YTPHY_FLPA_ASYM_PAUSE)) { - phydev->pause = 1; - phydev->asym_pause = 1; - } else { - phydev->pause = 1; - phydev->asym_pause = 0; - } - } + phydev->duplex = DUPLEX_FULL; return 0; } -/** - * yt8521_read_status_paged() - determines the speed and duplex of one page - * @phydev: a pointer to a &struct phy_device - * @page: The reg 
page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to - * operate. - * - * returns 1 (utp or fiber link),0 (no link) or negative errno code - */ -static int yt8521_read_status_paged(struct phy_device *phydev, int page) +static int yt8011_read_status(struct phy_device *phydev) { - int fiber_latch_val; - int fiber_curr_val; - int old_page; - int ret = 0; - int status; + int ret; + int val; int link; + int link_utp = 0; - linkmode_zero(phydev->lp_advertising); - phydev->duplex = DUPLEX_UNKNOWN; - phydev->speed = SPEED_UNKNOWN; - phydev->asym_pause = 0; - phydev->pause = 0; - - /* YT8521 has two reg space (utp/fiber) for linkup with utp/fiber - * respectively. but for utp/fiber combo mode, reg space should be - * arbitrated based on media priority. by default, utp takes - * priority. reg space should be properly set before read - * YTPHY_SPECIFIC_STATUS_REG. - */ - - page &= YT8521_RSSR_SPACE_MASK; - old_page = phy_select_page(phydev, page); - if (old_page < 0) - goto err_restore_page; - - /* Read YTPHY_SPECIFIC_STATUS_REG, which indicates the speed and duplex - * of the PHY is actually using. - */ - ret = __phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG); + /* UTP */ + ret = ytphy_write_ext(phydev, 0x9000, 0x0); if (ret < 0) - goto err_restore_page; - - status = ret; - link = !!(status & YTPHY_SSR_LINK); - - /* When PHY is in fiber mode, speed transferred from 1000Mbps to - * 100Mbps,there is not link down from YTPHY_SPECIFIC_STATUS_REG, so - * we need check MII_BMSR to identify such case. 
- */ - if (page == YT8521_RSSR_FIBER_SPACE) { - ret = __phy_read(phydev, MII_BMSR); - if (ret < 0) - goto err_restore_page; + return ret; - fiber_latch_val = ret; - ret = __phy_read(phydev, MII_BMSR); - if (ret < 0) - goto err_restore_page; + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; - fiber_curr_val = ret; - if (link && fiber_latch_val != fiber_curr_val) { - link = 0; - phydev_info(phydev, - "%s, fiber link down detect, latch = %04x, curr = %04x\n", - __func__, fiber_latch_val, fiber_curr_val); - } + link = val & (BIT(YTXXXX_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + ytxxxx_automotive_adjust_status(phydev, val); } else { - /* Read autonegotiation status */ - ret = __phy_read(phydev, MII_BMSR); - if (ret < 0) - goto err_restore_page; - - phydev->autoneg_complete = ret & BMSR_ANEGCOMPLETE ? 1 : 0; + link_utp = 0; } - if (link) { - if (page == YT8521_RSSR_UTP_SPACE) - yt8521_adjust_status(phydev, status, true); - else - yt8521_adjust_status(phydev, status, false); + if (link_utp) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media: UTP, mii reg 0x11 = 0x%x\n", + __func__, phydev->addr, (unsigned int)val); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media: UTP, mii reg 0x11 = 0x%x\n", + __func__, phydev->mdio.addr, + (unsigned int)val); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->mdio.addr); +#endif + phydev->link = 0; } - return phy_restore_page(phydev, old_page, link); -err_restore_page: - return phy_restore_page(phydev, old_page, ret); + if (link_utp) + ytphy_write_ext(phydev, 0x9000, 0x0); + + return 0; } -/** - * yt8521_read_status() - 
determines the negotiated speed and duplex - * @phydev: a pointer to a &struct phy_device - * - * returns 0 or negative errno code - */ -static int yt8521_read_status(struct phy_device *phydev) +static int yt8512_led_init(struct phy_device *phydev) { - struct yt8521_priv *priv = phydev->priv; - int link_fiber = 0; - int link_utp; - int link; + int mask; int ret; + int val; - if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { - link = yt8521_read_status_paged(phydev, priv->reg_page); - if (link < 0) - return link; - } else { - /* when page is YT8521_RSSR_TO_BE_ARBITRATED, arbitration is - * needed. by default, utp is higher priority. - */ + val = ytphy_read_ext(phydev, YT8512_EXTREG_LED0); + if (val < 0) + return val; - link_utp = yt8521_read_status_paged(phydev, - YT8521_RSSR_UTP_SPACE); - if (link_utp < 0) - return link_utp; + val |= YT8512_LED0_ACT_BLK_IND; - if (!link_utp) { - link_fiber = yt8521_read_status_paged(phydev, - YT8521_RSSR_FIBER_SPACE); - if (link_fiber < 0) - return link_fiber; - } + mask = YT8512_LED0_DIS_LED_AN_TRY | YT8512_LED0_BT_BLK_EN | + YT8512_LED0_HT_BLK_EN | YT8512_LED0_COL_BLK_EN | + YT8512_LED0_BT_ON_EN; + val &= ~mask; - link = link_utp || link_fiber; - } + ret = ytphy_write_ext(phydev, YT8512_EXTREG_LED0, val); + if (ret < 0) + return ret; - if (link) { - if (phydev->link == 0) { - /* arbitrate reg space based on linkup media type. */ - if (priv->polling_mode == YT8521_MODE_POLL && - priv->reg_page == YT8521_RSSR_TO_BE_ARBITRATED) { - if (link_fiber) - priv->reg_page = - YT8521_RSSR_FIBER_SPACE; - else - priv->reg_page = YT8521_RSSR_UTP_SPACE; - - ret = ytphy_write_ext_with_lock(phydev, - YT8521_REG_SPACE_SELECT_REG, - priv->reg_page); - if (ret < 0) - return ret; + val = ytphy_read_ext(phydev, YT8512_EXTREG_LED1); + if (val < 0) + return val; - phydev->port = link_fiber ? PORT_FIBRE : PORT_TP; + val |= YT8512_LED1_BT_ON_EN; - phydev_info(phydev, "%s, link up, media: %s\n", - __func__, - (phydev->port == PORT_TP) ? 
- "UTP" : "Fiber"); - } - } - phydev->link = 1; - } else { - if (phydev->link == 1) { - phydev_info(phydev, "%s, link down, media: %s\n", - __func__, (phydev->port == PORT_TP) ? - "UTP" : "Fiber"); - - /* When in YT8521_MODE_POLL mode, need prepare for next - * arbitration. - */ - if (priv->polling_mode == YT8521_MODE_POLL) { - priv->reg_page = YT8521_RSSR_TO_BE_ARBITRATED; - phydev->port = PORT_NONE; - } - } + mask = YT8512_LED1_TXACT_BLK_EN | YT8512_LED1_RXACT_BLK_EN; + val &= ~mask; - phydev->link = 0; - } + ret = ytphy_write_ext(phydev, YT8512_EXTREG_LED1, val); - return 0; + return ret; } -/** - * yt8521_modify_bmcr_paged - bits modify a PHY's BMCR register of one page - * @phydev: the phy_device struct - * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to operate - * @mask: bit mask of bits to clear - * @set: bit mask of bits to set - * - * NOTE: Convenience function which allows a PHY's BMCR register to be - * modified as new register value = (old register value & ~mask) | set. - * YT8521 has two space (utp/fiber) and three mode (utp/fiber/poll), each space - * has MII_BMCR. poll mode combines utp and faber,so need do both. - * If it is reset, it will wait for completion. 
- * - * returns 0 or negative errno code - */ -static int yt8521_modify_bmcr_paged(struct phy_device *phydev, int page, - u16 mask, u16 set) +static int yt8512_probe(struct phy_device *phydev) { - int max_cnt = 500; /* the max wait time of reset ~ 500 ms */ - int old_page; - int ret = 0; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct device *dev = &phydev->dev; +#else + struct device *dev = &phydev->mdio.dev; +#endif + struct yt8xxx_priv *priv; + int chip_config; - old_page = phy_select_page(phydev, page & YT8521_RSSR_SPACE_MASK); - if (old_page < 0) - goto err_restore_page; + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; - ret = __phy_modify(phydev, MII_BMCR, mask, set); - if (ret < 0) - goto err_restore_page; + phydev->priv = priv; - /* If it is reset, need to wait for the reset to complete */ - if (set == BMCR_RESET) { - while (max_cnt--) { - usleep_range(1000, 1100); - ret = __phy_read(phydev, MII_BMCR); - if (ret < 0) - goto err_restore_page; + chip_config = ytphy_read_ext(phydev, YT8512_EXTENDED_COMBO_CONTROL1); - if (!(ret & BMCR_RESET)) - return phy_restore_page(phydev, old_page, 0); - } - } + priv->chip_mode = (chip_config & (BIT(1) | BIT(0))); -err_restore_page: - return phy_restore_page(phydev, old_page, ret); + return 0; } -/** - * yt8521_modify_utp_fiber_bmcr - bits modify a PHY's BMCR register - * @phydev: the phy_device struct - * @mask: bit mask of bits to clear - * @set: bit mask of bits to set - * - * NOTE: Convenience function which allows a PHY's BMCR register to be - * modified as new register value = (old register value & ~mask) | set. - * YT8521 has two space (utp/fiber) and three mode (utp/fiber/poll), each space - * has MII_BMCR. poll mode combines utp and faber,so need do both. 
- * - * returns 0 or negative errno code - */ -static int yt8521_modify_utp_fiber_bmcr(struct phy_device *phydev, u16 mask, - u16 set) +static int yt8512_config_init(struct phy_device *phydev) { - struct yt8521_priv *priv = phydev->priv; + struct yt8xxx_priv *priv = phydev->priv; int ret; + int val; - if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { - ret = yt8521_modify_bmcr_paged(phydev, priv->reg_page, mask, - set); - if (ret < 0) - return ret; - } else { - ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_UTP_SPACE, - mask, set); - if (ret < 0) - return ret; + val = ytphy_read_ext(phydev, YT8512_10BT_DEBUG_LPBKS); + if (val < 0) + return val; + + val &= ~BIT(10); + ret = ytphy_write_ext(phydev, YT8512_10BT_DEBUG_LPBKS, val); + if (ret < 0) + return ret; - ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_FIBER_SPACE, - mask, set); + if (!(priv->chip_mode)) { /* MII mode */ + val &= ~BIT(15); + ret = ytphy_write_ext(phydev, YT8512_10BT_DEBUG_LPBKS, val); if (ret < 0) return ret; } - return 0; -} - -/** - * yt8521_soft_reset() - called to issue a PHY software reset - * @phydev: a pointer to a &struct phy_device - * - * returns 0 or negative errno code - */ -static int yt8521_soft_reset(struct phy_device *phydev) -{ - return yt8521_modify_utp_fiber_bmcr(phydev, 0, BMCR_RESET); -} - -/** - * yt8521_suspend() - suspend the hardware - * @phydev: a pointer to a &struct phy_device - * - * returns 0 or negative errno code - */ -static int yt8521_suspend(struct phy_device *phydev) -{ - int wol_config; - /* YTPHY_WOL_CONFIG_REG is common ext reg */ - wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG); - if (wol_config < 0) - return wol_config; +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; - /* if wol enable, do nothing */ - if (wol_config & YTPHY_WCR_ENABLE) - return 0; + ret = 
yt8512_led_init(phydev); - return yt8521_modify_utp_fiber_bmcr(phydev, 0, BMCR_PDOWN); -} + /* disable auto sleep */ + val = ytphy_read_ext(phydev, YT8512_EXTREG_SLEEP_CONTROL1); + if (val < 0) + return val; -/** - * yt8521_resume() - resume the hardware - * @phydev: a pointer to a &struct phy_device - * - * returns 0 or negative errno code - */ -static int yt8521_resume(struct phy_device *phydev) -{ - int ret; - int wol_config; + val &= (~BIT(YT8512_EN_SLEEP_SW_BIT)); - /* disable auto sleep */ - ret = ytphy_modify_ext_with_lock(phydev, - YT8521_EXTREG_SLEEP_CONTROL1_REG, - YT8521_ESC1R_SLEEP_SW, 0); + ret = ytphy_write_ext(phydev, YT8512_EXTREG_SLEEP_CONTROL1, val); if (ret < 0) return ret; - wol_config = ytphy_read_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG); - if (wol_config < 0) - return wol_config; - - /* if wol enable, do nothing */ - if (wol_config & YTPHY_WCR_ENABLE) - return 0; + ytphy_soft_reset(phydev); - return yt8521_modify_utp_fiber_bmcr(phydev, BMCR_PDOWN, 0); + return ret; } -/** - * yt8521_config_init() - called to initialize the PHY - * @phydev: a pointer to a &struct phy_device - * - * returns 0 or negative errno code - */ -static int yt8521_config_init(struct phy_device *phydev) +static int yt8512_read_status(struct phy_device *phydev) { - struct device_node *node = phydev->mdio.dev.of_node; - int old_page; - int ret = 0; + int speed, speed_mode, duplex; + int val; - old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE); - if (old_page < 0) - goto err_restore_page; + genphy_read_status(phydev); - /* set rgmii delay mode */ - if (phydev->interface != PHY_INTERFACE_MODE_SGMII) { - ret = ytphy_rgmii_clk_delay_config(phydev); - if (ret < 0) - goto err_restore_page; - } + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; - if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) { - /* disable auto sleep */ - ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG, - YT8521_ESC1R_SLEEP_SW, 0); - if (ret < 
0) - goto err_restore_page; + duplex = (val & YTXXXX_DUPLEX) >> YTXXXX_DUPLEX_BIT; + speed_mode = (val & YTXXXX_SPEED_MODE) >> YTXXXX_SPEED_MODE_BIT; + switch (speed_mode) { + case 0: + speed = SPEED_10; + break; + case 1: + speed = SPEED_100; + break; + case 2: + case 3: + default: +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + speed = -1; +#else + speed = SPEED_UNKNOWN; +#endif + break; } - if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) { - /* enable RXC clock when no wire plug */ - ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG, - YT8521_CGR_RX_CLK_EN, 0); - if (ret < 0) - goto err_restore_page; - } -err_restore_page: - return phy_restore_page(phydev, old_page, ret); + phydev->speed = speed; + phydev->duplex = duplex; + + return 0; } -static int yt8531_config_init(struct phy_device *phydev) +static int yt8522_read_status(struct phy_device *phydev) { - struct device_node *node = phydev->mdio.dev.of_node; - int ret; + int speed, speed_mode, duplex; + int val; - ret = ytphy_rgmii_clk_delay_config_with_lock(phydev); - if (ret < 0) - return ret; + genphy_read_status(phydev); - if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) { - /* disable auto sleep */ - ret = ytphy_modify_ext_with_lock(phydev, - YT8521_EXTREG_SLEEP_CONTROL1_REG, - YT8521_ESC1R_SLEEP_SW, 0); - if (ret < 0) - return ret; - } + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; - if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) { - /* enable RXC clock when no wire plug */ - ret = ytphy_modify_ext_with_lock(phydev, - YT8521_CLOCK_GATING_REG, - YT8521_CGR_RX_CLK_EN, 0); - if (ret < 0) - return ret; + if ((val & BIT(10)) >> YTXXXX_LINK_STATUS_BIT) { + duplex = (val & BIT(13)) >> YTXXXX_DUPLEX_BIT; + speed_mode = (val & (BIT(15) | BIT(14))) >> + YTXXXX_SPEED_MODE_BIT; + switch (speed_mode) { + case 0: + speed = SPEED_10; + break; + case 1: + speed = SPEED_100; + break; + case 2: + case 3: + default: +#if 
(KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + speed = -1; +#else + speed = SPEED_UNKNOWN; +#endif + break; + } + + phydev->link = 1; + phydev->speed = speed; + phydev->duplex = duplex; + + return 0; } - ret = yt8531_set_ds(phydev); - if (ret < 0) - return ret; + phydev->link = 0; return 0; } -/** - * yt8531_link_change_notify() - Adjust the tx clock direction according to - * the current speed and dts config. - * @phydev: a pointer to a &struct phy_device - * - * NOTE: This function is only used to adapt to VF2 with JH7110 SoC. Please - * keep "motorcomm,tx-clk-adj-enabled" not exist in dts when the soc is not - * JH7110. - */ -static void yt8531_link_change_notify(struct phy_device *phydev) -{ - struct device_node *node = phydev->mdio.dev.of_node; - bool tx_clk_1000_inverted = false; - bool tx_clk_100_inverted = false; - bool tx_clk_10_inverted = false; - bool tx_clk_adj_enabled = false; - u16 val = 0; - int ret; - - if (of_property_read_bool(node, "motorcomm,tx-clk-adj-enabled")) - tx_clk_adj_enabled = true; +static int yt8522_probe(struct phy_device *phydev) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct device *dev = &phydev->dev; +#else + struct device *dev = &phydev->mdio.dev; +#endif + struct yt8xxx_priv *priv; + int chip_config; - if (!tx_clk_adj_enabled) - return; + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; - if (of_property_read_bool(node, "motorcomm,tx-clk-10-inverted")) - tx_clk_10_inverted = true; - if (of_property_read_bool(node, "motorcomm,tx-clk-100-inverted")) - tx_clk_100_inverted = true; - if (of_property_read_bool(node, "motorcomm,tx-clk-1000-inverted")) - tx_clk_1000_inverted = true; + phydev->priv = priv; - if (phydev->speed < 0) - return; + chip_config = ytphy_read_ext(phydev, YT8522_EXTENDED_COMBO_CTRL_1); - switch (phydev->speed) { - case SPEED_1000: - if (tx_clk_1000_inverted) - val = YT8521_RC1R_TX_CLK_SEL_INVERTED; - break; - case SPEED_100: - if (tx_clk_100_inverted) - val = 
YT8521_RC1R_TX_CLK_SEL_INVERTED; + priv->chip_mode = (chip_config & (BIT(1) | BIT(0))); + + return 0; +} + +static int yt8522_config_init(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int ret; + int val; + + val = ytphy_read_ext(phydev, YT8522_EXTENDED_COMBO_CTRL_1); + if (val < 0) + return val; + + /* RMII2 mode */ + if (0x2 == (priv->chip_mode)) { + val |= BIT(4); + ret = ytphy_write_ext(phydev, YT8522_EXTENDED_COMBO_CTRL_1, + val); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, YT8522_TX_DELAY_CONTROL, 0x9f); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, YT8522_EXTENDED_PAD_CONTROL, + 0x81d4); + if (ret < 0) + return ret; + } else if (0x3 == (priv->chip_mode)) { /* RMII1 mode */ + val |= BIT(4); + ret = ytphy_write_ext(phydev, YT8522_EXTENDED_COMBO_CTRL_1, + val); + if (ret < 0) + return ret; + } + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, YT8522_TX_CLK_DELAY, 0); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, YT8522_ANAGLOG_IF_CTRL, 0xbf2a); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, YT8522_DAC_CTRL, 0x297f); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, YT8522_INTERPOLATOR_FILTER_1, 0x1FE); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, YT8522_INTERPOLATOR_FILTER_2, 0x1FE); + if (ret < 0) + return ret; + + /* disable auto sleep */ + val = ytphy_read_ext(phydev, YT8512_EXTREG_SLEEP_CONTROL1); + if (val < 0) + return val; + + val &= (~BIT(YT8512_EN_SLEEP_SW_BIT)); + + ret = ytphy_write_ext(phydev, YT8512_EXTREG_SLEEP_CONTROL1, val); + if (ret < 0) + return ret; + + ytphy_soft_reset(phydev); + + return 0; +} + +static int yt8521_probe(struct phy_device *phydev) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct 
device *dev = &phydev->dev; +#else + struct device *dev = &phydev->mdio.dev; +#endif + struct yt8xxx_priv *priv; + int chip_config; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + chip_config = ytphy_read_ext(phydev, 0xa001); + + priv->chip_mode = chip_config & 0x7; + switch (priv->chip_mode) { + case 1: /* fiber<>rgmii */ + case 4: + case 5: + priv->polling_mode = YT_PHY_MODE_FIBER; break; - case SPEED_10: - if (tx_clk_10_inverted) - val = YT8521_RC1R_TX_CLK_SEL_INVERTED; + case 2: /* utp/fiber<>rgmii */ + case 6: + case 7: + priv->polling_mode = YT_PHY_MODE_POLL; break; + case 3: /* utp<>sgmii */ + case 0: /* utp<>rgmii */ default: + priv->polling_mode = YT_PHY_MODE_UTP; + break; + } + + return 0; +} + +#if GMAC_CLOCK_INPUT_NEEDED +static int ytphy_mii_rd_ext(struct mii_bus *bus, int phy_id, u32 regnum) +{ + int ret; + int val; + + ret = bus->write(bus, phy_id, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + return ret; + + val = bus->read(bus, phy_id, REG_DEBUG_DATA); + + return val; +} + +static int ytphy_mii_wr_ext(struct mii_bus *bus, int phy_id, u32 regnum, + u16 val) +{ + int ret; + + ret = bus->write(bus, phy_id, REG_DEBUG_ADDR_OFFSET, regnum); + if (ret < 0) + return ret; + + ret = bus->write(bus, phy_id, REG_DEBUG_DATA, val); + + return ret; +} + +static int yt8511_config_dis_txdelay(struct mii_bus *bus, int phy_id) +{ + int ret; + int val; + + /* disable auto sleep */ + val = ytphy_mii_rd_ext(bus, phy_id, 0x27); + if (val < 0) + return val; + + val &= (~BIT(15)); + + ret = ytphy_mii_wr_ext(bus, phy_id, 0x27, val); + if (ret < 0) + return ret; + + /* enable RXC clock when no wire plug */ + val = ytphy_mii_rd_ext(bus, phy_id, 0xc); + if (val < 0) + return val; + + /* ext reg 0xc b[7:4] + * Tx Delay time = 150ps * N - 250ps + */ + val &= ~(0xf << 4); + ret = ytphy_mii_wr_ext(bus, phy_id, 0xc, val); + + return ret; +} + +static int yt8511_config_out_125m(struct mii_bus *bus, int phy_id) 
+{ + int ret; + int val; + + /* disable auto sleep */ + val = ytphy_mii_rd_ext(bus, phy_id, 0x27); + if (val < 0) + return val; + + val &= (~BIT(15)); + + ret = ytphy_mii_wr_ext(bus, phy_id, 0x27, val); + if (ret < 0) + return ret; + + /* enable RXC clock when no wire plug */ + val = ytphy_mii_rd_ext(bus, phy_id, 0xc); + if (val < 0) + return val; + + /* ext reg 0xc.b[2:1] + * 00-----25M from pll; + * 01---- 25M from xtl;(default) + * 10-----62.5M from pll; + * 11----125M from pll(here set to this value) + */ + val |= (3 << 1); + ret = ytphy_mii_wr_ext(bus, phy_id, 0xc, val); + +#ifdef YT_8511_INIT_TO_MASTER + /* for customer, please enable it based on demand. + * configure to master + */ + /* master/slave config reg*/ + val = bus->read(bus, phy_id, 0x9); + /* to be manual config and force to be master */ + val |= (0x3<<11); + /* take effect until phy soft reset */ + ret = bus->write(bus, phy_id, 0x9, val); + if (ret < 0) + return ret; +#endif + + return ret; +} + +static int yt8511_config_init(struct phy_device *phydev) +{ + int ret; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", + __func__, phydev->mdio.addr); +#endif + + ytphy_soft_reset(phydev); + + return ret; +} +#endif /* GMAC_CLOCK_INPUT_NEEDED */ + +#if (YTPHY_WOL_FEATURE_ENABLE) +static int ytphy_switch_reg_space(struct phy_device *phydev, int space) +{ + int ret; + + if (space == YTPHY_REG_SPACE_UTP) + ret = ytphy_write_ext(phydev, 0xa000, 0); + else + ret = ytphy_write_ext(phydev, 0xa000, 2); + + return ret; +} + +static int ytphy_wol_feature_enable_cfg(struct phy_device *phydev, + struct ytphy_wol_feature_cfg wol_cfg) +{ + int ret = 0; + int val = 0; + 
+ val = ytphy_read_ext(phydev, YTPHY_WOL_FEATURE_REG_CFG); + if (val < 0) + return val; + + if (wol_cfg.enable) { + val |= YTPHY_WOL_FEATURE_ENABLE_CFG; + + if (wol_cfg.type == YTPHY_WOL_FEATURE_LEVEL_TRIGGER) { + val &= ~YTPHY_WOL_FEATURE_TYPE_CFG; + val &= ~YTPHY_WOL_FEATURE_INTR_SEL_CFG; + } else if (wol_cfg.type == YTPHY_WOL_FEATURE_PULSE_TRIGGER) { + val |= YTPHY_WOL_FEATURE_TYPE_CFG; + val |= YTPHY_WOL_FEATURE_INTR_SEL_CFG; + + if (wol_cfg.width == YTPHY_WOL_FEATURE_84MS_PULSE_WIDTH) { + val &= ~YTPHY_WOL_FEATURE_WIDTH1_CFG; + val &= ~YTPHY_WOL_FEATURE_WIDTH2_CFG; + } else if (wol_cfg.width == YTPHY_WOL_FEATURE_168MS_PULSE_WIDTH) { + val |= YTPHY_WOL_FEATURE_WIDTH1_CFG; + val &= ~YTPHY_WOL_FEATURE_WIDTH2_CFG; + } else if (wol_cfg.width == YTPHY_WOL_FEATURE_336MS_PULSE_WIDTH) { + val &= ~YTPHY_WOL_FEATURE_WIDTH1_CFG; + val |= YTPHY_WOL_FEATURE_WIDTH2_CFG; + } else if (wol_cfg.width == YTPHY_WOL_FEATURE_672MS_PULSE_WIDTH) { + val |= YTPHY_WOL_FEATURE_WIDTH1_CFG; + val |= YTPHY_WOL_FEATURE_WIDTH2_CFG; + } + } + } else { + val &= ~YTPHY_WOL_FEATURE_ENABLE_CFG; + val &= ~YTPHY_WOL_FEATURE_INTR_SEL_CFG; + } + + ret = ytphy_write_ext(phydev, YTPHY_WOL_FEATURE_REG_CFG, val); + if (ret < 0) + return ret; + + return 0; +} + +static void ytphy_wol_feature_get(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int val = 0; + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + val = ytphy_read_ext(phydev, YTPHY_WOL_FEATURE_REG_CFG); + if (val < 0) return; + + if (val & YTPHY_WOL_FEATURE_ENABLE_CFG) + wol->wolopts |= WAKE_MAGIC; +} + +static int ytphy_wol_feature_set(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct net_device *p_attached_dev = phydev->attached_dev; + struct ytphy_wol_feature_cfg wol_cfg; + int ret, curr_reg_space, val; + + memset(&wol_cfg, 0, sizeof(struct ytphy_wol_feature_cfg)); + curr_reg_space = ytphy_read_ext(phydev, 0xa000); + if (curr_reg_space < 0) + return curr_reg_space; + + /* Switch to phy UTP page */ + ret = 
ytphy_switch_reg_space(phydev, YTPHY_REG_SPACE_UTP); + if (ret < 0) + return ret; + + if (wol->wolopts & WAKE_MAGIC) { + /* Enable the WOL feature interrupt */ + val = phy_read(phydev, YTPHY_UTP_INTR_REG); + val |= YTPHY_WOL_FEATURE_INTR; + ret = phy_write(phydev, YTPHY_UTP_INTR_REG, val); + if (ret < 0) + return ret; + + /* Set the WOL feature config */ + wol_cfg.enable = true; + wol_cfg.type = YTPHY_WOL_FEATURE_PULSE_TRIGGER; + wol_cfg.width = YTPHY_WOL_FEATURE_672MS_PULSE_WIDTH; + ret = ytphy_wol_feature_enable_cfg(phydev, wol_cfg); + if (ret < 0) + return ret; + + /* Store the device address for the magic packet */ + ret = ytphy_write_ext(phydev, + YTPHY_WOL_FEATURE_MACADDR2_4_MAGIC_PACKET, + ((p_attached_dev->dev_addr[0] << 8) | + p_attached_dev->dev_addr[1])); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, + YTPHY_WOL_FEATURE_MACADDR1_4_MAGIC_PACKET, + ((p_attached_dev->dev_addr[2] << 8) | + p_attached_dev->dev_addr[3])); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, + YTPHY_WOL_FEATURE_MACADDR0_4_MAGIC_PACKET, + ((p_attached_dev->dev_addr[4] << 8) | + p_attached_dev->dev_addr[5])); + if (ret < 0) + return ret; + } else { + wol_cfg.enable = false; + wol_cfg.type = YTPHY_WOL_FEATURE_TRIGGER_TYPE_MAX; + wol_cfg.width = YTPHY_WOL_FEATURE_PULSE_WIDTH_MAX; + ret = ytphy_wol_feature_enable_cfg(phydev, wol_cfg); + if (ret < 0) + return ret; } - ret = ytphy_modify_ext_with_lock(phydev, YT8521_RGMII_CONFIG1_REG, - YT8521_RC1R_TX_CLK_SEL_INVERTED, val); + /* Recover to previous register space page */ + ret = ytphy_switch_reg_space(phydev, curr_reg_space); if (ret < 0) - phydev_warn(phydev, "Modify TX_CLK_SEL err:%d\n", ret); + return ret; + + return 0; } +#endif /*(YTPHY_WOL_FEATURE_ENABLE)*/ -/** - * yt8521_prepare_fiber_features() - A small helper function that setup - * fiber's features. 
- * @phydev: a pointer to a &struct phy_device - * @dst: a pointer to store fiber's features - */ -static void yt8521_prepare_fiber_features(struct phy_device *phydev, - unsigned long *dst) +static int yt8521_config_init(struct phy_device *phydev) { - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, dst); - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, dst); - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, dst); - linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, dst); + int ret; + int val, chip_config; + + struct yt8xxx_priv *priv = phydev->priv; + +#if (YTPHY_WOL_FEATURE_ENABLE) + struct ethtool_wolinfo wol; + + /* set phy wol enable */ + memset(&wol, 0x0, sizeof(struct ethtool_wolinfo)); + wol.wolopts |= WAKE_MAGIC; + ytphy_wol_feature_set(phydev, &wol); +#endif + if(phydev->force_mode) { + chip_config = ytphy_read_ext(phydev, 0xa001) & 0x7; + chip_config = chip_config & 0x7ff8; + chip_config = chip_config | 0x140; + ytphy_write_ext(phydev, 0xa001, chip_config); + + priv->chip_mode = chip_config & 0x7; + priv->polling_mode = YT_PHY_MODE_UTP; + } + + phydev->irq = PHY_POLL; + + ytphy_write_ext(phydev, 0xa000, 0); +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + /* disable auto sleep */ + val = ytphy_read_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1); + if (val < 0) + return val; + + val &= (~BIT(YT8521_EN_SLEEP_SW_BIT)); + + ret = ytphy_write_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1, val); + if (ret < 0) + return ret; + + /* enable RXC clock when no wire plug */ + val = ytphy_read_ext(phydev, 0xc); + if (val < 0) + return val; + val &= ~(1 << 12); + ret = ytphy_write_ext(phydev, 0xc, val); + if (ret < 0) + return ret; + + ytxxxx_soft_reset(phydev); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s done, phy addr: %d, chip mode = %d, polling 
mode = %d\n", + __func__, phydev->addr, + priv->chip_mode, priv->polling_mode); +#else + netdev_info(phydev->attached_dev, + "%s done, phy addr: %d, chip mode = %d, polling mode = %d\n", + __func__, phydev->mdio.addr, + priv->chip_mode, priv->polling_mode); +#endif + + return ret; } -/** - * yt8521_fiber_setup_forced - configures/forces speed from @phydev - * @phydev: target phy_device struct - * - * NOTE:The caller must have taken the MDIO bus lock. - * - * returns 0 or negative errno code +/* for fiber mode, there is no 10M speed mode and + * this function is for this purpose. */ -static int yt8521_fiber_setup_forced(struct phy_device *phydev) +static int ytxxxx_adjust_status(struct phy_device *phydev, int val, int is_utp) { - u16 val; - int ret; + int speed_mode, duplex; +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + int speed = -1; +#else + int speed = SPEED_UNKNOWN; +#endif + + if (is_utp) + duplex = (val & YTXXXX_DUPLEX) >> YTXXXX_DUPLEX_BIT; + else + duplex = 1; + speed_mode = (val & YTXXXX_SPEED_MODE) >> YTXXXX_SPEED_MODE_BIT; + switch (speed_mode) { + case 0: + if (is_utp) + speed = SPEED_10; + break; + case 1: + speed = SPEED_100; + break; + case 2: + speed = SPEED_1000; + break; + case 3: + break; + default: +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + speed = -1; +#else + speed = SPEED_UNKNOWN; +#endif + break; + } + + phydev->speed = speed; + phydev->duplex = duplex; + + return 0; +} + +/* for fiber mode, when speed is 100M, there is no definition for + * autonegotiation, and this function handles this case and return + * 1 per linux kernel's polling. 
+ */ +static int yt8521_aneg_done(struct phy_device *phydev) +{ + int link_fiber = 0, link_utp = 0; + + /* reading Fiber */ + ytphy_write_ext(phydev, 0xa000, 2); + link_fiber = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & + (BIT(YTXXXX_LINK_STATUS_BIT))); + + /* reading UTP */ + ytphy_write_ext(phydev, 0xa000, 0); + if (!link_fiber) + link_utp = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & + (BIT(YTXXXX_LINK_STATUS_BIT))); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link_fiber: %d, link_utp: %d\n", + __func__, phydev->addr, link_fiber, link_utp); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link_fiber: %d, link_utp: %d\n", + __func__, phydev->mdio.addr, link_fiber, link_utp); +#endif + + return !!(link_fiber | link_utp); +} + +static int yt8521_read_status(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int link_utp = 0, link_fiber = 0; + int yt8521_fiber_latch_val; + int yt8521_fiber_curr_val; + int val_utp, val_fiber; + int link; + int ret; + int lpa; + + phydev->pause = 0; + phydev->asym_pause = 0; + + if(priv->polling_mode != YT_PHY_MODE_FIBER) { + /* reading UTP */ + ret = ytphy_write_ext(phydev, 0xa000, 0); + if (ret < 0) + return ret; + + val_utp = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val_utp < 0) + return val_utp; + + link = val_utp & (BIT(YTXXXX_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + ytxxxx_adjust_status(phydev, val_utp, 1); + } else { + link_utp = 0; + } + + if (link_utp) { + lpa = phy_read(phydev, MII_LPA); + if (lpa < 0) + return lpa; + + phydev->pause = !!(lpa & BIT(10)); + phydev->asym_pause = !!(lpa & BIT(11)); + } + } + + if (priv->polling_mode != YT_PHY_MODE_UTP) { + /* reading Fiber */ + ret = ytphy_write_ext(phydev, 0xa000, 2); + if (ret < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return ret; + } + + val_fiber = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val_fiber < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + 
return val_fiber; + } + + /* for fiber, from 1000m to 100m, there is not link down + * from 0x11, and check reg 1 to identify such case + * this is important for Linux kernel for that, missing linkdown + * event will cause problem. + */ + yt8521_fiber_latch_val = phy_read(phydev, MII_BMSR); + yt8521_fiber_curr_val = phy_read(phydev, MII_BMSR); + link = val_fiber & (BIT(YTXXXX_LINK_STATUS_BIT)); + if (link && yt8521_fiber_latch_val != yt8521_fiber_curr_val) { + link = 0; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->addr, + yt8521_fiber_latch_val, + yt8521_fiber_curr_val); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->mdio.addr, + yt8521_fiber_latch_val, + yt8521_fiber_curr_val); +#endif + } + + if (link) { + link_fiber = 1; + ytxxxx_adjust_status(phydev, val_fiber, 0); + } else { + link_fiber = 0; + } + + if (link_fiber) { + lpa = phy_read(phydev, MII_LPA); + if (lpa < 0) { + ytphy_write_ext(phydev, 0xa000, 0); + return lpa; + } + + phydev->pause = !!(lpa & BIT(7)); + phydev->asym_pause = !!(lpa & BIT(8)); + } + } + + if (link_utp || link_fiber) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media: %s, mii reg 0x11 = 0x%x\n", + __func__, phydev->addr, + (link_utp && link_fiber) ? + "UNKNOWN MEDIA" : + (link_utp ? "UTP" : "Fiber"), + link_utp ? (unsigned int)val_utp : + (unsigned int)val_fiber); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media: %s, mii reg 0x11 = 0x%x\n", + __func__, phydev->mdio.addr, + (link_utp && link_fiber) ? + "UNKNOWN MEDIA" : + (link_utp ? "UTP" : "Fiber"), + link_utp ? 
(unsigned int)val_utp : + (unsigned int)val_fiber); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->mdio.addr); +#endif + phydev->link = 0; + } + + if (priv->polling_mode != YT_PHY_MODE_FIBER) { + if (link_fiber) + ytphy_write_ext(phydev, 0xa000, 2); + else + ytphy_write_ext(phydev, 0xa000, 0); + } + + return 0; +} + +static int yt8521_suspend(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int value; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock in 4.19 */ +#endif + struct yt8xxx_priv *priv = phydev->priv; + + if (priv->polling_mode != YT_PHY_MODE_FIBER) { + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + } + + if (priv->polling_mode != YT_PHY_MODE_UTP) { + ytphy_write_ext(phydev, 0xa000, 2); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + } + + ytphy_write_ext(phydev, 0xa000, 0); + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +static int yt8521_resume(struct phy_device *phydev) +{ + int value, ret; + + /* disable auto sleep */ + value = ytphy_read_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1); + if (value < 0) + return value; + + value &= (~BIT(YT8521_EN_SLEEP_SW_BIT)); + + ret = ytphy_write_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1, value); + if (ret < 0) + return ret; + +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_lock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif + struct yt8xxx_priv 
*priv = phydev->priv; + + if (priv->polling_mode != YT_PHY_MODE_FIBER) { + ytphy_write_ext(phydev, 0xa000, 0); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + } + + if (priv->polling_mode != YT_PHY_MODE_UTP) { + ytphy_write_ext(phydev, 0xa000, 2); + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + + ytphy_write_ext(phydev, 0xa000, 0); + } + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + mutex_unlock(&phydev->lock); +#else + /* no need lock/unlock in 4.19 */ +#endif +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +static int yt8531S_config_init(struct phy_device *phydev) +{ + int ret = 0, val = 0; + +#if (YTPHY8531A_XTAL_INIT) + ret = yt8531a_xtal_init(phydev); + if (ret < 0) + return ret; +#endif + ret = ytphy_write_ext(phydev, 0xa023, 0x4031); + if (ret < 0) + return ret; + + ytphy_write_ext(phydev, 0xa000, 0x0); + val = ytphy_read_ext(phydev, 0xf); + + if(0x31 != val && 0x32 != val) { + ret = ytphy_write_ext(phydev, 0xa071, 0x9007); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x52, 0x231d); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x51, 0x04a9); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x57, 0x274c); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa006, 0x10d); + if (ret < 0) + return ret; + + if(0x500 == val) { + val = ytphy_read_ext(phydev, 0xa001); + if((0x30 == (val & 0x30)) || (0x20 == (val & 0x30))) { + ret = ytphy_write_ext(phydev, 0xa010, 0xabff); + if (ret < 0) + return ret; + } + } + } + + ret = yt8521_config_init(phydev); + if (ret < 0) + return ret; + + ytphy_write_ext(phydev, 0xa000, 0x0); + + return ret; +} + +static int yt8531_config_init(struct phy_device *phydev) +{ + int ret = 0, val = 0; + +#if (YTPHY8531A_XTAL_INIT) + ret = yt8531a_xtal_init(phydev); + if (ret < 0) + return ret; +#endif + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 
3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + val = ytphy_read_ext(phydev, 0xf); + if(0x31 != val && 0x32 != val) { + ret = ytphy_write_ext(phydev, 0x52, 0x231d); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x51, 0x04a9); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x57, 0x274c); + if (ret < 0) + return ret; + + if(0x500 == val) { + val = ytphy_read_ext(phydev, 0xa001); + if((0x30 == (val & 0x30)) || (0x20 == (val & 0x30))) { + ret = ytphy_write_ext(phydev, 0xa010, 0xabff); + if (ret < 0) + return ret; + } + } + } + + ytphy_soft_reset(phydev); + + return 0; +} + +#if (KERNEL_VERSION(5, 0, 21) < LINUX_VERSION_CODE) +static void ytphy_link_change_notify(struct phy_device *phydev) +{ + int adv; + + adv = phy_read(phydev, MII_ADVERTISE); + if (adv < 0) + return; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + phydev->advertising, (adv & ADVERTISE_10HALF)); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + phydev->advertising, (adv & ADVERTISE_10FULL)); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->advertising, (adv & ADVERTISE_100HALF)); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->advertising, (adv & ADVERTISE_100FULL)); + + adv = phy_read(phydev, MII_CTRL1000); + if (adv < 0) + return; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->advertising, (adv & ADVERTISE_1000HALF)); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->advertising, (adv & ADVERTISE_1000FULL)); +} +#endif + +#ifdef YTPHY_YT8543_ENABLE +static int yt8543_config_init(struct phy_device *phydev) +{ + int ret = 0, val = 0; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 
0x403c, 0x286); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xdc, 0x855c); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xdd, 0x6040); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x40e, 0xf00); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x40f, 0xf00); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x411, 0x5030); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x1f, 0x110a); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x20, 0xc06); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x40d, 0x1f); + if (ret < 0) + return ret; + + val = ytphy_read_ext(phydev, 0xa088); + if (val < 0) + return val; + + ret = ytphy_write_ext(phydev, 0xa088, + ((val & 0xfff0) | BIT(4)) | + (((ytphy_read_ext(phydev, 0xa015) & 0x3c)) >> 2)); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa183, 0x1918); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa184, 0x1818); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa186, 0x2018); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa189, 0x3894); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa187, 0x3838); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa18b, 0x1918); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa18c, 0x1818); + if (ret < 0) + return ret; + + ytphy_soft_reset(phydev); + + return 0; +} + +static int yt8543_read_status(struct phy_device *phydev) +{ + int link_utp = 0; + int link; + int val; + + genphy_read_status(phydev); + + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; + + link = val & (BIT(YTXXXX_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + ytxxxx_adjust_status(phydev, val, 1); + } else { + link_utp = 0; + } + + if (link_utp) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + 
"%s, phy addr: %d, link up, media: UTP, mii reg 0x11 = 0x%x\n", + __func__, phydev->addr, (unsigned int)val); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media: UTP, mii reg 0x11 = 0x%x\n", + __func__, phydev->mdio.addr, + (unsigned int)val); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->mdio.addr); +#endif + + phydev->link = 0; + } + + return 0; +} +#endif + +static int yt8614_probe(struct phy_device *phydev) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct device *dev = &phydev->dev; +#else + struct device *dev = &phydev->mdio.dev; +#endif + struct yt8xxx_priv *priv; + int chip_config; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + chip_config = ytphy_read_ext(phydev, 0xa007); + + priv->chip_mode = chip_config & 0xf; + switch (priv->chip_mode) { + case 8: /* 4'b1000, Fiber x4 + Copper x4 */ + case 12: /* 4'b1100, QSGMII x1 + Combo x4 mode; */ + case 13: /* 4'b1101, QSGMII x1 + Combo x4 mode; */ + priv->polling_mode = (YT_PHY_MODE_FIBER | YT_PHY_MODE_UTP); + break; + case 14: /* 4'b1110, QSGMII x1 + SGMII(MAC) x4 mode; */ + case 11: /* 4'b1011, QSGMII x1 + Fiber x4 mode; */ + priv->polling_mode = YT_PHY_MODE_FIBER; + break; + case 9: /* 4'b1001, Reserved. 
*/ + case 10: /* 4'b1010, QSGMII x1 + Copper x4 mode */ + case 15: /* 4'b1111, SGMII(PHY) x4 + Copper x4 mode */ + default: + priv->polling_mode = YT_PHY_MODE_UTP; + break; + } + + return 0; +} + +static int yt8614Q_probe(struct phy_device *phydev) +{ +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + struct device *dev = &phydev->dev; +#else + struct device *dev = &phydev->mdio.dev; +#endif + struct yt8xxx_priv *priv; + int chip_config; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + phydev->priv = priv; + + chip_config = ytphy_read_ext(phydev, 0xa007); + + priv->chip_mode = chip_config & 0xf; + switch (priv->chip_mode) { + case 0x1: /* 4'b0001, QSGMII to 1000BASE-X or 100BASE-FX x 4 */ + priv->polling_mode = YT_PHY_MODE_FIBER; + break; + default: + break; + } + + return 0; +} + +static int yt8614Q_config_aneg(struct phy_device *phydev) +{ + return 0; +} + +static int yt8618_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = yt861x_soft_reset_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; + + ret = yt861x_soft_reset_paged(phydev, YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + return 0; +} + +static int __ytphy_write_page(struct phy_device *phydev, int page) +{ + return __ytphy_write_ext(phydev, YTPHY_REG_SMI_MUX, page); +} + +static int __ytphy_read_page(struct phy_device *phydev) +{ + return __ytphy_read_ext(phydev, YTPHY_REG_SMI_MUX); +} + +static int ytphy_save_page(struct phy_device *phydev) +{ + /* mutex_lock(&phydev->mdio.bus->mdio_lock); */ + phy_lock_mdio_bus(phydev); + return __ytphy_read_page(phydev); +} + +static int ytphy_select_page(struct phy_device *phydev, int page) +{ + int ret, oldpage; + + oldpage = ret = ytphy_save_page(phydev); + if (ret < 0) + return ret; + + if (oldpage != page) { + ret = __ytphy_write_page(phydev, page); + if (ret < 0) + return ret; + } + + return oldpage; +} + +static int ytphy_restore_page(struct phy_device *phydev, int oldpage, int 
ret) +{ + int r; + + if (oldpage >= 0) { + r = __ytphy_write_page(phydev, oldpage); + + /* Propagate the operation return code if the page write + * was successful. + */ + if (ret >= 0 && r < 0) + ret = r; + } else { + /* Propagate the phy page selection error code */ + ret = oldpage; + } + + /* mutex_unlock(&phydev->mdio.bus->mdio_lock); */ + phy_unlock_mdio_bus(phydev); + + return ret; +} + +static int yt861x_soft_reset_paged(struct phy_device *phydev, int reg_space) +{ + int ret = 0, oldpage; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage >= 0) + ret = __ytphy_soft_reset(phydev); + + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8614_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = yt861x_soft_reset_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; + + ret = yt861x_soft_reset_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + ret = yt861x_soft_reset_paged(phydev, YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + return 0; +} + +static int yt8614Q_soft_reset(struct phy_device *phydev) +{ + int ret; + + ret = yt861x_soft_reset_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; + + ret = yt861x_soft_reset_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + return 0; +} + +static int yt8618_config_init_paged(struct phy_device *phydev, int reg_space) +{ + int ret = 0, oldpage; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage < 0) + goto err_restore_page; + + if (reg_space == YT8614_REG_SPACE_UTP) { + ret = __ytphy_write_ext(phydev, 0x41, 0x33); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x42, 0x66); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x43, 0xaa); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x44, 0xd0d); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x57, 0x234f); + if (ret < 0) + goto 
err_restore_page; + } else if (reg_space == YT8614_REG_SPACE_QSGMII) { + ret = __ytphy_write_ext(phydev, 0x3, 0x4F80); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0xe, 0x4F80); + if (ret < 0) + goto err_restore_page; + } + +err_restore_page: + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8618_config_init(struct phy_device *phydev) +{ + int ret; + + phydev->irq = PHY_POLL; + +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + ret = ytphy_config_init(phydev); +#else + ret = genphy_config_init(phydev); +#endif + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa040, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa041, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa042, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa043, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa044, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa045, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa046, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa047, 0xbb00); + if (ret < 0) + return ret; + + ret = yt8618_config_init_paged(phydev, YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + ret = yt8618_config_init_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; + + yt8618_soft_reset(phydev); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s done, phy addr: %d\n", __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s done, phy addr: %d\n", __func__, phydev->mdio.addr); +#endif + + return ret; +} + +static int yt8614_config_init_paged(struct phy_device *phydev, int reg_space) +{ + int ret = 0, oldpage; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage < 0) + goto err_restore_page; + + if (reg_space == 
YT8614_REG_SPACE_UTP) { + ret = __ytphy_write_ext(phydev, 0x41, 0x33); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x42, 0x66); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x43, 0xaa); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x44, 0xd0d); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x57, 0x2929); + if (ret < 0) + goto err_restore_page; + } else if (reg_space == YT8614_REG_SPACE_QSGMII) { + ret = __ytphy_write_ext(phydev, 0x3, 0x4F80); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0xe, 0x4F80); + if (ret < 0) + goto err_restore_page; + } else if (reg_space == YT8614_REG_SPACE_SGMII) { + ret = __ytphy_write_ext(phydev, 0x3, 0x2420); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0xe, 0x24a0); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0x1be, 0xb01f); + if (ret < 0) + goto err_restore_page; + } + +err_restore_page: + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8614_config_init(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int ret = 0; + + phydev->irq = PHY_POLL; + + ret = ytphy_write_ext(phydev, 0xa040, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa041, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa042, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa043, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa044, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa045, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa046, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa047, 0xbb00); + if (ret < 0) + return ret; + ret = ytphy_write_ext(phydev, 0xa08e, 0xf10); + if (ret < 0) + return ret; + + ret = yt8614_config_init_paged(phydev, 
YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + ret = yt8614_config_init_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; + + ret = yt8614_config_init_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + yt8614_soft_reset(phydev); + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s done, phy addr: %d, chip mode: %d, polling mode = %d\n", + __func__, + phydev->addr, priv->chip_mode, priv->polling_mode); +#else + netdev_info(phydev->attached_dev, + "%s done, phy addr: %d, chip mode: %d, polling mode = %d\n", + __func__, + phydev->mdio.addr, priv->chip_mode, priv->polling_mode); +#endif + + return ret; +} + +static int yt8614Q_config_init_paged(struct phy_device *phydev, int reg_space) +{ + int ret = 0, oldpage; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage < 0) + goto err_restore_page; + + if (reg_space == YT8614_REG_SPACE_QSGMII) { + ret = __ytphy_write_ext(phydev, 0x3, 0x4F80); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0xe, 0x4F80); + if (ret < 0) + goto err_restore_page; + } else if (reg_space == YT8614_REG_SPACE_SGMII) { + ret = __ytphy_write_ext(phydev, 0x3, 0x2420); + if (ret < 0) + goto err_restore_page; + ret = __ytphy_write_ext(phydev, 0xe, 0x24a0); + if (ret < 0) + goto err_restore_page; + } + +err_restore_page: + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8614Q_config_init(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int ret = 0; + + phydev->irq = PHY_POLL; + + ret = ytphy_write_ext(phydev, 0xa056, 0x7); + if (ret < 0) + return ret; + + ret = yt8614Q_config_init_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; + + ret = yt8614Q_config_init_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + ret = yt8614Q_soft_reset(phydev); + if (ret < 0) + return ret; + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + 
netdev_info(phydev->attached_dev, + "%s done, phy addr: %d, chip mode: %d, polling mode = %d\n", + __func__, + phydev->addr, priv->chip_mode, priv->polling_mode); +#else + netdev_info(phydev->attached_dev, + "%s done, phy addr: %d, chip mode: %d, polling mode = %d\n", + __func__, + phydev->mdio.addr, priv->chip_mode, priv->polling_mode); +#endif + return 0; +} + +static int yt8618_aneg_done(struct phy_device *phydev) +{ + return !!(yt861x_aneg_done_paged(phydev, YT8614_REG_SPACE_UTP)); +} + +static int yt861x_aneg_done_paged(struct phy_device *phydev, int reg_space) +{ + int ret = 0, oldpage; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage >= 0) + ret = !!(__phy_read(phydev, REG_PHY_SPEC_STATUS) & + (BIT(YTXXXX_LINK_STATUS_BIT))); + + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8614_aneg_done(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int link_fiber = 0, link_utp = 0; + + if (priv->polling_mode & YT_PHY_MODE_FIBER) + link_fiber = yt861x_aneg_done_paged(phydev, + YT8614_REG_SPACE_SGMII); + + if (priv->polling_mode & YT_PHY_MODE_UTP) + link_utp = yt861x_aneg_done_paged(phydev, + YT8614_REG_SPACE_UTP); + + return !!(link_fiber | link_utp); +} + +static int yt8614Q_aneg_done(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int link_fiber = 0; + + if (priv->polling_mode & YT_PHY_MODE_FIBER) { + /* reading Fiber */ + link_fiber = yt861x_aneg_done_paged(phydev, + YT8614_REG_SPACE_SGMII); + } + + return !!(link_fiber); +} + +static int yt861x_read_status_paged(struct phy_device *phydev, int reg_space, + int *status, int *lpa) +{ + int latch_val, curr_val; + int ret = 0, oldpage; + int link; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage < 0) + goto err_restore_page; + + ret = *lpa = __phy_read(phydev, MII_LPA); + if (ret < 0) + goto err_restore_page; + + ret = *status = __phy_read(phydev, REG_PHY_SPEC_STATUS); + if (ret < 0) + goto err_restore_page; + + 
ret = link = !!(*status & BIT(YTXXXX_LINK_STATUS_BIT)); + + if (YT8614_REG_SPACE_SGMII == reg_space) { + /* for fiber, from 1000m to 100m, + * there is not link down from 0x11, + * and check reg 1 to identify such case + */ + latch_val = __phy_read(phydev, MII_BMSR); + curr_val = __phy_read(phydev, MII_BMSR); + if (link && latch_val != curr_val) { + ret = link = 0; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->addr, + latch_val, curr_val); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, fiber link down detect, latch = %04x, curr = %04x\n", + __func__, phydev->mdio.addr, + latch_val, curr_val); +#endif + } + } + +err_restore_page: + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8614_read_status(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int link_utp = 0, link_fiber = 0; + int val; + int lpa; + + phydev->pause = 0; + phydev->asym_pause = 0; + if (priv->polling_mode & YT_PHY_MODE_UTP) { + link_utp = yt861x_read_status_paged(phydev, + YT8614_REG_SPACE_UTP, &val, + &lpa); + if (link_utp < 0) + return link_utp; + + if (link_utp) + ytxxxx_adjust_status(phydev, val, 1); + + phydev->pause = !!(lpa & BIT(10)); + phydev->asym_pause = !!(lpa & BIT(11)); + } + + if (priv->polling_mode & YT_PHY_MODE_FIBER) { + link_fiber = yt861x_read_status_paged(phydev, + YT8614_REG_SPACE_SGMII, + &val, &lpa); + if (link_fiber < 0) + return link_fiber; + + if (link_fiber) + ytxxxx_adjust_status(phydev, val, 0); + + phydev->pause = !!(lpa & BIT(7)); + phydev->asym_pause = !!(lpa & BIT(8)); + } + + if (link_utp || link_fiber) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media %s\n", + __func__, phydev->addr, + (link_utp && link_fiber) ? + "both UTP and Fiber" : + (link_utp ? 
"UTP" : "Fiber")); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media %s\n", + __func__, phydev->mdio.addr, + (link_utp && link_fiber) ? + "both UTP and Fiber" : + (link_utp ? "UTP" : "Fiber")); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->mdio.addr); +#endif + phydev->link = 0; + } + + if (priv->polling_mode & YT_PHY_MODE_UTP) { + if (link_utp) + ytphy_write_ext(phydev, YTPHY_REG_SMI_MUX, + YT8614_REG_SPACE_UTP); + } + + return 0; +} + +static int yt8614Q_read_status(struct phy_device *phydev) +{ + struct yt8xxx_priv *priv = phydev->priv; + int link_fiber = 0; + int val; + int lpa; + + phydev->pause = 0; + phydev->asym_pause = 0; + if (priv->polling_mode & YT_PHY_MODE_FIBER) { + /* reading Fiber/sgmii */ + link_fiber = yt861x_read_status_paged(phydev, + YT8614_REG_SPACE_SGMII, + &val, &lpa); + if (link_fiber < 0) + return link_fiber; + + if (link_fiber) + ytxxxx_adjust_status(phydev, val, 0); + + phydev->pause = !!(lpa & BIT(7)); + phydev->asym_pause = !!(lpa & BIT(8)); + } + + if (link_fiber) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media Fiber\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media Fiber\n", + __func__, phydev->mdio.addr); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->mdio.addr); +#endif + phydev->link = 0; + } + + return 0; +} + +static 
int yt8618_read_status(struct phy_device *phydev) +{ + int link_utp; + int val; + int lpa; + + phydev->pause = 0; + phydev->asym_pause = 0; + link_utp = yt861x_read_status_paged(phydev, + YT8614_REG_SPACE_UTP, &val, &lpa); + if (link_utp < 0) + return link_utp; + + if (link_utp) + ytxxxx_adjust_status(phydev, val, 1); + + phydev->pause = !!(lpa & BIT(10)); + phydev->asym_pause = !!(lpa & BIT(11)); + + return 0; +} + +static int yt8618_suspend(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int ret; + + ret = yt861x_suspend_paged(phydev, YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + ret = yt861x_suspend_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +static int yt8618_resume(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int ret; + + ret = yt861x_resume_paged(phydev, YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + ret = yt861x_resume_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +__attribute__((unused)) static int +yt861x_suspend_paged(struct phy_device *phydev, int reg_space) +{ + int ret = 0, oldpage; + int value; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage < 0) + goto err_restore_page; + + ret = value = __phy_read(phydev, MII_BMCR); + if (ret < 0) + goto err_restore_page; + ret = __phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); + +err_restore_page: + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8614_suspend(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int ret; + + ret = yt861x_suspend_paged(phydev, YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + ret = yt861x_suspend_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + ret = yt861x_suspend_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; +#endif 
/*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +static int yt8614Q_suspend(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int ret; + + ret = yt861x_suspend_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + ret = yt861x_suspend_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; +#endif /*!(SYS_WAKEUP_BASED_ON_ETH_PKT)*/ + + return 0; +} + +__attribute__((unused)) static int +yt861x_resume_paged(struct phy_device *phydev, int reg_space) +{ + int ret = 0, oldpage; + int value; + + oldpage = ytphy_select_page(phydev, reg_space); + if (oldpage < 0) + goto err_restore_page; + + ret = value = __phy_read(phydev, MII_BMCR); + if (ret < 0) + goto err_restore_page; + ret = __phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); + +err_restore_page: + return ytphy_restore_page(phydev, oldpage, ret); +} + +static int yt8614_resume(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int ret; + + ret = yt861x_resume_paged(phydev, YT8614_REG_SPACE_UTP); + if (ret < 0) + return ret; + + ret = yt861x_resume_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + ret = yt861x_resume_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; +#endif /* !(SYS_WAKEUP_BASED_ON_ETH_PKT) */ + + return 0; +} + +static int yt8614Q_resume(struct phy_device *phydev) +{ +#if !(SYS_WAKEUP_BASED_ON_ETH_PKT) + int ret; + + ret = yt861x_resume_paged(phydev, YT8614_REG_SPACE_SGMII); + if (ret < 0) + return ret; + + ret = yt861x_resume_paged(phydev, YT8614_REG_SPACE_QSGMII); + if (ret < 0) + return ret; +#endif /* !(SYS_WAKEUP_BASED_ON_ETH_PKT) */ + + return 0; +} + +static int ytxxxx_soft_reset(struct phy_device *phydev) +{ + int ret, val; + + val = ytphy_read_ext(phydev, 0xa001); + ytphy_write_ext(phydev, 0xa001, (val & ~0x8000)); + + ret = ytphy_write_ext(phydev, 0xa000, 0); + + return ret; +} + +static int yt8821_init(struct phy_device *phydev) +{ + int ret = 0; + + ret = 
ytphy_write_ext(phydev, 0xa000, 0x2); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x23, 0x8605); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0xa000, 0x0); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x34e, 0x8080); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x4d2, 0x5200); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x4d3, 0x5200); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x372, 0x5a3c); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x374, 0x7c6c); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x336, 0xaa0a); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x340, 0x3022); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x36a, 0x8000); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x4b3, 0x7711); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x4b5, 0x2211); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x56, 0x20); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x56, 0x3f); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x97, 0x380c); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x660, 0x112a); + if (ret < 0) + return ret; + + ret = ytphy_write_ext(phydev, 0x450, 0xe9); + if (ret < 0) + return ret; - if (phydev->speed == SPEED_1000) - val = YTPHY_MCR_FIBER_1000BX; - else if (phydev->speed == SPEED_100) - val = YTPHY_MCR_FIBER_100FX; - else - return -EINVAL; + ret = ytphy_write_ext(phydev, 0x466, 0x6464); + if (ret < 0) + return ret; - ret = __phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0); + ret = ytphy_write_ext(phydev, 0x467, 0x6464); if (ret < 0) return ret; - /* disable Fiber auto sensing */ - ret = ytphy_modify_ext(phydev, YT8521_LINK_TIMER_CFG2_REG, - YT8521_LTCR_EN_AUTOSEN, 0); + ret = ytphy_write_ext(phydev, 0x468, 0x6464); if (ret < 0) return ret; - ret = ytphy_modify_ext(phydev, 
YTPHY_MISC_CONFIG_REG, - YTPHY_MCR_FIBER_SPEED_MASK, val); + ret = ytphy_write_ext(phydev, 0x469, 0x6464); if (ret < 0) return ret; - return ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG, - YT8521_CCR_SW_RST, 0); + return ret; } -/** - * ytphy_check_and_restart_aneg - Enable and restart auto-negotiation - * @phydev: target phy_device struct - * @restart: whether aneg restart is requested - * - * NOTE:The caller must have taken the MDIO bus lock. - * - * returns 0 or negative errno code - */ -static int ytphy_check_and_restart_aneg(struct phy_device *phydev, bool restart) +#define YT8821_CHIP_MODE_AUTO_BX2500_SGMII (1) +#define YT8821_CHIP_MODE_FORCE_BX2500 (0) +#define YT8821_CHIP_MODE_UTP_TO_FIBER_FORCE (0) +static int yt8821_config_init(struct phy_device *phydev) { - int ret; + int ret, val; - if (!restart) { - /* Advertisement hasn't changed, but maybe aneg was never on to - * begin with? Or maybe phy was isolated? - */ - ret = __phy_read(phydev, MII_BMCR); - if (ret < 0) - return ret; + phydev->irq = PHY_POLL; - if (!(ret & BMCR_ANENABLE) || (ret & BMCR_ISOLATE)) - restart = true; - } - /* Enable and Restart Autonegotiation - * Don't isolate the PHY if we're negotiating - */ - if (restart) - return __phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, - BMCR_ANENABLE | BMCR_ANRESTART); + val = ytphy_read_ext(phydev, 0xa001); - return 0; -} +#if YT8821_CHIP_MODE_AUTO_BX2500_SGMII + val &= ~(BIT(0)); + val &= ~(BIT(1)); + val &= ~(BIT(2)); + ret = ytphy_write_ext(phydev, 0xa001, val); + if (ret < 0) + return ret; -/** - * yt8521_fiber_config_aneg - restart auto-negotiation or write - * YTPHY_MISC_CONFIG_REG. - * @phydev: target phy_device struct - * - * NOTE:The caller must have taken the MDIO bus lock. 
- * - * returns 0 or negative errno code - */ -static int yt8521_fiber_config_aneg(struct phy_device *phydev) -{ - int err, changed = 0; - int bmcr; - u16 adv; + ret = ytphy_write_ext(phydev, 0xa000, 2); + if (ret < 0) + return ret; + + val = phy_read(phydev, MII_BMCR); + val |= BIT(YTXXXX_AUTO_NEGOTIATION_BIT); + phy_write(phydev, MII_BMCR, val); - if (phydev->autoneg != AUTONEG_ENABLE) - return yt8521_fiber_setup_forced(phydev); + ret = ytphy_write_ext(phydev, 0xa000, 0x0); + if (ret < 0) + return ret; - /* enable Fiber auto sensing */ - err = ytphy_modify_ext(phydev, YT8521_LINK_TIMER_CFG2_REG, - 0, YT8521_LTCR_EN_AUTOSEN); - if (err < 0) - return err; +#elif YT8821_CHIP_MODE_FORCE_BX2500 + #if KERNEL_VERSION(4, 10, 17) < LINUX_VERSION_CODE + val |= BIT(0); + val &= ~(BIT(1)); + val &= ~(BIT(2)); + ret = ytphy_write_ext(phydev, 0xa001, val); + if (ret < 0) + return ret; - err = ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG, - YT8521_CCR_SW_RST, 0); - if (err < 0) - return err; + ret = ytphy_write_ext(phydev, 0xa000, 0x0); + if (ret < 0) + return ret; - bmcr = __phy_read(phydev, MII_BMCR); - if (bmcr < 0) - return bmcr; + val = phy_read(phydev, MII_ADVERTISE); + val |= BIT(YTXXXX_PAUSE_BIT); + val |= BIT(YTXXXX_ASYMMETRIC_PAUSE_BIT); + phy_write(phydev, MII_ADVERTISE, val); - /* When it is coming from fiber forced mode, add bmcr power down - * and power up to let aneg work fine. 
- */ - if (!(bmcr & BMCR_ANENABLE)) { - __phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN); - usleep_range(1000, 1100); - __phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0); - } + ret = ytphy_write_ext(phydev, 0xa000, 2); + if (ret < 0) + return ret; - adv = linkmode_adv_to_mii_adv_x(phydev->advertising, - ETHTOOL_LINK_MODE_1000baseX_Full_BIT); + val = phy_read(phydev, MII_ADVERTISE); + val |= BIT(YT8821_SDS_PAUSE_BIT); + val |= BIT(YT8821_SDS_ASYMMETRIC_PAUSE_BIT); + phy_write(phydev, MII_ADVERTISE, val); - /* Setup fiber advertisement */ - err = __phy_modify_changed(phydev, MII_ADVERTISE, - ADVERTISE_1000XHALF | ADVERTISE_1000XFULL | - ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM, - adv); - if (err < 0) - return err; + val = phy_read(phydev, MII_BMCR); + val &= (~BIT(YTXXXX_AUTO_NEGOTIATION_BIT)); + phy_write(phydev, MII_BMCR, val); - if (err > 0) - changed = 1; + ret = ytphy_write_ext(phydev, 0xa000, 0x0); + if (ret < 0) + return ret; + #endif /* KERNEL_VERSION(4, 10, 17) < LINUX_VERSION_CODE */ - return ytphy_check_and_restart_aneg(phydev, changed); -} +#elif YT8821_CHIP_MODE_UTP_TO_FIBER_FORCE + val |= BIT(0); + val &= ~(BIT(1)); + val |= BIT(2); + ret = ytphy_write_ext(phydev, 0xa001, val); + if (ret < 0) + return ret; -/** - * ytphy_setup_master_slave - * @phydev: target phy_device struct - * - * NOTE: The caller must have taken the MDIO bus lock. 
- * - * returns 0 or negative errno code - */ -static int ytphy_setup_master_slave(struct phy_device *phydev) -{ - u16 ctl = 0; +#endif /* Mode selection */ - if (!phydev->is_gigabit_capable) - return 0; + ret = yt8821_init(phydev); + if (ret < 0) + return ret; - switch (phydev->master_slave_set) { - case MASTER_SLAVE_CFG_MASTER_PREFERRED: - ctl |= CTL1000_PREFER_MASTER; - break; - case MASTER_SLAVE_CFG_SLAVE_PREFERRED: - break; - case MASTER_SLAVE_CFG_MASTER_FORCE: - ctl |= CTL1000_AS_MASTER; - fallthrough; - case MASTER_SLAVE_CFG_SLAVE_FORCE: - ctl |= CTL1000_ENABLE_MASTER; - break; - case MASTER_SLAVE_CFG_UNKNOWN: - case MASTER_SLAVE_CFG_UNSUPPORTED: - return 0; - default: - phydev_warn(phydev, "Unsupported Master/Slave mode\n"); - return -EOPNOTSUPP; - } + /* disable auto sleep */ + val = ytphy_read_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1); + if (val < 0) + return val; - return __phy_modify_changed(phydev, MII_CTRL1000, - (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER | - CTL1000_PREFER_MASTER), ctl); -} + val &= (~BIT(YT8521_EN_SLEEP_SW_BIT)); -/** - * ytphy_utp_config_advert - sanitize and advertise auto-negotiation parameters - * @phydev: target phy_device struct - * - * NOTE: Writes MII_ADVERTISE with the appropriate values, - * after sanitizing the values to make sure we only advertise - * what is supported. Returns < 0 on error, 0 if the PHY's advertisement - * hasn't changed, and > 0 if it has changed. - * The caller must have taken the MDIO bus lock. 
- * - * returns 0 or negative errno code - */ -static int ytphy_utp_config_advert(struct phy_device *phydev) -{ - int err, bmsr, changed = 0; - u32 adv; - - /* Only allow advertising what this PHY supports */ - linkmode_and(phydev->advertising, phydev->advertising, - phydev->supported); - - adv = linkmode_adv_to_mii_adv_t(phydev->advertising); - - /* Setup standard advertisement */ - err = __phy_modify_changed(phydev, MII_ADVERTISE, - ADVERTISE_ALL | ADVERTISE_100BASE4 | - ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, - adv); - if (err < 0) - return err; - if (err > 0) - changed = 1; - - bmsr = __phy_read(phydev, MII_BMSR); - if (bmsr < 0) - return bmsr; - - /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all - * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a - * logical 1. - */ - if (!(bmsr & BMSR_ESTATEN)) - return changed; + ret = ytphy_write_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1, val); + if (ret < 0) + return ret; - adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); + /* soft reset */ + ytxxxx_soft_reset(phydev); - err = __phy_modify_changed(phydev, MII_CTRL1000, - ADVERTISE_1000FULL | ADVERTISE_1000HALF, - adv); - if (err < 0) - return err; - if (err > 0) - changed = 1; +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, "%s done, phy addr: %d\n", + __func__, phydev->mdio.addr); +#endif - return changed; + return ret; } -/** - * ytphy_utp_config_aneg - restart auto-negotiation or write BMCR - * @phydev: target phy_device struct - * @changed: whether autoneg is requested - * - * NOTE: If auto-negotiation is enabled, we configure the - * advertising, and then restart auto-negotiation. If it is not - * enabled, then we write the BMCR. - * The caller must have taken the MDIO bus lock. 
- * - * returns 0 or negative errno code - */ -static int ytphy_utp_config_aneg(struct phy_device *phydev, bool changed) +#if (KERNEL_VERSION(6, 0, 19) < LINUX_VERSION_CODE) +static int yt8821_get_rate_matching(struct phy_device *phydev, + phy_interface_t iface) { - int err; - u16 ctl; - - err = ytphy_setup_master_slave(phydev); - if (err < 0) - return err; - else if (err) - changed = true; - - if (phydev->autoneg != AUTONEG_ENABLE) { - /* configures/forces speed/duplex from @phydev */ - - ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); + int val; - return __phy_modify(phydev, MII_BMCR, ~(BMCR_LOOPBACK | - BMCR_ISOLATE | BMCR_PDOWN), ctl); - } + val = ytphy_read_ext(phydev, 0xa001); + if (val < 0) + return val; - err = ytphy_utp_config_advert(phydev); - if (err < 0) /* error */ - return err; - else if (err) - changed = true; + if (val & (BIT(2) | BIT(1) | BIT(0))) + return RATE_MATCH_PAUSE; - return ytphy_check_and_restart_aneg(phydev, changed); + return RATE_MATCH_NONE; } +#endif -/** - * yt8521_config_aneg_paged() - switch reg space then call genphy_config_aneg - * of one page - * @phydev: a pointer to a &struct phy_device - * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to - * operate. - * - * returns 0 or negative errno code - */ -static int yt8521_config_aneg_paged(struct phy_device *phydev, int page) +static int yt8821_aneg_done(struct phy_device *phydev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(fiber_supported); - struct yt8521_priv *priv = phydev->priv; - int old_page; - int ret = 0; + int link_utp = 0; - page &= YT8521_RSSR_SPACE_MASK; - - old_page = phy_select_page(phydev, page); - if (old_page < 0) - goto err_restore_page; - - /* If reg_page is YT8521_RSSR_TO_BE_ARBITRATED, - * phydev->advertising should be updated. - */ - if (priv->reg_page == YT8521_RSSR_TO_BE_ARBITRATED) { - linkmode_zero(fiber_supported); - yt8521_prepare_fiber_features(phydev, fiber_supported); - - /* prepare fiber_supported, then setup advertising. 
*/ - if (page == YT8521_RSSR_FIBER_SPACE) { - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, - fiber_supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - fiber_supported); - linkmode_and(phydev->advertising, - priv->combo_advertising, fiber_supported); - } else { - /* ETHTOOL_LINK_MODE_Autoneg_BIT is also used in utp */ - linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - fiber_supported); - linkmode_andnot(phydev->advertising, - priv->combo_advertising, - fiber_supported); - } - } + /* reading UTP */ + ytphy_write_ext(phydev, 0xa000, 0); + link_utp = !!(phy_read(phydev, REG_PHY_SPEC_STATUS) & (BIT(10))); - if (page == YT8521_RSSR_FIBER_SPACE) - ret = yt8521_fiber_config_aneg(phydev); - else - ret = ytphy_utp_config_aneg(phydev, false); +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link_utp: %d\n", + __func__, phydev->addr, link_utp); +#else + netdev_info(phydev->attached_dev, "%s, phy addr: %d, link_utp: %d\n", + __func__, phydev->mdio.addr, link_utp); +#endif -err_restore_page: - return phy_restore_page(phydev, old_page, ret); + return !!(link_utp); } - -/** - * yt8521_config_aneg() - change reg space then call yt8521_config_aneg_paged - * @phydev: a pointer to a &struct phy_device - * - * returns 0 or negative errno code - */ -static int yt8521_config_aneg(struct phy_device *phydev) +static int yt8821_adjust_status(struct phy_device *phydev, int val) { - struct yt8521_priv *priv = phydev->priv; - int ret; - - if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { - ret = yt8521_config_aneg_paged(phydev, priv->reg_page); - if (ret < 0) - return ret; - } else { - /* If reg_page is YT8521_RSSR_TO_BE_ARBITRATED, - * phydev->advertising need to be saved at first run. - * Because it contains the advertising which supported by both - * mac and yt8521(utp and fiber). 
- */ - if (linkmode_empty(priv->combo_advertising)) { - linkmode_copy(priv->combo_advertising, - phydev->advertising); - } - - ret = yt8521_config_aneg_paged(phydev, YT8521_RSSR_UTP_SPACE); - if (ret < 0) - return ret; + int speed_mode_bit15_14, speed_mode_bit9; + int speed_mode, duplex; +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + int speed = -1; +#else + int speed = SPEED_UNKNOWN; +#endif + + duplex = (val & YTXXXX_DUPLEX) >> YTXXXX_DUPLEX_BIT; + + /* Bit9-Bit15-Bit14 speed mode 100-2.5G; 010-1000M; 001-100M; 000-10M */ + speed_mode_bit15_14 = (val & YTXXXX_SPEED_MODE) >> + YTXXXX_SPEED_MODE_BIT; + speed_mode_bit9 = (val & BIT(9)) >> 9; + speed_mode = (speed_mode_bit9 << 2) | speed_mode_bit15_14; + switch (speed_mode) { + case 0: + speed = SPEED_10; + break; + case 1: + speed = SPEED_100; + break; + case 2: + speed = SPEED_1000; + break; + case 4: + speed = SPEED_2500; + break; + default: +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) + speed = -1; +#else + speed = SPEED_UNKNOWN; +#endif + break; + } - ret = yt8521_config_aneg_paged(phydev, YT8521_RSSR_FIBER_SPACE); - if (ret < 0) - return ret; + phydev->speed = speed; + phydev->duplex = duplex; - /* we don't known which will be link, so restore - * phydev->advertising as default value. - */ - linkmode_copy(phydev->advertising, priv->combo_advertising); - } return 0; } -/** - * yt8521_aneg_done_paged() - determines the auto negotiation result of one - * page. - * @phydev: a pointer to a &struct phy_device - * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to - * operate. 
- * - * returns 0(no link)or 1(fiber or utp link) or negative errno code - */ -static int yt8521_aneg_done_paged(struct phy_device *phydev, int page) +static int yt8821_read_status(struct phy_device *phydev) { - int old_page; - int ret = 0; + int link_utp = 0; int link; + int ret; + int val; - old_page = phy_select_page(phydev, page & YT8521_RSSR_SPACE_MASK); - if (old_page < 0) - goto err_restore_page; - - ret = __phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG); + /* reading UTP */ + ret = ytphy_write_ext(phydev, 0xa000, 0); if (ret < 0) - goto err_restore_page; - - link = !!(ret & YTPHY_SSR_LINK); - ret = link; + return ret; -err_restore_page: - return phy_restore_page(phydev, old_page, ret); -} + genphy_read_status(phydev); -/** - * yt8521_aneg_done() - determines the auto negotiation result - * @phydev: a pointer to a &struct phy_device - * - * returns 0(no link)or 1(fiber or utp link) or negative errno code - */ -static int yt8521_aneg_done(struct phy_device *phydev) -{ - struct yt8521_priv *priv = phydev->priv; - int link_fiber = 0; - int link_utp; - int link; + val = phy_read(phydev, REG_PHY_SPEC_STATUS); + if (val < 0) + return val; - if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { - link = yt8521_aneg_done_paged(phydev, priv->reg_page); + link = val & (BIT(YTXXXX_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + /* speed(2500), duplex */ + yt8821_adjust_status(phydev, val); } else { - link_utp = yt8521_aneg_done_paged(phydev, - YT8521_RSSR_UTP_SPACE); - if (link_utp < 0) - return link_utp; - - if (!link_utp) { - link_fiber = yt8521_aneg_done_paged(phydev, - YT8521_RSSR_FIBER_SPACE); - if (link_fiber < 0) - return link_fiber; - } - link = link_fiber || link_utp; - phydev_info(phydev, "%s, link_fiber: %d, link_utp: %d\n", - __func__, link_fiber, link_utp); + link_utp = 0; } - return link; -} - -/** - * ytphy_utp_read_abilities - read PHY abilities from Clause 22 registers - * @phydev: target phy_device struct - * - * NOTE: Reads the PHY's abilities and 
populates - * phydev->supported accordingly. - * The caller must have taken the MDIO bus lock. - * - * returns 0 or negative errno code - */ -static int ytphy_utp_read_abilities(struct phy_device *phydev) -{ - int val; - - linkmode_set_bit_array(phy_basic_ports_array, - ARRAY_SIZE(phy_basic_ports_array), - phydev->supported); - - val = __phy_read(phydev, MII_BMSR); - if (val < 0) - return val; - - linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported, - val & BMSR_ANEGCAPABLE); + if (link_utp) { + if (phydev->link == 0) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media: UTP, mii reg 0x11 = 0x%x\n", + __func__, phydev->addr, (unsigned int)val); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link up, media: UTP, mii reg 0x11 = 0x%x\n", + __func__, phydev->mdio.addr, + (unsigned int)val); +#endif + phydev->link = 1; + } else { + if (phydev->link == 1) +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->addr); +#else + netdev_info(phydev->attached_dev, + "%s, phy addr: %d, link down\n", + __func__, phydev->mdio.addr); +#endif - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported, - val & BMSR_100FULL); - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported, - val & BMSR_100HALF); - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported, - val & BMSR_10FULL); - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported, - val & BMSR_10HALF); + phydev->link = 0; + } - if (val & BMSR_ESTATEN) { - val = __phy_read(phydev, MII_ESTATUS); - if (val < 0) - return val; + if (link_utp) + ytphy_write_ext(phydev, 0xa000, 0); - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->supported, val & ESTATUS_1000_TFULL); - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->supported, val & 
ESTATUS_1000_THALF); - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - phydev->supported, val & ESTATUS_1000_XFULL); +#if (KERNEL_VERSION(4, 10, 17) < LINUX_VERSION_CODE) + val = ytphy_read_ext(phydev, 0xa001); + if ((val & (BIT(2) | BIT(1) | BIT(0))) == 0x0) { + switch (phydev->speed) { + case SPEED_2500: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + case SPEED_1000: + case SPEED_100: + case SPEED_10: + phydev->interface = PHY_INTERFACE_MODE_SGMII; + break; + } } +#endif return 0; } -/** - * yt8521_get_features_paged() - read supported link modes for one page - * @phydev: a pointer to a &struct phy_device - * @page: The reg page(YT8521_RSSR_FIBER_SPACE/YT8521_RSSR_UTP_SPACE) to - * operate. - * - * returns 0 or negative errno code - */ -static int yt8521_get_features_paged(struct phy_device *phydev, int page) +#if (KERNEL_VERSION(5, 0, 21) < LINUX_VERSION_CODE) +static int yt8821_get_features(struct phy_device *phydev) { - int old_page; - int ret = 0; + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->supported, 1); + return genphy_read_abilities(phydev); +} +#endif - page &= YT8521_RSSR_SPACE_MASK; - old_page = phy_select_page(phydev, page); - if (old_page < 0) - goto err_restore_page; +static int ytxxxx_suspend(struct phy_device *phydev) +{ + int wol_enabled = 0; + int value = 0; - if (page == YT8521_RSSR_FIBER_SPACE) { - linkmode_zero(phydev->supported); - yt8521_prepare_fiber_features(phydev, phydev->supported); - } else { - ret = ytphy_utp_read_abilities(phydev); - if (ret < 0) - goto err_restore_page; +#if (YTPHY_WOL_FEATURE_ENABLE) + value = phy_read(phydev, YTPHY_UTP_INTR_REG); + wol_enabled = value & YTPHY_WOL_FEATURE_INTR; +#endif + + if (!wol_enabled) { + value = phy_read(phydev, MII_BMCR); + phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); } -err_restore_page: - return phy_restore_page(phydev, old_page, ret); + return 0; } -/** - * yt8521_get_features - switch reg space then call yt8521_get_features_paged - * 
@phydev: target phy_device struct - * - * returns 0 or negative errno code - */ -static int yt8521_get_features(struct phy_device *phydev) +static int ytxxxx_resume(struct phy_device *phydev) { - struct yt8521_priv *priv = phydev->priv; - int ret; + int value; - if (priv->reg_page != YT8521_RSSR_TO_BE_ARBITRATED) { - ret = yt8521_get_features_paged(phydev, priv->reg_page); - } else { - ret = yt8521_get_features_paged(phydev, - YT8521_RSSR_UTP_SPACE); - if (ret < 0) - return ret; + value = phy_read(phydev, MII_BMCR); + value &= ~BMCR_PDOWN; + value &= ~BMCR_ISOLATE; - /* add fiber's features to phydev->supported */ - yt8521_prepare_fiber_features(phydev, phydev->supported); - } - return ret; + phy_write(phydev, MII_BMCR, value); + + return 0; } -static struct phy_driver motorcomm_phy_drvs[] = { +static struct phy_driver ytphy_drvs[] = { { - PHY_ID_MATCH_EXACT(PHY_ID_YT8511), + .phy_id = PHY_ID_YT8010, + .name = "YT8010 100M Automotive Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8010_soft_reset, +#endif + .config_aneg = yt8010_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8010_aneg_done, +#endif + .config_init = yt8010_config_init, + .read_status = yt8010_read_status, + }, { + .phy_id = PHY_ID_YT8010AS, + .name = "YT8010AS 100M Automotive Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8010AS_soft_reset, +#endif + .config_aneg = yt8010_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8010_aneg_done, +#endif + .config_init = yt8010AS_config_init, + .read_status = yt8010_read_status, + }, { + .phy_id = PHY_ID_YT8011, + .name = "YT8011 Automotive Gigabit Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = 
PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .probe = yt8011_probe, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8011_soft_reset, +#endif + .config_aneg = yt8011_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8011_aneg_done, +#endif + .config_init = yt8011_config_init, + .read_status = yt8011_read_status, + }, { + .phy_id = PHY_ID_YT8510, + .name = "YT8510 100M Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytphy_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + .config_init = ytphy_config_init, +#else + .config_init = genphy_config_init, +#endif + .read_status = genphy_read_status, + }, { + .phy_id = PHY_ID_YT8511, .name = "YT8511 Gigabit Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytphy_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if GMAC_CLOCK_INPUT_NEEDED .config_init = yt8511_config_init, +#else +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) || (KERNEL_VERSION(5, 3, 0) < LINUX_VERSION_CODE) + .config_init = ytphy_config_init, +#else + .config_init = genphy_config_init, +#endif +#endif + .read_status = genphy_read_status, .suspend = genphy_suspend, .resume = genphy_resume, - .read_page = yt8511_read_page, - .write_page = yt8511_write_page, - }, - { - PHY_ID_MATCH_EXACT(PHY_ID_YT8521), - .name = "YT8521 Gigabit Ethernet", - .get_features = yt8521_get_features, + }, { + .phy_id = PHY_ID_YT8512, + .name = "YT8512 100M Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = 
ytphy_soft_reset, +#endif + .config_aneg = genphy_config_aneg, + .config_init = yt8512_config_init, + .probe = yt8512_probe, + .read_status = yt8512_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_YT8522, + .name = "YT8522 100M Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_BASIC_FEATURES, + .flags = PHY_POLL, + .probe = yt8522_probe, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytphy_soft_reset, +#endif + .config_aneg = genphy_config_aneg, + .config_init = yt8522_config_init, + .read_status = yt8522_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_YT8521, + .name = "YT8521 Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, .probe = yt8521_probe, - .read_page = yt8521_read_page, - .write_page = yt8521_write_page, - .get_wol = ytphy_get_wol, - .set_wol = ytphy_set_wol, - .config_aneg = yt8521_config_aneg, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytxxxx_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) .aneg_done = yt8521_aneg_done, +#endif .config_init = yt8521_config_init, .read_status = yt8521_read_status, - .soft_reset = yt8521_soft_reset, .suspend = yt8521_suspend, .resume = yt8521_resume, - }, - { - PHY_ID_MATCH_EXACT(PHY_ID_YT8531), +#if (YTPHY_WOL_FEATURE_ENABLE) + .get_wol = &ytphy_wol_feature_get, + .set_wol = &ytphy_wol_feature_set, +#endif + }, { + /* same as 8521 */ + .phy_id = PHY_ID_YT8531S, + .name = "YT8531S Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .probe = yt8521_probe, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytxxxx_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8521_aneg_done, 
+#endif + .config_init = yt8531S_config_init, + .read_status = yt8521_read_status, + .suspend = yt8521_suspend, + .resume = yt8521_resume, +#if (YTPHY_WOL_FEATURE_ENABLE) + .get_wol = &ytphy_wol_feature_get, + .set_wol = &ytphy_wol_feature_set, +#endif + }, { + /* same as 8511 */ + .phy_id = PHY_ID_YT8531, .name = "YT8531 Gigabit Ethernet", - .probe = yt8531_probe, + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytphy_soft_reset, +#endif .config_init = yt8531_config_init, + .read_status = genphy_read_status, +#if (KERNEL_VERSION(5, 0, 21) < LINUX_VERSION_CODE) + .link_change_notify = ytphy_link_change_notify, +#endif .suspend = genphy_suspend, .resume = genphy_resume, - .get_wol = ytphy_get_wol, - .set_wol = yt8531_set_wol, - .link_change_notify = yt8531_link_change_notify, +#if (YTPHY_WOL_FEATURE_ENABLE) + .get_wol = &ytphy_wol_feature_get, + .set_wol = &ytphy_wol_feature_set, +#endif + }, +#ifdef YTPHY_YT8543_ENABLE + { + .phy_id = PHY_ID_YT8543, + .name = "YT8543 Dual Port Gigabit Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytphy_soft_reset, +#endif + .config_init = yt8543_config_init, + .read_status = yt8543_read_status, + .suspend = ytxxxx_suspend, + .resume = ytxxxx_resume, +#if (YTPHY_WOL_FEATURE_ENABLE) + .get_wol = &ytphy_wol_feature_get, + .set_wol = &ytphy_wol_feature_set, +#endif + }, +#endif + { + .phy_id = PHY_ID_YT8618, + .name = "YT8618 Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8618_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < 
LINUX_VERSION_CODE) + .aneg_done = yt8618_aneg_done, +#endif + .config_init = yt8618_config_init, + .read_status = yt8618_read_status, + .suspend = yt8618_suspend, + .resume = yt8618_resume, }, { - PHY_ID_MATCH_EXACT(PHY_ID_YT8531S), - .name = "YT8531S Gigabit Ethernet", - .get_features = yt8521_get_features, - .probe = yt8521_probe, - .read_page = yt8521_read_page, - .write_page = yt8521_write_page, - .get_wol = ytphy_get_wol, - .set_wol = ytphy_set_wol, - .config_aneg = yt8521_config_aneg, - .aneg_done = yt8521_aneg_done, - .config_init = yt8521_config_init, - .read_status = yt8521_read_status, - .soft_reset = yt8521_soft_reset, - .suspend = yt8521_suspend, - .resume = yt8521_resume, + .phy_id = PHY_ID_YT8614, + .name = "YT8614 Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .probe = yt8614_probe, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8614_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8614_aneg_done, +#endif + .config_init = yt8614_config_init, + .read_status = yt8614_read_status, + .suspend = yt8614_suspend, + .resume = yt8614_resume, + }, + { + .phy_id = PHY_ID_YT8614Q, + .name = "YT8614Q Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, + .features = PHY_GBIT_FEATURES, + .flags = PHY_POLL, + .probe = yt8614Q_probe, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = yt8614Q_soft_reset, +#endif + .config_aneg = yt8614Q_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8614Q_aneg_done, +#endif + .config_init = yt8614Q_config_init, + .read_status = yt8614Q_read_status, + .suspend = yt8614Q_suspend, + .resume = yt8614Q_resume, }, + { + .phy_id = PHY_ID_YT8821, + .name = "YT8821 2.5Gbps Ethernet", + .phy_id_mask = MOTORCOMM_PHY_ID_MASK, +#if (KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE) + .features = PHY_GBIT_FEATURES, +#endif + 
.flags = PHY_POLL, +#if (KERNEL_VERSION(3, 15, 0) > LINUX_VERSION_CODE) +#else + .soft_reset = ytxxxx_soft_reset, +#endif + .config_aneg = genphy_config_aneg, +#if (KERNEL_VERSION(3, 14, 79) < LINUX_VERSION_CODE) + .aneg_done = yt8821_aneg_done, +#endif +#if (KERNEL_VERSION(5, 0, 21) < LINUX_VERSION_CODE) + .get_features = yt8821_get_features, +#endif + .config_init = yt8821_config_init, +#if (YTPHY_WOL_FEATURE_ENABLE) + .set_wol = &ytphy_wol_feature_set, + .get_wol = &ytphy_wol_feature_get, +#endif +#if (KERNEL_VERSION(6, 0, 19) < LINUX_VERSION_CODE) + .get_rate_matching = yt8821_get_rate_matching, +#endif + .read_status = yt8821_read_status, + .suspend = ytxxxx_suspend, + .resume = ytxxxx_resume, + }, +}; + +static ssize_t phydrv_ver(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) { + char buf[64]; + int len; + + len = snprintf(buf, sizeof(buf), "Driver Version: %s\n", + YTPHY_LINUX_VERSION); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +/* File operations */ +static const struct file_operations ver_fops = { + .read = phydrv_ver, }; -module_phy_driver(motorcomm_phy_drvs); +#if (KERNEL_VERSION(4, 0, 0) > LINUX_VERSION_CODE) +static int ytphy_drivers_register(struct phy_driver *phy_drvs, int size) +{ + int i, j; + int ret; + + for (i = 0; i < size; i++) { + ret = phy_driver_register(&phy_drvs[i]); + if (ret) + goto err; + } + + return 0; + +err: + for (j = 0; j < i; j++) + phy_driver_unregister(&phy_drvs[j]); + + return ret; +} + +static void ytphy_drivers_unregister(struct phy_driver *phy_drvs, int size) +{ + int i; + + for (i = 0; i < size; i++) + phy_driver_unregister(&phy_drvs[i]); +} + +static int __init ytphy_init(void) +{ + /* Create debugfs directory */ + dir = debugfs_create_dir(MODULE_NAME, NULL); + if (!dir) { + pr_err(MODULE_NAME ": Failed to create debugfs directory\n"); + return -ENOMEM; + } + + /* Create debugfs file */ + if (!debugfs_create_file(PHYDRV_VER, 0444, dir, NULL, &ver_fops)) { + 
pr_err(MODULE_NAME ": Failed to create debugfs file\n"); + debugfs_remove(dir); + return -ENOMEM; + } + + return ytphy_drivers_register(ytphy_drvs, ARRAY_SIZE(ytphy_drvs)); +} + +static void __exit ytphy_exit(void) +{ + debugfs_remove_recursive(dir); + pr_info(MODULE_NAME ": Module unloaded\n"); + ytphy_drivers_unregister(ytphy_drvs, ARRAY_SIZE(ytphy_drvs)); +} + +module_init(ytphy_init); +module_exit(ytphy_exit); +#else +/* for linux 4.x ~ */ +/* module_phy_driver(ytphy_drvs);*/ +static int __init phy_module_init(void) +{ + dir = debugfs_create_dir(MODULE_NAME, NULL); + if (!dir) { + pr_err(MODULE_NAME ": Failed to create debugfs directory\n"); + return -ENOMEM; + } + + /* Create debugfs file */ + if (!debugfs_create_file(PHYDRV_VER, 0444, dir, NULL, &ver_fops)) { + pr_err(MODULE_NAME ": Failed to create debugfs file\n"); + debugfs_remove(dir); + return -ENOMEM; + } + + return phy_drivers_register(ytphy_drvs, ARRAY_SIZE(ytphy_drvs), + THIS_MODULE); +} +static void __exit phy_module_exit(void) +{ + debugfs_remove_recursive(dir); + pr_info(MODULE_NAME ": Module unloaded\n"); -MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver"); -MODULE_AUTHOR("Peter Geis"); -MODULE_AUTHOR("Frank"); + phy_drivers_unregister(ytphy_drvs, ARRAY_SIZE(ytphy_drvs)); +} + +module_init(phy_module_init); +module_exit(phy_module_exit); +#endif + +MODULE_DESCRIPTION("Motorcomm PHY driver"); +MODULE_VERSION(YTPHY_LINUX_VERSION); /* for modinfo xxxx.ko in userspace */ +MODULE_AUTHOR("Leilei Zhao"); MODULE_LICENSE("GPL"); -static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = { - { PHY_ID_MATCH_EXACT(PHY_ID_YT8511) }, - { PHY_ID_MATCH_EXACT(PHY_ID_YT8521) }, - { PHY_ID_MATCH_EXACT(PHY_ID_YT8531) }, - { PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) }, - { /* sentinel */ } +static struct mdio_device_id __maybe_unused motorcomm_tbl[] = { + { PHY_ID_YT8010, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8010AS, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8510, MOTORCOMM_PHY_ID_MASK }, + { 
PHY_ID_YT8511, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8512, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8522, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8521, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8531S, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8531, MOTORCOMM_PHY_ID_MASK }, +#ifdef YTPHY_YT8543_ENABLE + { PHY_ID_YT8543, MOTORCOMM_PHY_ID_MASK }, +#endif + { PHY_ID_YT8614, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8614Q, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8618, MOTORCOMM_PHY_ID_MASK }, + { PHY_ID_YT8821, MOTORCOMM_PHY_ID_MASK }, + { } }; MODULE_DEVICE_TABLE(mdio, motorcomm_tbl); + diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b701969cf1c2aa807bb598b09dfabbb260dc991f..5f44997f5c19848364924ca2c8fe188a9155253c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3000,6 +3000,12 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, "platform quirk: setting simple suspend\n"); quirks |= NVME_QUIRK_SIMPLE_SUSPEND; } + +#ifdef CONFIG_ARCH_PHYTIUM + if (read_cpuid_implementor() == ARM_CPU_IMP_PHYTIUM) + quirks |= NVME_QUIRK_SIMPLE_SUSPEND; +#endif + ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, quirks); if (ret) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 93f2f4dcf6d69655678e325295acf0e7f44e20c5..00d8fbbc525a3daa4b4cd0919fb24d07693936cc 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -33,6 +33,9 @@ #include #include #include "pci.h" +#ifdef CONFIG_PSWIOTLB +#include +#endif DEFINE_MUTEX(pci_slot_mutex); @@ -4531,6 +4534,15 @@ void __weak pcibios_set_master(struct pci_dev *dev) */ void pci_set_master(struct pci_dev *dev) { +#ifdef CONFIG_PSWIOTLB + if ((pswiotlb_force_disable != true) && + is_phytium_ps_socs()) { + dev->dev.can_use_pswiotlb = pswiotlb_is_dev_in_passthroughlist(dev); + dev_info(&dev->dev, "The device %s use pswiotlb because vendor 0x%04x %s in pswiotlb passthroughlist\n", + dev->dev.can_use_pswiotlb ? "would" : "would NOT", + dev->vendor, dev->dev.can_use_pswiotlb ? 
"is NOT" : "is"); + } +#endif __pci_set_master(dev, true); pcibios_set_master(dev); } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 43159965e09e932db82759ba5bfa64380357ba8f..68586cf5fc4c3ee1349ef87097bec77420f4b735 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -20,6 +20,9 @@ #include #include #include "pci.h" +#ifdef CONFIG_PSWIOTLB +#include +#endif #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ #define CARDBUS_RESERVE_BUSNR 3 @@ -2552,6 +2555,13 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dma_set_max_seg_size(&dev->dev, 65536); dma_set_seg_boundary(&dev->dev, 0xffffffff); +#ifdef CONFIG_PSWIOTLB + if ((pswiotlb_force_disable != true) && + is_phytium_ps_socs()) { + pswiotlb_store_local_node(dev, bus); + dma_set_seg_boundary(&dev->dev, 0xffffffffffff); + } +#endif pcie_failed_link_retrain(dev); diff --git a/drivers/perf/phytium/Kconfig b/drivers/perf/phytium/Kconfig index e9dabba7227eb47e4cda5bf935d8868d1eb84a91..9b5b6a331edc82ef1faebc9bb66618598d735a69 100644 --- a/drivers/perf/phytium/Kconfig +++ b/drivers/perf/phytium/Kconfig @@ -7,6 +7,14 @@ menuconfig PHYTIUM_PMU if PHYTIUM_PMU +config PHYT_DMU_PMU_PD2408 + tristate "Phytium PD2408 SoC DDR PMU driver" + depends on (ARCH_PHYTIUM && ACPI) || COMPILE_TEST + default m + help + Provides support for Phytium PD2408 SoC DDR Controller performance + monitoring unit (PMU). 
+ config PHYT_DDR_PMU tristate "Phytium SoC DDR PMU driver" depends on (ARCH_PHYTIUM && ACPI) || COMPILE_TEST diff --git a/drivers/perf/phytium/Makefile b/drivers/perf/phytium/Makefile index af37afc6920c57169c16c053eb3ceb70dff064d2..1f9f1e5bf47d4d30e9b9da9056a57eee65c0fd5e 100644 --- a/drivers/perf/phytium/Makefile +++ b/drivers/perf/phytium/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 # +obj-$(CONFIG_PHYT_DMU_PMU_PD2408) += phytium_dmu_pmu_pd2408.o obj-$(CONFIG_PHYT_DDR_PMU) += phytium_ddr_pmu.o obj-$(CONFIG_PHYT_PCIE_PMU) += phytium_pcie_pmu.o diff --git a/drivers/perf/phytium/phytium_dmu_pmu_pd2408.c b/drivers/perf/phytium/phytium_dmu_pmu_pd2408.c new file mode 100644 index 0000000000000000000000000000000000000000..df873b018c0e4897a18f40ac7285298a253e2b73 --- /dev/null +++ b/drivers/perf/phytium/phytium_dmu_pmu_pd2408.c @@ -0,0 +1,708 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium PD2408 DMU performance monitoring unit support + * + * Copyright (c) 2025, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "pd2408_dmu_pmu: " fmt + +#define DMU_PERF_DRIVER_VERSION "1.0.0" + +#define DMU_PMU_TIMER_START 0x0 +#define DMU_PMU_TIMER_STOP 0x4 +#define DMU_PMU_CLEAR_EVENT 0x8 +#define DMU_PMU_SET_TIMER_L 0xc +#define DMU_PMU_SET_TIMER_H 0x10 +#define DMU_PMU_AXI_MONITOR_EN 0x1c +#define DMU_PMU_TIMER_INT_CLEAR 0x2c +#define DMU_PMU_AXI_MONITOR_INT_CLEAR 0x30 +#define DMU_PMU_TIMER_INT_STA 0x40 +#define DMU_PMU_AXI_MONITOR_INT_STA 0x44 +#define DMU_PMU_TIMER_INT_MASK 0x54 +#define DMU_PMU_AXI_MONITOR_INT_MASK 0x58 + +#define DMU_PMU_EVENT_CYCLES 0x208 + +#define DMU_PMU_EVENT_AXI_READ_CMD_CNT 0x074 +#define DMU_PMU_EVENT_AXI_WRITE_CMD_CNT 0x07c +#define DMU_PMU_EVENT_AXI_READ_FLUX_CNT 0x084 +#define DMU_PMU_EVENT_AXI_WRITE_FLUX_CNT 0x08c + +#define DMU_PMU_MAX_COUNTERS 5 +#define DMU_PMU_MAX_COUNTERS_TIMER 1 +#define DMU_PMU_MAX_COUNTERS_AXI 4 + +#define ALL_EVENT_CLEAR_BIT 0x1 +#define DMU_PMU_TIMER_OPT_BIT 0x1 +#define AXI_MONITOR_OPT_BIT 0x01010101 + +#define DMU_PMU_NOTICE_START 0x0 +#define DMU_PMU_NOTICE_STOP 0x1 + +#define to_pd2408_dmu_pmu(p) (container_of(p, struct pd2408_dmu_pmu, pmu)) + +#define GET_DMU_EVENTID(hwc) (hwc->config_base & 0x7) +#define EVENT_VALID(idx) ((idx >= 0) && (idx < DMU_PMU_MAX_COUNTERS)) + +static int pd2408_dmu_pmu_hp_state; +int used_event; + +struct pd2408_dmu_pmu_hwevents { + struct perf_event *hw_events[DMU_PMU_MAX_COUNTERS]; + DECLARE_BITMAP(used_event_mask, DMU_PMU_MAX_COUNTERS); +}; + +struct pd2408_dmu_pmu { + struct device *dev; + void __iomem *base; + struct pmu pmu; + struct pd2408_dmu_pmu_hwevents pmu_events; + struct hlist_node node; + int on_cpu; + int irq; + u32 soc_version; + u32 dmu_id; + bool used_flag; +}; + +static const u32 dmu_counter_reg_offset[] = { + 
DMU_PMU_EVENT_CYCLES, + DMU_PMU_EVENT_AXI_WRITE_FLUX_CNT, DMU_PMU_EVENT_AXI_READ_FLUX_CNT, + DMU_PMU_EVENT_AXI_WRITE_CMD_CNT, DMU_PMU_EVENT_AXI_READ_CMD_CNT +}; + +ssize_t pd2408_dmu_pmu_format_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(buf, "%s\n", (char *)eattr->var); +} + +ssize_t pd2408_dmu_pmu_event_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); +} + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct pd2408_dmu_pmu *dmu_pmu = + to_pd2408_dmu_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(dmu_pmu->on_cpu)); +} + +#define PHYTIUM_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]){ \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } })[0] \ + .attr.attr) + +#define PHYTIUM_DMU_PMU_FORMAT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, pd2408_dmu_pmu_format_sysfs_show, \ + (void *)_config) + +#define PHYTIUM_DMU_PMU_EVENT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, pd2408_dmu_pmu_event_sysfs_show, \ + (unsigned long)_config) + +static struct attribute *pd2408_dmu_pmu_format_attr[] = { + PHYTIUM_DMU_PMU_FORMAT_ATTR(event, "config:0-2"), + NULL, +}; + +static const struct attribute_group pd2408_dmu_pmu_format_group = { + .name = "format", + .attrs = pd2408_dmu_pmu_format_attr, +}; + +static struct attribute *pd2408_dmu_pmu_events_attr[] = { + PHYTIUM_DMU_PMU_EVENT_ATTR(cycles, 0x00), + PHYTIUM_DMU_PMU_EVENT_ATTR(axi_write_flux, 0x01), + PHYTIUM_DMU_PMU_EVENT_ATTR(axi_read_flux, 0x02), + PHYTIUM_DMU_PMU_EVENT_ATTR(axi_write_cmd, 0x03), + PHYTIUM_DMU_PMU_EVENT_ATTR(axi_read_cmd, 0x04), + NULL, +}; 
+ +static const struct attribute_group pd2408_dmu_pmu_events_group = { + .name = "events", + .attrs = pd2408_dmu_pmu_events_attr, +}; + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *pd2408_dmu_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group pd2408_dmu_pmu_cpumask_attr_group = { + .attrs = pd2408_dmu_pmu_cpumask_attrs, +}; + +static const struct attribute_group *pd2408_dmu_pmu_attr_groups[] = { + &pd2408_dmu_pmu_format_group, + &pd2408_dmu_pmu_events_group, + &pd2408_dmu_pmu_cpumask_attr_group, + NULL, +}; + +#if IS_ENABLED(CONFIG_ARM_PHYTIUM_DMU_DEVFREQ) +extern struct blocking_notifier_head dmu_pmu_notifier_chain; + +void pd2408_dmu_pmu_notifier_chain_trigger(struct pd2408_dmu_pmu *dmu_pmu, int event) +{ + static bool start_flag; + + if ((event == DMU_PMU_NOTICE_START) && (start_flag == false)) { + blocking_notifier_call_chain(&dmu_pmu_notifier_chain, event, NULL); + start_flag = true; + dmu_pmu->used_flag = true; + } else if ((event == DMU_PMU_NOTICE_STOP) && (start_flag == true)) { + blocking_notifier_call_chain(&dmu_pmu_notifier_chain, event, NULL); + start_flag = false; + dmu_pmu->used_flag = false; + } +} +#endif + +static u64 pd2408_dmu_pmu_read_counter(struct pd2408_dmu_pmu *dmu_pmu, + struct hw_perf_event *hwc) +{ + u32 val32_l, val32_h, idx, counter_offset; + u64 val64; + + idx = GET_DMU_EVENTID(hwc); + counter_offset = dmu_counter_reg_offset[idx]; + + if (!EVENT_VALID(idx)) { + dev_err(dmu_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + val32_l = readl(dmu_pmu->base + counter_offset); + val32_h = readl(dmu_pmu->base + counter_offset + 4); + val64 = (u64)val32_h << 32 | (u64)val32_l; + + return val64; +} + +static void pd2408_dmu_pmu_clear_all_counters(struct pd2408_dmu_pmu *dmu_pmu) +{ + writel(BIT(0), dmu_pmu->base + DMU_PMU_CLEAR_EVENT); +} + +static void pd2408_dmu_pmu_disable_axi_cmd_events(struct pd2408_dmu_pmu *dmu_pmu) +{ + writel(0x0, dmu_pmu->base + 
DMU_PMU_AXI_MONITOR_EN); +} + +static void pd2408_dmu_pmu_mask_all_irq(struct pd2408_dmu_pmu *dmu_pmu) +{ + writel(DMU_PMU_TIMER_OPT_BIT, dmu_pmu->base + DMU_PMU_TIMER_INT_MASK); + writel(AXI_MONITOR_OPT_BIT, dmu_pmu->base + DMU_PMU_AXI_MONITOR_INT_MASK); +} + +static void pd2408_dmu_pmu_start_all_counters(struct pd2408_dmu_pmu *dmu_pmu) +{ + writel(BIT(0), dmu_pmu->base + DMU_PMU_TIMER_START); +} + +static void pd2408_dmu_pmu_stop_all_counters(struct pd2408_dmu_pmu *dmu_pmu) +{ + writel(BIT(0), dmu_pmu->base + DMU_PMU_TIMER_STOP); +} + +static void pd2408_dmu_pmu_reset_timer(struct pd2408_dmu_pmu *dmu_pmu) +{ + writel(0xFFFFFFFF, dmu_pmu->base + DMU_PMU_SET_TIMER_L); + writel(0xFFFFFFFF, dmu_pmu->base + DMU_PMU_SET_TIMER_H); +} + +static void pd2408_dmu_pmu_enable_events(struct pd2408_dmu_pmu *dmu_pmu, int idx) +{ + u8 en_bit; + u32 en_offset, irq_offset, val; + + if (idx == 0) { + en_bit = 0; + en_offset = 0; + irq_offset = DMU_PMU_TIMER_INT_MASK; + } else { + en_bit = (idx - 1) * 8; + en_offset = DMU_PMU_AXI_MONITOR_EN; + irq_offset = DMU_PMU_AXI_MONITOR_INT_MASK; + } + + if (en_offset) { + val = readl(dmu_pmu->base + en_offset); + val |= BIT(en_bit); + writel(val, dmu_pmu->base + en_offset); + } + + val = readl(dmu_pmu->base + irq_offset); + val &= ~BIT(en_bit); + writel(val, dmu_pmu->base + irq_offset); +} + +static int pd2408_dmu_pmu_mark_event(struct perf_event *event) +{ + struct pd2408_dmu_pmu *dmu_pmu = to_pd2408_dmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long *used_mask = dmu_pmu->pmu_events.used_event_mask; + + int idx = GET_DMU_EVENTID(hwc); + + if (test_bit(idx, used_mask)) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static void pd2408_dmu_pmu_unmark_event(struct pd2408_dmu_pmu *dmu_pmu, + int idx) +{ + if (!EVENT_VALID(idx)) { + dev_err(dmu_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + clear_bit(idx, dmu_pmu->pmu_events.used_event_mask); +} + +int 
pd2408_dmu_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct pd2408_dmu_pmu *dmu_pmu; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + dmu_pmu = to_pd2408_dmu_pmu(event->pmu); + + if (event->cpu < 0) { + dev_warn(dmu_pmu->dev, "Can't provide per-task data!\n"); + return -EINVAL; + } + + if (event->attr.config > DMU_PMU_MAX_COUNTERS) + return -EINVAL; + + if (dmu_pmu->on_cpu == -1) + return -EINVAL; + + hwc->idx = -1; + hwc->config_base = event->attr.config; + + event->cpu = dmu_pmu->on_cpu; + used_event = 0; + + return 0; +} + +void pd2408_dmu_pmu_event_update(struct perf_event *event) +{ + struct pd2408_dmu_pmu *dmu_pmu = to_pd2408_dmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta; + + pd2408_dmu_pmu_stop_all_counters(dmu_pmu); + delta = pd2408_dmu_pmu_read_counter(dmu_pmu, hwc); + local64_add(delta, &event->count); + +} + +void pd2408_dmu_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->state = 0; + perf_event_update_userpage(event); +} + +void pd2408_dmu_pmu_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->state |= PERF_HES_STOPPED; + + if (flags & PERF_EF_UPDATE) + pd2408_dmu_pmu_event_update(event); +} + +int pd2408_dmu_pmu_event_add(struct perf_event *event, int flags) +{ + struct pd2408_dmu_pmu *dmu_pmu = to_pd2408_dmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state |= PERF_HES_STOPPED; + + idx = pd2408_dmu_pmu_mark_event(event); + if (!EVENT_VALID(idx)) { + dev_err(dmu_pmu->dev, "Unsupported event index:%d!\n", idx); + return idx; + } +#if IS_ENABLED(CONFIG_ARM_PHYTIUM_DMU_DEVFREQ) + pd2408_dmu_pmu_notifier_chain_trigger(dmu_pmu, DMU_PMU_NOTICE_START); +#endif + event->hw.idx = idx; + dmu_pmu->pmu_events.hw_events[idx] = event; 
+ + pd2408_dmu_pmu_enable_events(dmu_pmu, idx); + used_event += 1; + return 0; +} + +void pd2408_dmu_pmu_event_del(struct perf_event *event, int flags) +{ + struct pd2408_dmu_pmu *dmu_pmu = to_pd2408_dmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + used_event -= 1; + pd2408_dmu_pmu_event_stop(event, PERF_EF_UPDATE); + + pd2408_dmu_pmu_unmark_event(dmu_pmu, hwc->idx); + + perf_event_update_userpage(event); + dmu_pmu->pmu_events.hw_events[hwc->idx] = NULL; +#if IS_ENABLED(CONFIG_ARM_PHYTIUM_DMU_DEVFREQ) + if (used_event == 0) + pd2408_dmu_pmu_notifier_chain_trigger(dmu_pmu, DMU_PMU_NOTICE_STOP); +#endif + +} + +void pd2408_dmu_pmu_enable(struct pmu *pmu) +{ + struct pd2408_dmu_pmu *dmu_pmu = to_pd2408_dmu_pmu(pmu); + int event_added; + + event_added = bitmap_weight(dmu_pmu->pmu_events.used_event_mask, + DMU_PMU_MAX_COUNTERS); + + if (event_added) { + pd2408_dmu_pmu_stop_all_counters(dmu_pmu); + pd2408_dmu_pmu_clear_all_counters(dmu_pmu); + pd2408_dmu_pmu_reset_timer(dmu_pmu); + pd2408_dmu_pmu_start_all_counters(dmu_pmu); + } +} + +void pd2408_dmu_pmu_disable(struct pmu *pmu) +{ + struct pd2408_dmu_pmu *dmu_pmu = to_pd2408_dmu_pmu(pmu); + int event_added; + + event_added = bitmap_weight(dmu_pmu->pmu_events.used_event_mask, + DMU_PMU_MAX_COUNTERS); + if (event_added && dmu_pmu->used_flag) { + pd2408_dmu_pmu_mask_all_irq(dmu_pmu); + pd2408_dmu_pmu_disable_axi_cmd_events(dmu_pmu); + } +} + +static const struct acpi_device_id pd2408_dmu_pmu_acpi_match[] = { + { "PHYT0069", }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, pd2408_dmu_pmu_acpi_match); + +static irqreturn_t pd2408_dmu_pmu_overflow_handler(int irq, void *dev_id) +{ + struct pd2408_dmu_pmu *dmu_pmu = dev_id; + struct perf_event *event; + int idx; + unsigned long *used_mask = dmu_pmu->pmu_events.used_event_mask; + u32 timer_int_sta, axi_int_sta; + + timer_int_sta = readl(dmu_pmu->base + DMU_PMU_TIMER_INT_STA); + axi_int_sta = readl(dmu_pmu->base + DMU_PMU_AXI_MONITOR_INT_STA); + + if ((timer_int_sta + 
axi_int_sta) == 0) + return IRQ_NONE; + + if (timer_int_sta) + writel(0x1, dmu_pmu->base + DMU_PMU_TIMER_INT_CLEAR); + + if (axi_int_sta) + writel(axi_int_sta, dmu_pmu->base + DMU_PMU_AXI_MONITOR_INT_CLEAR); + + if (!dmu_pmu->used_flag) { + pd2408_dmu_pmu_mask_all_irq(dmu_pmu); + return IRQ_HANDLED; + } + + for_each_set_bit(idx, used_mask, DMU_PMU_MAX_COUNTERS) { + event = dmu_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + pd2408_dmu_pmu_event_update(event); + } + writel(ALL_EVENT_CLEAR_BIT, dmu_pmu->base + DMU_PMU_CLEAR_EVENT); + pd2408_dmu_pmu_start_all_counters(dmu_pmu); + + return IRQ_HANDLED; +} + +static int pd2408_dmu_pmu_init_irq(struct pd2408_dmu_pmu *dmu_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, + pd2408_dmu_pmu_overflow_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, + dev_name(&pdev->dev), dmu_pmu); + if (ret < 0) { + dev_err(&pdev->dev, "Fail to request IRQ:%d ret:%d\n", irq, + ret); + return ret; + } + + dmu_pmu->irq = irq; + + return 0; +} + +static int pd2408_dmu_pmu_init_data(struct platform_device *pdev, + struct pd2408_dmu_pmu *dmu_pmu) +{ + struct resource *res; + + if (device_property_read_u32(&pdev->dev, "phytium,ddr-id", + &dmu_pmu->dmu_id)) { + dev_err(&pdev->dev, "Can not read phytium,ddr-id!\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dmu_pmu->base = devm_ioremap_resource(&pdev->dev, res); + + if (IS_ERR(dmu_pmu->base)) { + dev_err(&pdev->dev, + "ioremap failed for dmu_pmu base resource\n"); + return PTR_ERR(dmu_pmu->base); + } + + dmu_pmu->used_flag = true; + pd2408_dmu_pmu_mask_all_irq(dmu_pmu); + + return 0; +} + +static int pd2408_dmu_pmu_dev_probe(struct platform_device *pdev, + struct pd2408_dmu_pmu *dmu_pmu) +{ + int ret; + + ret = pd2408_dmu_pmu_init_data(pdev, dmu_pmu); + if (ret) + return ret; + + ret = pd2408_dmu_pmu_init_irq(dmu_pmu, 
pdev); + if (ret) + return ret; + + dmu_pmu->dev = &pdev->dev; + dmu_pmu->on_cpu = -1; + + return 0; +} + +static int pd2408_dmu_pmu_probe(struct platform_device *pdev) +{ + struct pd2408_dmu_pmu *dmu_pmu; + char *name; + int ret; + + dmu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dmu_pmu), GFP_KERNEL); + if (!dmu_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, dmu_pmu); + + ret = pd2408_dmu_pmu_dev_probe(pdev, dmu_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(pd2408_dmu_pmu_hp_state, + &dmu_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "phyt_dmu%u_pmu", dmu_pmu->dmu_id); + dmu_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = pd2408_dmu_pmu_event_init, + .pmu_enable = pd2408_dmu_pmu_enable, + .pmu_disable = pd2408_dmu_pmu_disable, + .add = pd2408_dmu_pmu_event_add, + .del = pd2408_dmu_pmu_event_del, + .start = pd2408_dmu_pmu_event_start, + .stop = pd2408_dmu_pmu_event_stop, + .read = pd2408_dmu_pmu_event_update, + .attr_groups = pd2408_dmu_pmu_attr_groups, + }; + + ret = perf_pmu_register(&dmu_pmu->pmu, name, -1); + if (ret) { + dev_err(dmu_pmu->dev, "DMU PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls(pd2408_dmu_pmu_hp_state, + &dmu_pmu->node); + } + + pr_info("die_dmu%d_pmu on cpu%d.\n", dmu_pmu->dmu_id, dmu_pmu->on_cpu); + + return ret; +} + +static int pd2408_dmu_pmu_remove(struct platform_device *pdev) +{ + struct pd2408_dmu_pmu *dmu_pmu = platform_get_drvdata(pdev); + + pd2408_dmu_pmu_mask_all_irq(dmu_pmu); + perf_pmu_unregister(&dmu_pmu->pmu); + cpuhp_state_remove_instance_nocalls(pd2408_dmu_pmu_hp_state, + &dmu_pmu->node); + + return 0; +} + +static struct platform_driver pd2408_dmu_pmu_driver = { + .driver = { + .name = "pd2408_dmu_pmu", + .acpi_match_table = ACPI_PTR(pd2408_dmu_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe 
= pd2408_dmu_pmu_probe,
+	.remove = pd2408_dmu_pmu_remove,
+};
+
+/*
+ * CPU hotplug online callback: choose (or keep) the owner CPU for this PMU
+ * and pin the overflow IRQ to it.
+ */
+int pd2408_dmu_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct pd2408_dmu_pmu *dmu_pmu =
+		hlist_entry_safe(node, struct pd2408_dmu_pmu, node);
+
+	if (dmu_pmu->on_cpu != -1) {
+		/* Re-home the PMU only if its current owner CPU went away. */
+		if (!cpumask_test_cpu(dmu_pmu->on_cpu, cpu_online_mask)) {
+			perf_pmu_migrate_context(&dmu_pmu->pmu, dmu_pmu->on_cpu, cpu);
+			dmu_pmu->on_cpu = cpu;
+			WARN_ON(irq_set_affinity_hint(dmu_pmu->irq, cpumask_of(cpu)));
+		}
+		return 0;
+	}
+
+	dmu_pmu->on_cpu = cpu;
+	WARN_ON(irq_set_affinity_hint(dmu_pmu->irq, cpumask_of(cpu)));
+
+	return 0;
+}
+
+/*
+ * CPU hotplug offline callback: if the CPU going down owns this PMU,
+ * migrate the perf context and the IRQ affinity to another online CPU.
+ */
+int pd2408_dmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct pd2408_dmu_pmu *dmu_pmu =
+		hlist_entry_safe(node, struct pd2408_dmu_pmu, node);
+	unsigned int target;
+
+	if (dmu_pmu->on_cpu != cpu)
+		return 0;
+
+	/*
+	 * The outgoing CPU may still be present in cpu_online_mask while this
+	 * callback runs, so explicitly exclude it rather than using
+	 * cpumask_last(), which could hand the context right back to the CPU
+	 * that is being unplugged.
+	 */
+	target = cpumask_any_but(cpu_online_mask, cpu);
+
+	if (target >= nr_cpu_ids) {
+		dev_err(dmu_pmu->dev, "offline cpu%d with no target to migrate.\n",
+			cpu);
+		return 0;
+	}
+
+	perf_pmu_migrate_context(&dmu_pmu->pmu, cpu, target);
+	WARN_ON(irq_set_affinity_hint(dmu_pmu->irq, cpumask_of(target)));
+	dmu_pmu->on_cpu = target;
+
+	return 0;
+}
+
+static int __init pd2408_dmu_pmu_module_init(void)
+{
+	int ret;
+
+	/* Dynamic hotplug state shared by every DMU PMU instance. */
+	pd2408_dmu_pmu_hp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+				"perf/phytium/dmupmu:online",
+				pd2408_dmu_pmu_online_cpu, pd2408_dmu_pmu_offline_cpu);
+	if (pd2408_dmu_pmu_hp_state < 0) {
+		pr_err("DMU PMU: setup hotplug, pd2408_dmu_pmu_hp_state = %d\n",
+		       pd2408_dmu_pmu_hp_state);
+		return pd2408_dmu_pmu_hp_state;
+	}
+
+	ret = platform_driver_register(&pd2408_dmu_pmu_driver);
+	if (ret)
+		cpuhp_remove_multi_state(pd2408_dmu_pmu_hp_state);
+
+	return ret;
+}
+module_init(pd2408_dmu_pmu_module_init);
+
+static void __exit pd2408_dmu_pmu_module_exit(void)
+{
+	platform_driver_unregister(&pd2408_dmu_pmu_driver);
+	cpuhp_remove_multi_state(pd2408_dmu_pmu_hp_state);
+}
+module_exit(pd2408_dmu_pmu_module_exit);
+
+MODULE_DESCRIPTION("Phytium DMU PMU driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DMU_PERF_DRIVER_VERSION); +MODULE_AUTHOR("Hu Xianghua "); +MODULE_AUTHOR("Tan Rui "); + diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index ccd59ddd76100a51d56beec797d00bfa8156440b..bb294718b05220b054c1bf8e8c3752906a1461cd 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -292,6 +292,14 @@ config RESET_TN48M_CPLD This driver can also be built as a module. If so, the module will be called reset-tn48m. +config RESET_PHYTIUM + bool "PHYTIUM Reset Driver" + default ARCH_PHYTIUM + help + This enables the reset driver for phytium SoCs. If you wish + to use the reset framework for such memory-mapped devices, + say Y here. Otherwise, say N. + config RESET_UNIPHIER tristate "Reset controller driver for UniPhier SoCs" depends on ARCH_UNIPHIER || COMPILE_TEST diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 8270da8a4baa62a3677eaf50cbd32ffa4c9450f2..86278d65faa3bd5f91a1dc17558030a68c82cdfb 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o obj-$(CONFIG_RESET_SCMI) += reset-scmi.o obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o +obj-$(CONFIG_RESET_PHYTIUM) += reset-phytium.o obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o diff --git a/drivers/reset/reset-phytium.c b/drivers/reset/reset-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..ff933e0fd81da89ed3011e2a4c26ab86c361ea79 --- /dev/null +++ b/drivers/reset/reset-phytium.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Phytium Reset Driver. + * + * Copyright (c) 2024, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RESET_PHYTIUM_DRV_VERSION "1.0.0" + +#define PHYTIUM_RESET_MAX_CNTS (19) +#define PHYTIUM_I2C_RESET_ID0 (3) +#define PHYTIUM_RESET_OFFSET_0 (0) +#define PHYTIUM_RESET_OFFSET_1 (4) + +struct reset_phytium_dev { + void __iomem *base; + struct device *dev; + struct reset_controller_dev rcdev; +}; + +struct reset_reg_info { + u32 reg_offset; + u32 data; +}; +static struct reset_reg_info reset_mng[PHYTIUM_RESET_MAX_CNTS] = { + {PHYTIUM_RESET_OFFSET_0, (u32)BIT(28)}, + {PHYTIUM_RESET_OFFSET_0, (u32)BIT(29)}, + {PHYTIUM_RESET_OFFSET_0, (u32)BIT(30)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(16)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(17)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(18)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(19)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(20)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(21)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(22)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(23)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(24)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(25)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(26)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(27)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(28)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(29)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(30)}, + {PHYTIUM_RESET_OFFSET_1, (u32)BIT(31)} + }; + +static inline struct reset_phytium_dev * +to_reset_phytium_data(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct reset_phytium_dev, rcdev); +} + +static int reset_phytium_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct reset_phytium_dev *rdev = to_reset_phytium_data(rcdev); + u32 reg_val, reset_reg_val; + + if (id >= PHYTIUM_RESET_MAX_CNTS) { + dev_err(rdev->dev, "The reset id is out of range %ld\n", id); + return -EINVAL; + } + + reg_val = readl_relaxed(rdev->base + reset_mng[id].reg_offset); + reset_reg_val = reg_val; + reg_val &= ~reset_mng[id].data; + writel_relaxed(reg_val, rdev->base + 
reset_mng[id].reg_offset); + writel_relaxed(reset_reg_val, rdev->base + reset_mng[id].reg_offset); + + return 0; +} + +const struct reset_control_ops reset_phytium_ops = { + .reset = reset_phytium_reset, +}; +EXPORT_SYMBOL_GPL(reset_phytium_ops); + +static const struct of_device_id reset_phytium_dt_ids[] = { + { .compatible = "phytium,reset", }, + { /* sentinel */ }, +}; + +static int reset_phytium_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct reset_phytium_dev *rdev; + struct resource *res; + + rdev = devm_kzalloc(dev, sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rdev->base = devm_ioremap_resource(dev, res); + if (IS_ERR(rdev->base)) + return PTR_ERR(rdev->base); + + rdev->rcdev.owner = THIS_MODULE; + rdev->rcdev.nr_resets = PHYTIUM_RESET_MAX_CNTS; + rdev->rcdev.ops = &reset_phytium_ops; + rdev->rcdev.of_node = dev->of_node; + rdev->dev = &pdev->dev; + + return devm_reset_controller_register(dev, &rdev->rcdev); +} + +static struct platform_driver reset_phytium_driver = { + .probe = reset_phytium_probe, + .driver = { + .name = "phytium-reset", + .of_match_table = reset_phytium_dt_ids, + }, +}; +builtin_platform_driver(reset_phytium_driver); + +MODULE_AUTHOR("Wu Jinyong "); +MODULE_DESCRIPTION("Phytium reset"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(RESET_PHYTIUM_DRV_VERSION); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index d28c76be14a0408320272360ef49efac442cf75d..524228d03c2d4071d54e65b39e0c2281a9ba1d9b 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -823,6 +823,18 @@ config SPI_PHYTIUM_QSPI If unsure, say N. +config SPI_PHYTIUM_V2 + tristate "spi phytium v2" + depends on ARCH_PHYTIUM || COMPILE_TEST + help + This config is similar to the "SPI_PHYTIUM" config. 
+ +config SPI_PHYTIUM_PLAT_V2 + tristate "Phytium SPI-v2 controller platform support" + select SPI_PHYTIUM_V2 + help + This config is similar to the "SPI_PHYTIUM_PLAT" config. + config SPI_PIC32 tristate "Microchip PIC32 series SPI" depends on MACH_PIC32 || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index cfaf68b2d39d98a1f006aeaf6b0dc1cf3a05fb56..cc16157b190bcc875d4b75c47db52bb2567a3819 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -104,6 +104,8 @@ obj-$(CONFIG_SPI_PCI1XXXX) += spi-pci1xxxx.o obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium.o obj-$(CONFIG_SPI_PHYTIUM_PLAT) += spi-phytium-plat.o obj-$(CONFIG_SPI_PHYTIUM_PCI) += spi-phytium-pci.o +obj-$(CONFIG_SPI_PHYTIUM_V2) += spi-phytium-common.o spi-phytium-v2.o +obj-$(CONFIG_SPI_PHYTIUM_PLAT_V2) += spi-phytium-plat-v2.o obj-$(CONFIG_SPI_PHYTIUM_QSPI) += spi-phytium-qspi.o obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium-dma.o obj-$(CONFIG_SPI_PIC32) += spi-pic32.o diff --git a/drivers/spi/spi-phytium-common.c b/drivers/spi/spi-phytium-common.c new file mode 100644 index 0000000000000000000000000000000000000000..b5aabad1dd04e968d34c312195e958c35036c2f7 --- /dev/null +++ b/drivers/spi/spi-phytium-common.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller driver. + * + * Copyright (c) 2023-2024, Phytium Technology Co., Ltd.. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "spi-phytium.h" + +#define SPI_SHOW_MSG_DEBUG 0 + +void spi_phytium_show_msg(struct msg *info) +{ + if (SPI_SHOW_MSG_DEBUG) { + pr_err("module:0x%4x, cmd:0x%04x, sub:0x%04x\n", + info->seq, info->cmd_id, info->cmd_subid); + + pr_err("0x%02x 0x%02x 0x%02x 0x%02x", info->data[0], + info->data[1], info->data[2], info->data[3]); + + pr_err("0x%02x 0x%02x 0x%02x 0x%02x", info->data[4], + info->data[5], info->data[6], info->data[7]); + + pr_err("0x%02x 0x%02x 0x%02x 0x%02x", info->data[8], + info->data[9], info->data[10], info->data[11]); + + pr_err("0x%02x 0x%02x 0x%02x 0x%02x", info->data[12], + info->data[13], info->data[14], info->data[15]); + } +} + +void *memcpy_byte(void *_dest, const void *_src, size_t sz) +{ + while (sz >= 8) { + *(u64 *)_dest = *(u64 *)_src; + _dest += 8; + _src += 8; + sz -= 8; + } + + while (sz) { + *(u8 *)_dest = *(u8 *)_src; + _dest++; + _src++; + sz--; + } + + return _dest; +} + +int spi_phytium_print_status(struct phytium_spi *fts, u8 status0, + u8 status1) +{ + if (status1 == 0) + return 0; + + switch (status1) { + case 1: + pr_err("SPI Bus is busy.\n"); + break; + case 2: + pr_err("DMA queue transfer error.\n"); + break; + case 3: + pr_err("DMA transfer timeout.\n"); + break; + case 4: + pr_err("Operating flash timeout.\n"); + break; + case 5: + pr_err("spi_tx channel:DMA initialization failure.\n"); + break; + case 6: + pr_err("spi_rx channel:DMA initialization failure.\n"); + break; + case 7: + pr_err("DMA queue initialization failure.\n"); + break; + default: + pr_err("status=0x%x,Unknown error\n", status1); + break; + } + + if (fts->debug_enabled) + fts->handle_debug_err(fts); + + return -1; +} + +int spi_phytium_check_result(struct phytium_spi *fts) +{ + unsigned long long ms = 300000; + struct msg *msg = 
(struct msg *)fts->tx_shmem_addr; + + reinit_completion(&fts->cmd_completion); + ms = wait_for_completion_timeout(&fts->cmd_completion, msecs_to_jiffies(ms)); + + if (ms == 0) { + dev_err(&fts->master->dev, "SPI controller timed out\n"); + return -1; + } + + return spi_phytium_print_status(fts, msg->status0, msg->status1); +} + +int spi_phytium_set(struct phytium_spi *fts) +{ + int ret; + + spi_phytium_show_msg(fts->msg); + phytium_write_regfile(fts, SPI_REGFILE_AP2RV_INTR_STATE, 0x10); + ret = spi_phytium_check_result(fts); + + return ret; +} + +void spi_phytium_default(struct phytium_spi *fts) +{ + memset(fts->msg, 0, sizeof(struct msg)); + + fts->msg->cmd_id = PHYTSPI_MSG_CMD_DEFAULT; + + spi_phytium_show_msg(fts->msg); + phytium_write_regfile(fts, SPI_REGFILE_AP2RV_INTR_STATE, 0x10); + spi_phytium_check_result(fts); +} +EXPORT_SYMBOL_GPL(spi_phytium_default); + +void spi_phytium_set_subid(struct phytium_spi *fts, u16 sub_cmd) +{ + fts->msg->cmd_id = PHYTSPI_MSG_CMD_SET; + fts->msg->cmd_subid = sub_cmd; +} + +void spi_phytium_set_cmd8(struct phytium_spi *fts, u16 sub_cmd, + u8 data) +{ + memset(fts->msg, 0, sizeof(struct msg)); + spi_phytium_set_subid(fts, sub_cmd); + fts->msg->data[0] = data; + spi_phytium_show_msg(fts->msg); + phytium_write_regfile(fts, SPI_REGFILE_AP2RV_INTR_STATE, 0x10); + spi_phytium_check_result(fts); +} +EXPORT_SYMBOL_GPL(spi_phytium_set_cmd8); + +void spi_phytium_set_cmd16(struct phytium_spi *fts, u16 sub_cmd, + u16 data) +{ + u16 *cp_data = (u16 *)&fts->msg->data[0]; + + memset(fts->msg, 0, sizeof(struct msg)); + spi_phytium_set_subid(fts, sub_cmd); + *cp_data = data; + spi_phytium_show_msg(fts->msg); + phytium_write_regfile(fts, SPI_REGFILE_AP2RV_INTR_STATE, 0x10); + spi_phytium_check_result(fts); +} +EXPORT_SYMBOL_GPL(spi_phytium_set_cmd16); + +void spi_phytium_set_cmd32(struct phytium_spi *fts, u16 sub_cmd, + u32 data) +{ + u32 *cp_data = (u32 *)&fts->msg->data[0]; + + memset(fts->msg, 0, sizeof(struct msg)); + 
spi_phytium_set_subid(fts, sub_cmd); + *cp_data = data; + spi_phytium_show_msg(fts->msg); + phytium_write_regfile(fts, SPI_REGFILE_AP2RV_INTR_STATE, 0x10); + spi_phytium_check_result(fts); +} +EXPORT_SYMBOL_GPL(spi_phytium_set_cmd32); + +void spi_phytium_data_subid(struct phytium_spi *fts, u16 sub_cmd) +{ + fts->msg->cmd_id = PHYTSPI_MSG_CMD_DATA; + fts->msg->cmd_subid = sub_cmd; +} + +void spi_phytium_write_pre(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode, u8 tmode, + u8 flags, u8 spi_write_flag) +{ + struct msg *msg; + u32 len; + u64 smem_tx; + u8 first = 1; + u64 tx_addr; + + len = min((u32)(fts->tx_end - fts->tx), (u32)SPI_TRANS_DATA_SIZE); + msg = (struct msg *)((u64)fts->msg + (sizeof(struct msg) + FLASH_PAGE_SIZE)*spi_write_flag); + memset(msg, 0, sizeof(struct msg)); + + msg->cmd_id = PHYTSPI_MSG_CMD_DATA; + if (spi_write_flag) { + if (len > 16 && fts->dma_get_ddrdata) + msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_FLASH_DMA_TX; + else + msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_FLASH_TX; + } else { + if (len > 16 && fts->dma_get_ddrdata) + msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_DMA_TX; + else + msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_TX; + } + + if (len > 16 && fts->dma_get_ddrdata) { + tx_addr = (u64)__virt_to_phys((u64)fts->tx); + if (!tx_addr) { + dev_err(&fts->master->dev, "tx address translation failed\n"); + return; + } + *(u64 *)&msg->data[0] = tx_addr; + } else { + smem_tx = (u64)msg + sizeof(struct msg); + memcpy((void *)smem_tx, fts->tx, len); + *(u64 *)&msg->data[0] = sizeof(struct msg); + } + + *(u32 *)&msg->data[8] = len; + fts->tx += len; + msg->data[12] = cs; + msg->data[13] = dfs; + msg->data[14] = mode; + msg->data[15] = tmode; + msg->data[16] = flags; + msg->data[17] = first; + first = 0; +} +EXPORT_SYMBOL_GPL(spi_phytium_write_pre); + +int spi_phytium_flash_erase(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode, + u8 tmode, u8 flags, u8 cmd) +{ + u32 len; + u64 smem_tx; + u8 first = 1; + u8 cmd_addr[8]; + int ret; + + len = (u32)(fts->tx_end - 
fts->tx); + + memset(fts->msg, 0, sizeof(struct msg)); + + fts->msg->cmd_id = PHYTSPI_MSG_CMD_DATA; + + if (cmd == SPINOR_OP_BE_4K || cmd == SPINOR_OP_CHIP_ERASE) + fts->msg->cmd_subid = PHYTSPI_MSG_CMD_FLASH_ERASE; + else if (cmd == SPINOR_OP_READ || cmd == SPINOR_OP_READ_FAST || + cmd == SPINOR_OP_READ_4B || cmd == SPINOR_OP_READ_FAST_4B) + fts->msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_TX; + + smem_tx = (u64)fts->msg + sizeof(struct msg); + + cmd_addr[0] = cmd; + if (cmd == SPINOR_OP_BE_4K || cmd == SPINOR_OP_READ || + cmd == SPINOR_OP_READ_FAST || cmd == SPINOR_OP_READ_4B || + cmd == SPINOR_OP_READ_FAST_4B) { + memcpy_byte((void *)&cmd_addr[1], fts->tx, len); + memcpy_byte((void *)smem_tx, (void *)&cmd_addr[0], len + 1); + *(u32 *)&fts->msg->data[8] = len + 1; + } else if (cmd == SPINOR_OP_CHIP_ERASE) { + memcpy_byte((void *)smem_tx, (void *)&cmd_addr[0], 1); + *(u32 *)&fts->msg->data[8] = len; + } + + *(u32 *)&fts->msg->data[0] = sizeof(struct msg); + fts->tx += len; + fts->msg->data[12] = cs; + fts->msg->data[13] = dfs; + fts->msg->data[14] = mode; + fts->msg->data[15] = tmode; + fts->msg->data[16] = flags; + fts->msg->data[17] = first; + + ret = spi_phytium_set(fts); + if (ret) { + dev_err(&fts->master->dev, "AP <-> RV interaction failed\n"); + return ret; + } + return ret; +} +EXPORT_SYMBOL_GPL(spi_phytium_flash_erase); + +int spi_phytium_flash_write(struct phytium_spi *fts, u8 cmd) +{ + u8 cmd_addr[8] = {0}; + + cmd_addr[0] = fts->len + 1; + cmd_addr[1] = cmd; + memcpy_byte((void *)&cmd_addr[2], fts->tx, fts->len); + + fts->msg->data[18] = cmd_addr[0]; + fts->msg->data[19] = cmd_addr[1]; + fts->msg->data[20] = cmd_addr[2]; + fts->msg->data[21] = cmd_addr[3]; + fts->msg->data[22] = cmd_addr[4]; + fts->msg->data[23] = cmd_addr[5]; + + return 0; +} +EXPORT_SYMBOL_GPL(spi_phytium_flash_write); + +int spi_phytium_write(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode, + u8 tmode, u8 flags, u8 spi_write_flag) +{ + int ret; + u32 len; + u64 smem_tx; + u8 first = 1; + 
u64 tx_addr;
+
+	if (spi_write_flag == 1) {
+		/*
+		 * Fix: the return value of spi_phytium_set() was previously
+		 * discarded and an uninitialized 'ret' was tested (undefined
+		 * behaviour); capture it before checking.
+		 */
+		ret = spi_phytium_set(fts);
+		if (ret) {
+			dev_err(&fts->master->dev, "AP <-> RV interaction failed\n");
+			return ret;
+		}
+	}
+
+	/* Push the TX buffer to the RV core in SPI_TRANS_DATA_SIZE slices. */
+	do {
+		len = min_t(u32, (u32)(fts->tx_end - fts->tx),
+			    (u32)SPI_TRANS_DATA_SIZE);
+
+		fts->msg->cmd_id = PHYTSPI_MSG_CMD_DATA;
+
+		if (spi_write_flag == 2) {
+			if (len > 16 && fts->dma_get_ddrdata)
+				fts->msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_FLASH_DMA_TX;
+			else
+				fts->msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_FLASH_TX;
+		} else {
+			if (len > 16 && fts->dma_get_ddrdata)
+				fts->msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_DMA_TX;
+			else
+				fts->msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_TX;
+		}
+
+		if (len > 16 && fts->dma_get_ddrdata) {
+			/* Large transfers: hand the RV core a physical address. */
+			tx_addr = __virt_to_phys((u64)fts->tx);
+			if (!tx_addr) {
+				dev_err(&fts->master->dev, "tx address translation failed\n");
+				return -1;
+			}
+			*(u64 *)&fts->msg->data[0] = tx_addr;
+		} else {
+			/* Small transfers: copy the payload into shared memory. */
+			smem_tx = (u64)fts->msg + sizeof(struct msg);
+			memcpy_byte((void *)smem_tx, fts->tx, len);
+			*(u64 *)&fts->msg->data[0] = sizeof(struct msg);
+		}
+
+		*(u32 *)&fts->msg->data[8] = len;
+		fts->tx += len;
+		fts->msg->data[12] = cs;
+		fts->msg->data[13] = dfs;
+		fts->msg->data[14] = mode;
+		fts->msg->data[15] = tmode;
+		fts->msg->data[16] = flags;
+		fts->msg->data[17] = first;
+		ret = spi_phytium_set(fts);
+		if (ret) {
+			dev_err(&fts->master->dev, "AP <-> RV interaction failed\n");
+			return ret;
+		}
+
+		first = 0;
+	} while (fts->tx_end > fts->tx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_phytium_write);
+
+int spi_phytium_read(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode,
+	u8 tmode, u8 flags)
+{
+	int ret;
+	u32 len;
+	u64 smem_rx;
+	u8 first = 1;
+	u64 rx_addr;
+
+	do {
+		if (fts->dma_get_ddrdata)
+			len = min_t(u32, (u32)(fts->rx_end - fts->rx),
+				    (u32)(fts->rx_end - fts->rx));
+		else
+			len = min_t(u32, (u32)(fts->rx_end - fts->rx), 128);
+
+		fts->msg->cmd_id = PHYTSPI_MSG_CMD_DATA;
+
+		smem_rx = (u64)fts->msg + sizeof(struct msg);
+
+		if (len > 16 && fts->dma_get_ddrdata) {
+ 
fts->msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_DMA_RX; + rx_addr = __virt_to_phys((u64)fts->rx); + if (!rx_addr) { + dev_err(&fts->master->dev, "rx address translation failed\n"); + return -1; + } + *(u64 *)&fts->msg->data[0] = rx_addr; + } else { + fts->msg->cmd_subid = PHYTSPI_MSG_CMD_DATA_RX; + *(u64 *)&fts->msg->data[0] = sizeof(struct msg); + } + + *(u32 *)&fts->msg->data[8] = len; + fts->msg->data[12] = cs; + fts->msg->data[13] = dfs; + fts->msg->data[14] = mode; + fts->msg->data[15] = tmode; + if (fts->rx_end <= fts->rx + len) + fts->msg->data[16] = flags; + else if (first == 1) + fts->msg->data[16] = 1; + else + fts->msg->data[16] = 0; + fts->msg->data[17] = first; + ret = spi_phytium_set(fts); + if (ret) { + dev_err(&fts->master->dev, "AP <-> RV interaction failed\n"); + return ret; + } + if (len <= 16 || !fts->dma_get_ddrdata) + memcpy_byte(fts->rx, (void *)smem_rx, len); + + fts->rx += len; + first = 0; + } while (fts->rx_end > fts->rx); + + return ret; +} +EXPORT_SYMBOL_GPL(spi_phytium_read); + +MODULE_AUTHOR("Peng Min "); +MODULE_DESCRIPTION("Phytium SPI adapter core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-phytium-plat-v2.c b/drivers/spi/spi-phytium-plat-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..9801406d61e9322847c5d59a2f407418606db7b1 --- /dev/null +++ b/drivers/spi/spi-phytium-plat-v2.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller platform driver. + * + * Copyright (c) 2023-2024, Phytium Technology Co., Ltd. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "spi-phytium.h"
+
+#define DRIVER_NAME_PHYT "phytium_spi_2.0"
+#define DRIVER_VERSION "1.0.7"
+
+#define PHYTIUM_CPU_PART_FTC872 0x872
+
+#define MIDR_PHYTIUM_FTC872 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_FTC872)
+
+/* sysfs 'debug' read: report the raw debug regfile register in hex. */
+static ssize_t debug_show(struct device *dev,
+			  struct device_attribute *da,
+			  char *buf)
+{
+	struct phytium_spi *fts = dev_get_drvdata(dev);
+	ssize_t ret;
+	u32 reg;
+
+	reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG);
+	ret = sprintf(buf, "%x\n", reg);
+
+	return ret;
+}
+
+/*
+ * sysfs 'debug' write: "<loc> <en>" where loc selects alive(1)/debug(0)
+ * and en enables(1)/disables(0) the corresponding regfile bit.
+ *
+ * Fixes over the previous version:
+ *  - the kmalloc'ed buffer was leaked on every early-error return;
+ *  - strsep() advances the cursor, so the old kfree(p) freed a pointer
+ *    into the middle of the allocation (or NULL) instead of the
+ *    original allocation;
+ *  - kstrtol() error codes were stored in a u8, destroying the negative
+ *    errno before it was returned.
+ */
+static ssize_t debug_store(struct device *dev,
+			   struct device_attribute *da, const char *buf,
+			   size_t size)
+{
+	u8 loc, dis_en;
+	char *kbuf;	/* original allocation, the only pointer passed to kfree() */
+	char *p;	/* parsing cursor, advanced by strsep() */
+	char *token;
+	long value;
+	int err;
+	u32 reg;
+	struct phytium_spi *fts = dev_get_drvdata(dev);
+
+	dev_info(dev, "echo alive(1)/debug(0) enable(1)/disable(0) > debug\n");
+	dev_info(dev, "Example:echo 0 1 > debug; Enable Debug Function\n");
+
+	kbuf = kmalloc(size, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+	/* NOTE(review): bounded to 'size', so the final input char (usually
+	 * the trailing newline) is dropped by strscpy() — presumably intended.
+	 */
+	strscpy(kbuf, buf, size);
+	p = kbuf;
+
+	token = strsep(&p, " ");
+	if (!token) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = kstrtol(token, 0, &value);
+	if (err)
+		goto out;
+	loc = (u8)value;
+
+	token = strsep(&p, " ");
+	if (!token) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = kstrtol(token, 0, &value);
+	if (err)
+		goto out;
+	dis_en = (u8)value;
+
+	reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG);
+
+	if (loc == 1) {
+		/* Alive function: bit 1 of the debug register. */
+		if (dis_en == 1) {
+			fts->alive_enabled = true;
+			reg |= BIT(loc);
+		} else if (dis_en == 0) {
+			fts->alive_enabled = false;
+			reg &= ~BIT(loc);
+		}
+		fts->runtimes = 0;
+	} else if (loc == 0) {
+		/* Debug function: bit 0 of the debug register. */
+		if (dis_en == 1) {
+			fts->debug_enabled = true;
+			reg |= BIT(loc);
+		} else if (dis_en == 0) {
+			fts->debug_enabled = false;
+			reg &= ~BIT(loc);
+		}
+	}
+
+	phytium_write_regfile(fts, SPI_REGFILE_DEBUG, reg);
+	err = 0;
+
+out:
+	kfree(kbuf);
+
+	return err ? err : (ssize_t)size;
+}
+ 
+static DEVICE_ATTR_RW(debug); + +static struct attribute *spi_phyt_device_attrs[] = { + &dev_attr_debug.attr, + NULL, +}; + +static const struct attribute_group spi_phyt_device_group = { + .attrs = spi_phyt_device_attrs, +}; + +static int spi_phyt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phytium_spi *fts; + struct resource *regfile_mem, *share_mem; + int ret; + int num_cs; + int cs_gpio; + int global_cs = 0; + int i; + u32 clk_rate = SPI_DEFAULT_CLK; + + fts = devm_kzalloc(&pdev->dev, sizeof(struct phytium_spi), + GFP_KERNEL); + if (!fts) + return -ENOMEM; + + regfile_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regfile_mem) { + dev_err(&pdev->dev, "no regfile_mem resource?\n"); + return -EINVAL; + } + + fts->regfile = devm_ioremap_resource(&pdev->dev, regfile_mem); + if (IS_ERR(fts->regfile)) { + dev_err(&pdev->dev, "fts->regfile map failed\n"); + return PTR_ERR(fts->regfile); + } + + share_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!share_mem) { + dev_err(&pdev->dev, "no share_mem resource?\n"); + return -EINVAL; + } + + fts->mem_tx_physic = (u32)share_mem->start + sizeof(struct msg); + fts->mem_rx_physic = (u32)share_mem->start + sizeof(struct msg); + + fts->tx_shmem_addr = devm_ioremap_resource(&pdev->dev, share_mem); + if (IS_ERR(fts->tx_shmem_addr)) { + dev_err(&pdev->dev, "fts->tx_shmem_addr map failed\n"); + return PTR_ERR(fts->tx_shmem_addr); + } + + fts->msg = (struct msg *)fts->tx_shmem_addr; + + fts->mem_tx = (u64)fts->msg + sizeof(struct msg); + fts->mem_rx = (u64)fts->msg + sizeof(struct msg); + + fts->irq = platform_get_irq(pdev, 0); + if (fts->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return fts->irq; /* -ENXIO */ + } + + if (pdev->dev.of_node) { + fts->clk = devm_clk_get(&pdev->dev, NULL); + + if (IS_ERR(fts->clk)) + return PTR_ERR(fts->clk); + ret = clk_prepare_enable(fts->clk); + if (ret) + return ret; + + fts->max_freq = clk_get_rate(fts->clk); + } else 
if (has_acpi_companion(&pdev->dev)) { + fwnode_property_read_u32(dev->fwnode, "spi-clock", &clk_rate); + fts->max_freq = clk_rate; + } + + fts->bus_num = pdev->id; + device_property_read_u32(&pdev->dev, "reg-io-width", &fts->reg_io_width); + + num_cs = 4; + + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + + fts->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < fts->num_cs; i++) { + cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } else if (has_acpi_companion(&pdev->dev)) { + int n; + int *cs; + struct gpio_desc *gpiod; + + n = gpiod_count(&pdev->dev, "cs"); + + cs = devm_kcalloc(&pdev->dev, n, sizeof(int), GFP_KERNEL); + fts->cs = cs; + + for (i = 0; i < n; i++) { + gpiod = devm_gpiod_get_index_optional(&pdev->dev, "cs", i, + GPIOD_OUT_LOW); + + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + goto out; + } + + if (gpiod) { + cs_gpio = desc_to_gpio(gpiod); + cs[i] = cs_gpio; + } else { + cs[i] = -ENOENT; + } + } + } + + device_property_read_u32(&pdev->dev, "global-cs", &global_cs); + fts->global_cs = global_cs; + + fts->dma_get_ddrdata = false; + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_PHYTIUM_FTC872) + fts->dma_get_ddrdata = true; + + ret = spi_phyt_add_host(&pdev->dev, fts); + if (ret) + goto out; + + platform_set_drvdata(pdev, fts); + + if (sysfs_create_group(&pdev->dev.kobj, &spi_phyt_device_group)) + dev_warn(&pdev->dev, "failed create sysfs\n"); + + return 0; + +out: + clk_disable_unprepare(fts->clk); + return ret; +} + +static int spi_phyt_remove(struct platform_device *pdev) +{ + struct phytium_spi *fts = platform_get_drvdata(pdev); + + spi_phyt_remove_host(fts); + sysfs_remove_group(&pdev->dev.kobj, &spi_phyt_device_group); + clk_disable_unprepare(fts->clk); + + return 0; +} + +#ifdef 
CONFIG_PM_SLEEP +static int spi_phyt_suspend(struct device *dev) +{ + struct phytium_spi *fts = dev_get_drvdata(dev); + + return spi_phyt_suspend_host(fts); +} + +static int spi_phyt_resume(struct device *dev) +{ + struct phytium_spi *fts = dev_get_drvdata(dev); + + return spi_phyt_resume_host(fts); +} +#endif + +static SIMPLE_DEV_PM_OPS(spi_phyt_pm_ops, spi_phyt_suspend, spi_phyt_resume); + +static const struct of_device_id spi_phyt_of_match[] = { + { .compatible = "phytium,spi-2.0", .data = (void *)0 }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, spi_phyt_of_match); + +static const struct acpi_device_id spi_phyt_acpi_match[] = { + {"PHYT0060", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, spi_phyt_acpi_match); + +static struct platform_driver spi_phyt_driver = { + .probe = spi_phyt_probe, + .remove = spi_phyt_remove, + .driver = { + .name = DRIVER_NAME_PHYT, + .of_match_table = of_match_ptr(spi_phyt_of_match), + .acpi_match_table = ACPI_PTR(spi_phyt_acpi_match), + .pm = &spi_phyt_pm_ops, + }, +}; +module_platform_driver(spi_phyt_driver); + +MODULE_AUTHOR("Peng Min "); +MODULE_DESCRIPTION("Platform Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/spi/spi-phytium-qspi.c b/drivers/spi/spi-phytium-qspi.c index 745f0afc0ccce604e7967b5d1e3966aa3ea83c96..9e66a99d8a0e68dc5854c00e312051f24b492fe1 100644 --- a/drivers/spi/spi-phytium-qspi.c +++ b/drivers/spi/spi-phytium-qspi.c @@ -21,7 +21,7 @@ #include #include -#define DRIVER_VERSION "1.0.2" +#define DRIVER_VERSION "1.0.3" #define PHYTIUM_CPU_PART_FTC862 0x862 @@ -267,28 +267,28 @@ static int phytium_qspi_flash_capacity_encode_new(u32 size, switch (size) { case SZ_4M: - *cap |= (0x0 >> (4 * i)); + *cap |= (0x0 << (4 * i)); break; case SZ_8M: - *cap |= (0x1 >> (4 * i)); + *cap |= (0x1 << (4 * i)); break; case SZ_16M: - *cap |= (0x2 >> (4 * i)); + *cap |= (0x2 << (4 * i)); break; case SZ_32M: - *cap |= (0x3 >> (4 * i)); + *cap |= (0x3 << (4 * i)); 
break; case SZ_64M: - *cap |= (0x4 >> (4 * i)); + *cap |= (0x4 << (4 * i)); break; case SZ_128M: - *cap |= (0x5 >> (4 * i)); + *cap |= (0x5 << (4 * i)); break; case SZ_256M: - *cap |= (0x6 >> (4 * i)); + *cap |= (0x6 << (4 * i)); break; case SZ_512M: - *cap |= (0x7 >> (4 * i)); + *cap |= (0x7 << (4 * i)); break; default: ret = -EINVAL; @@ -447,8 +447,8 @@ static int phytium_qspi_exec_op(struct spi_mem *mem, if (op->dummy.nbytes) { cmd |= QSPI_CMD_PORT_LATENCY_MASK; - cmd |= ((op->dummy.nbytes * 8) / op->dummy.buswidth) << - QSPI_CMD_PORT_LATENCY_SHIFT; + cmd |= ((op->dummy.nbytes * 8 - 1) / op->dummy.buswidth) << + QSPI_CMD_PORT_DUMMY_SHIFT; } if (op->data.nbytes) { @@ -690,7 +690,7 @@ static int phytium_qspi_probe(struct platform_device *pdev) qspi = spi_controller_get_devdata(ctrl); qspi->ctrl = ctrl; - reg_name_array = kcalloc(4, sizeof(*reg_name_array), GFP_KERNEL); + reg_name_array = devm_kcalloc(dev, 4, sizeof(*reg_name_array), GFP_KERNEL); if (dev->of_node) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); else if (has_acpi_companion(dev)) { @@ -773,7 +773,7 @@ static int phytium_qspi_probe(struct platform_device *pdev) goto probe_setup_failed; } - if (!qspi->nodirmap) { + if (!qspi->nodirmap && qspi->fnum != 0) { /* * The controller supports direct mapping access only if all * flashes are of same size. diff --git a/drivers/spi/spi-phytium-v2.c b/drivers/spi/spi-phytium-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..dbf834f0e13c4002a2f33de9879a3e7453a10e07 --- /dev/null +++ b/drivers/spi/spi-phytium-v2.c @@ -0,0 +1,570 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SPI core controller driver. + * + * Copyright (c) 2023-2024, Phytium Technology Co., Ltd.. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "spi-phytium.h" + +#define MCP251x_READ 0x03 +#define MCP251x_READ_RXB0 0x90 +#define MCP251x_READ_RXB1 0x94 + +static inline void spi_phyt_enable_chip(struct phytium_spi *fts, u8 enable) +{ + u8 val = enable ? 1 : 2; + + spi_phytium_set_cmd8(fts, PHYTSPI_MSG_CMD_SET_MODULE_EN, val); +} + +static inline void spi_phyt_set_clk(struct phytium_spi *fts, u16 div) +{ + u32 new_div = div; + + spi_phytium_set_cmd32(fts, PHYTSPI_MSG_CMD_SET_BAUDR, new_div); +} + +static inline void spi_phyt_dma_reset(struct phytium_spi *fts, u8 enable) +{ + spi_phytium_set_cmd8(fts, PHYTSPI_MSG_CMD_SET_DMA_RESET, enable); +} + +static inline void spi_phyt_global_cs(struct phytium_spi *fts) +{ + u32 global_cs_en; + u16 cs; + + global_cs_en = GENMASK(fts->num_cs-1, 0) << fts->num_cs; + + cs = (u16)((0x1 << 8) | global_cs_en); + spi_phytium_set_cmd16(fts, PHYTSPI_MSG_CMD_SET_CS, cs); +} + +static inline void spi_phyt_reset_chip(struct phytium_spi *fts) +{ + spi_phyt_dma_reset(fts, 1); + spi_phyt_enable_chip(fts, 0); + if (fts->global_cs) + spi_phyt_global_cs(fts); + spi_phyt_enable_chip(fts, 1); +} + +static inline void spi_phyt_shutdown_chip(struct phytium_spi *fts) +{ + spi_phyt_enable_chip(fts, 0); + spi_phyt_set_clk(fts, 0); +} + +struct phytium_spi_chip { + u8 poll_mode; + u8 type; + void (*cs_control)(u32 command); +}; + +struct chip_data { + u8 cs; + u8 tmode; + u8 type; + + u8 poll_mode; + + u16 clk_div; + u32 speed_hz; + void (*cs_control)(u32 command); +}; + +static void spi_phyt_set_cs(struct spi_device *spi, bool enable) +{ + struct phytium_spi *fts = spi_master_get_devdata(spi->master); + struct chip_data *chip = spi_get_ctldata(spi); + u32 origin; + u16 cs; + + if (fts->tx || fts->rx) + return; + + if (fts->msg->cmd_id == PHYTSPI_MSG_CMD_DATA && + fts->msg->cmd_subid == 
PHYTSPI_MSG_CMD_DATA_TX) + return; + + if (chip && chip->cs_control) + chip->cs_control(!enable); + + if (!enable) { + cs = BIT(spi->chip_select); + spi_phytium_set_cmd16(fts, PHYTSPI_MSG_CMD_SET_CS, cs); + if (fts->global_cs) { + origin = (GENMASK(fts->num_cs-1, 0) << fts->num_cs) + | (1 << spi->chip_select); + cs = (0x1 << 8) | origin; + spi_phytium_set_cmd16(fts, PHYTSPI_MSG_CMD_SET_CS, cs); + } + } else { + if (fts->global_cs) { + origin = (GENMASK(fts->num_cs-1, 0) << fts->num_cs) + & ~(1 << spi->chip_select); + cs = (0x1 << 8) | origin; + spi_phytium_set_cmd16(fts, PHYTSPI_MSG_CMD_SET_CS, cs); + } + } +} + +static irqreturn_t spi_phyt_irq(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct phytium_spi *fts = spi_master_get_devdata(master); + + complete(&fts->cmd_completion); + writel_relaxed(0, fts->regfile + SPI_REGFILE_RV2AP_INTR_STATE); + writel_relaxed(0x10, fts->regfile + SPI_REGFILE_RV2AP_INT_CLEAN); + + return IRQ_HANDLED; +} + +static int spi_phyt_transfer_one(struct spi_master *master, + struct spi_device *spi, struct spi_transfer *transfer) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + struct chip_data *chip = spi_get_ctldata(spi); + int ret; + + fts->tx = (void *)transfer->tx_buf; + fts->tx_end = fts->tx + transfer->len; + fts->rx = transfer->rx_buf; + fts->rx_end = fts->rx + transfer->len; + fts->len = transfer->len; + + if (chip->cs_control) { + if (fts->rx && fts->tx) + chip->tmode = TMOD_TR; + else if (fts->rx) + chip->tmode = TMOD_RO; + else + chip->tmode = TMOD_TO; + } + + if (fts->tx && fts->len == 1) { + if ((*(u8 *)fts->tx == SPINOR_OP_WREN) && fts->spi_write_flag == 0) { + spi_phytium_write_pre(fts, spi->chip_select, + transfer->bits_per_word, spi->mode, + chip->tmode, 3, fts->spi_write_flag); + fts->spi_write_flag++; + return 0; + } + + if ((*(u8 *)fts->tx == SPINOR_OP_BE_4K) && (fts->spi_write_flag == 1) && + fts->flash_read == 0 && fts->flash_erase != 1) { + fts->spi_write_flag++; + 
fts->flash_erase = 1; + return 0; + } + + if ((*(u8 *)fts->tx == SPINOR_OP_CHIP_ERASE) && (fts->spi_write_flag == 1) && + fts->flash_read == 0 && fts->flash_erase == 0) { + ret = spi_phytium_flash_erase(fts, spi->chip_select, + transfer->bits_per_word, + spi->mode, chip->tmode, 3, SPINOR_OP_CHIP_ERASE); + fts->spi_write_flag = 0; + fts->flash_erase = 2; + } + + if ((*(u8 *)fts->tx == SPINOR_OP_READ || *(u8 *)fts->tx == SPINOR_OP_READ_FAST || + *(u8 *)fts->tx == SPINOR_OP_READ_4B || + *(u8 *)fts->tx == SPINOR_OP_READ_FAST_4B) && + fts->spi_write_flag == 0 && fts->flash_read == 0 && + fts->flash_erase == 0) { + fts->flash_cmd = *(u8 *)fts->tx; + fts->spi_write_flag++; + fts->flash_read = 1; + return 0; + } + + if ((fts->spi_write_flag == 1) && fts->flash_read == 0 && + fts->flash_write == 0 && ((*(u8 *)fts->tx == SPINOR_OP_PP) || + (*(u8 *)fts->tx == SPINOR_OP_PP_4B))) { + fts->flash_cmd = *(u8 *)fts->tx; + fts->spi_write_flag++; + fts->flash_write = 1; + return 0; + } + + if ((*(u8 *)fts->tx == SPINOR_OP_RDSR) && fts->flash_write == 3) { + fts->read_sr = 1; + fts->flash_write = 0; + return 0; + } + + if ((*(u8 *)fts->tx == SPINOR_OP_RDSR) && fts->flash_erase == 2) { + fts->read_sr = 1; + return 0; + } + } + + if (fts->read_sr) { + *(u8 *)(fts->rx) = 0; + fts->read_sr = 0; + return 0; + } + + if (fts->tx) { + if (fts->flash_erase == 1) { + ret = spi_phytium_flash_erase(fts, spi->chip_select, + transfer->bits_per_word, + spi->mode, chip->tmode, 3, SPINOR_OP_BE_4K); + if (ret) { + dev_err(&master->dev, "flash erase failed\n"); + return ret; + } + fts->spi_write_flag = 0; + fts->flash_erase++; + } else if (fts->flash_read) { + ret = spi_phytium_flash_erase(fts, spi->chip_select, + transfer->bits_per_word, + spi->mode, chip->tmode, 1, fts->flash_cmd); + if (ret) { + dev_err(&master->dev, "transfer read-command failed\n"); + return ret; + } + fts->spi_write_flag = 0; + fts->flash_read = 0; + } else if (fts->flash_write == 1) { + fts->flash_write++; + ret = 
spi_phytium_flash_write(fts, fts->flash_cmd); + if (ret) { + dev_err(&master->dev, "flash write failed\n"); + return ret; + } + } else if (fts->flash_erase == 2 && (*(u8 *)fts->tx == SPINOR_OP_WRDI)) { + ret = spi_phytium_write(fts, spi->chip_select, transfer->bits_per_word, + spi->mode, chip->tmode, 3, fts->spi_write_flag); + if (ret) { + dev_err(&master->dev, "transfer disable-command failed\n"); + return ret; + } + fts->flash_erase = 0; + } else { + fts->flags = 1; + if (fts->spi_write_flag == 0 && *(u8 *)(fts->tx) != MCP251x_READ + && *(u8 *)(fts->tx) != MCP251x_READ_RXB0 + && *(u8 *)(fts->tx) != MCP251x_READ_RXB1) + fts->flags = 3; + + ret = spi_phytium_write(fts, spi->chip_select, transfer->bits_per_word, + spi->mode, chip->tmode, fts->flags, fts->spi_write_flag); + if (ret) { + dev_err(&master->dev, "write command failed\n"); + return ret; + } + if (fts->flash_write == 2) + fts->flash_write++; + fts->spi_write_flag = 0; + } + } + + if (fts->rx) { + ret = spi_phytium_read(fts, spi->chip_select, transfer->bits_per_word, + spi->mode, chip->tmode, 2); + if (ret) { + dev_err(&master->dev, "read data failed\n"); + return ret; + } + } + + return ret; +} + +static void spi_phyt_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct phytium_spi *fts = spi_master_get_devdata(master); + + spi_phyt_reset_chip(fts); +} + +static int spi_phyt_setup(struct spi_device *spi) +{ + struct phytium_spi_chip *chip_info = NULL; + struct chip_data *chip; + struct spi_master *master = spi->master; + struct phytium_spi *fts = spi_master_get_devdata(master); + u8 data_width, scph, scpol, tmode; + u16 mode; + u16 clk_div; + + spi_phyt_enable_chip(fts, 0); + + clk_div = (fts->max_freq / spi->max_speed_hz + 1) & 0xfffe; + spi_phyt_set_clk(fts, clk_div); + fts->clk_div = clk_div; + + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + chip_info = 
spi->controller_data; + + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } + + chip->tmode = 0; + + data_width = spi->bits_per_word; + spi_phytium_set_cmd8(fts, PHYTSPI_MSG_CMD_SET_DATA_WIDTH, data_width); + + scph = spi->mode & (0x1); + scpol = spi->mode >> 1; + mode = (scph << 8) | scpol; + spi_phytium_set_cmd16(fts, PHYTSPI_MSG_CMD_SET_MODE, mode); + + tmode = chip->tmode; + spi_phytium_set_cmd8(fts, PHYTSPI_MSG_CMD_SET_TMOD, tmode); + + spi_phyt_enable_chip(fts, 1); + + return 0; +} + +static void spi_phyt_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +void spi_phyt_enable_debug(struct phytium_spi *fts) +{ + u32 reg; + + reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG); + + phytium_write_regfile(fts, SPI_REGFILE_DEBUG, + reg | SPI_REGFILE_DEBUG_VAL); +} + +void spi_phyt_disable_debug(struct phytium_spi *fts) +{ + u32 reg; + + reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG); + reg &= ~SPI_REGFILE_DEBUG_VAL; + + phytium_write_regfile(fts, SPI_REGFILE_DEBUG, reg); +} + +void spi_phyt_disable_alive(struct phytium_spi *fts) +{ + u32 reg; + + reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG); + reg &= ~SPI_REGFILE_ALIVE_VAL; + + phytium_write_regfile(fts, SPI_REGFILE_DEBUG, reg); +} + +void spi_watchdog(struct phytium_spi *fts) +{ + u32 reg; + + reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG); + phytium_write_regfile(fts, SPI_REGFILE_DEBUG, + reg | SPI_REGFILE_HEARTBIT_VAL); +} + +static void spi_phyt_timer_handle(struct timer_list *t) +{ + struct phytium_spi *fts = from_timer(fts, t, timer); + + if (fts->alive_enabled && fts->watchdog) { + if (fts->runtimes < 20) + fts->watchdog(fts); + + fts->runtimes++; + } + + mod_timer(&fts->timer, jiffies + msecs_to_jiffies(10)); +} + +void spi_handle_debug_err(struct phytium_spi *fts) +{ + struct device *dev = 
&fts->master->dev; + u32 reg, len, i; + + reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG); + + if (reg & SPI_REGFILE_HAVE_LOG) { + len = strnlen(fts->log, fts->log_size); + dev_info(dev, "log len :%d,addr: 0x%llx,size:%d\n", + len, (u64)fts->log, fts->log_size); + if (len > SPI_LOG_LINE_MAX_LEN) { + for (i = 0; i + SPI_LOG_LINE_MAX_LEN < len; i += SPI_LOG_LINE_MAX_LEN) + dev_info(dev, "(log)%.*s\n", SPI_LOG_LINE_MAX_LEN, &fts->log[i]); + } else { + dev_info(dev, "(log)%.*s\n", SPI_LOG_LINE_MAX_LEN, &fts->log[0]); + } + + for (i = 0; i < fts->log_size; i++) + fts->log[i] = 0; + } + + reg &= ~SPI_REGFILE_HAVE_LOG; + phytium_write_regfile(fts, SPI_REGFILE_DEBUG, reg); +} + +static void spi_phyt_hw_init(struct device *dev, struct phytium_spi *fts) +{ + u32 reg, i; + + spi_phytium_default(fts); + + reg = phytium_read_regfile(fts, SPI_REGFILE_DEBUG); + fts->ddr_paddr = ((reg & SPI_REGFILE_ADDR_MASK) >> 8) << SPI_DDR_ADDR_HIGH; + fts->log_size = ((reg & SPI_REGFILE_SIZE_MASK) >> 4) * SPI_DEBUG_LOG_SIZE; + fts->log = devm_ioremap(dev, fts->ddr_paddr, fts->log_size); + + if (IS_ERR(fts->log)) { + dev_err(dev, "log_addr is err\n"); + return; + } + + for (i = 0; i < fts->log_size; i++) + fts->log[i] = 0; +} + +int spi_phyt_add_host(struct device *dev, struct phytium_spi *fts) +{ + struct spi_master *master; + int ret; + + WARN_ON(fts == NULL); + + master = spi_alloc_master(dev, 0); + if (!master) + return -ENOMEM; + + fts->master = master; + snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); + + init_completion(&fts->cmd_completion); + ret = devm_request_irq(dev, fts->irq, spi_phyt_irq, IRQF_SHARED, fts->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); + goto err_free_master; + } + + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->bus_num = fts->bus_num; + master->num_chipselect = fts->num_cs; + master->setup = spi_phyt_setup; + master->cleanup = 
spi_phyt_cleanup; + master->set_cs = spi_phyt_set_cs; + master->transfer_one = spi_phyt_transfer_one; + master->handle_err = spi_phyt_handle_err; + master->max_speed_hz = fts->max_freq; + master->dev.of_node = dev->of_node; + master->dev.fwnode = dev->fwnode; + master->flags = SPI_CONTROLLER_GPIO_SS; + master->flags |= SPI_CONTROLLER_HALF_DUPLEX; + + spi_master_set_devdata(master, fts); + + spi_phyt_disable_debug(fts); + spi_phyt_disable_alive(fts); + fts->runtimes = 0; + fts->debug_enabled = false; + fts->alive_enabled = false; + + fts->watchdog = spi_watchdog; + fts->handle_debug_err = spi_handle_debug_err; + + fts->timer.expires = jiffies + msecs_to_jiffies(50); + timer_setup(&fts->timer, spi_phyt_timer_handle, 0); + add_timer(&fts->timer); + + spi_phyt_hw_init(dev, fts); + + ret = devm_spi_register_master(dev, master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + goto err_exit; + } + + return 0; + +err_exit: + spi_phyt_enable_chip(fts, 0); +err_free_master: + spi_master_put(master); + return ret; +} +EXPORT_SYMBOL_GPL(spi_phyt_add_host); + +void spi_phyt_remove_host(struct phytium_spi *fts) +{ + del_timer(&fts->timer); + spi_phyt_shutdown_chip(fts); +} +EXPORT_SYMBOL_GPL(spi_phyt_remove_host); + +int spi_phyt_suspend_host(struct phytium_spi *fts) +{ + int ret; + + ret = spi_controller_suspend(fts->master); + if (ret) + return ret; + + spi_phyt_shutdown_chip(fts); + return 0; +} +EXPORT_SYMBOL_GPL(spi_phyt_suspend_host); + +int spi_phyt_resume_host(struct phytium_spi *fts) +{ + int ret; + + spi_phyt_hw_init(&fts->master->dev, fts); + + spi_phyt_enable_chip(fts, 0); + spi_phyt_set_clk(fts, fts->clk_div); + spi_phyt_enable_chip(fts, 1); + + ret = spi_controller_resume(fts->master); + if (ret) + dev_err(&fts->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(spi_phyt_resume_host); + +MODULE_AUTHOR("Peng Min "); +MODULE_DESCRIPTION("Driver for Phytium SPI controller core"); +MODULE_LICENSE("GPL"); 
diff --git a/drivers/spi/spi-phytium.c b/drivers/spi/spi-phytium.c index 131baeacdcd14e079584a881de22ab13cbd3a7af..853e604906890a0a225aa87b168ab0df35124b5c 100644 --- a/drivers/spi/spi-phytium.c +++ b/drivers/spi/spi-phytium.c @@ -405,14 +405,6 @@ int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) fts->dma_addr = (dma_addr_t)(fts->paddr + DR); snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); - spi_hw_init(dev, fts); - - ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, fts->name, master); - if (ret < 0) { - dev_err(dev, "can not get IRQ\n"); - goto err_free_master; - } - master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); @@ -438,7 +430,17 @@ int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) } } + spi_hw_init(dev, fts); + spi_master_set_devdata(master, fts); + + ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, + fts->name, master); + if (ret < 0) { + dev_err(dev, "can not get IRQ\n"); + goto err_free_master; + } + ret = spi_register_controller(master); if (ret) { dev_err(&master->dev, "problem registering spi master\n"); diff --git a/drivers/spi/spi-phytium.h b/drivers/spi/spi-phytium.h index 781a4fae52dbdb3b58f610df0dab8ed2b6c98a3e..6bb672aed7bf7f17f0cbceead1f31e02672d362e 100644 --- a/drivers/spi/spi-phytium.h +++ b/drivers/spi/spi-phytium.h @@ -55,6 +55,90 @@ #define SPI_DMA_RDMAE (1 << 0) #define SPI_DMA_TDMAE (1 << 1) #define SPI_WAIT_RETRIES 5 + +#define SPI_REGFILE_SIZE (0x48) +#define SPI_REGFILE_AP2RV_INTR_STATE (0x24) +#define SPI_REGFILE_RV2AP_INTR_STATE (0x2c) +#define SPI_REGFILE_RV2AP_INT_CLEAN (0x74) +#define SPI_REGFILE_DEBUG (0x58) + +#define SPI_REGFILE_DEBUG_VAL BIT(0) +#define SPI_REGFILE_ALIVE_VAL BIT(1) +#define SPI_REGFILE_HEARTBIT_VAL BIT(2) +#define SPI_REGFILE_HAVE_LOG BIT(3) +#define SPI_REGFILE_SIZE_MASK GENMASK(7, 4) +#define SPI_REGFILE_ADDR_MASK GENMASK(27, 
8) + +#define SPI_DDR_ADDR_HIGH 12 +#define SPI_DEBUG_LOG_SIZE 4096 + +#define SPI_LOG_LINE_MAX_LEN 400 + +#define SPI_MODULE_OPT_CMD 0x20 + +#define SPI_TRANS_DATA_SIZE 1024 +#define FLASH_PAGE_SIZE 256 + +#define SPI_MSG_COMPLETE_OK 1 +#define SPI_MSG_COMPLETE_KO 0 + +#define SPI_RESULT_IGNORE_LVL (0) +#define SPI_RESULT_WRITE_ISR_LVL (1) +#define SPI_RESULT_READ_ISR_LVL (2) + +#define SPI_SHMEM_TX_MSG_MAX_CNT 1 + +#define SPI_MASTER_TIMEOUT 8 + +#define SPI_DEFAULT_CLK 50000000 + +enum phytspi_msg_cmd_id { + PHYTSPI_MSG_CMD_DEFAULT = 0, + PHYTSPI_MSG_CMD_SET, + PHYTSPI_MSG_CMD_GET, + PHYTSPI_MSG_CMD_DATA, + PHYTSPI_MSG_CMD_REPORT, +}; + +enum phytspi_set_subid { + PHYTSPI_MSG_CMD_SET_MODULE_EN = 0, + PHYTSPI_MSG_CMD_SET_DATA_WIDTH, + PHYTSPI_MSG_CMD_SET_MODE, + PHYTSPI_MSG_CMD_SET_TMOD, + PHYTSPI_MSG_CMD_SET_BAUDR, + PHYTSPI_MSG_CMD_SET_INT_TI, + PHYTSPI_MSG_CMD_SET_NDF, + PHYTSPI_MSG_CMD_SET_CS, + PHYTSPI_MSG_CMD_SET_DMA_RESET, +}; + +enum phytspi_data_subid { + PHYTSPI_MSG_CMD_DATA_TX = 0, + PHYTSPI_MSG_CMD_DATA_RX, + PHYTSPI_MSG_CMD_DATA_FLASH_TX, + PHYTSPI_MSG_CMD_FLASH_ERASE, + PHYTSPI_MSG_CMD_DATA_DMA_TX, + PHYTSPI_MSG_CMD_DATA_DMA_RX, + PHYTSPI_MSG_CMD_DATA_FLASH_DMA_TX, +}; + +struct msg { + u8 reserved; + u8 seq; + u8 cmd_id; + u8 cmd_subid; + u16 len; + u8 status1; + u8 status0; + u8 data[56]; +}; + +struct spi_trans_msg_info { + u32 msg_total_num; + u32 shmem_data_addr; + int result; +}; + struct phytium_spi; struct phytium_spi_dma_ops { int (*dma_init)(struct device *dev, struct phytium_spi *fts); @@ -71,6 +155,19 @@ struct phytium_spi { char name[16]; void __iomem *regs; + void __iomem *regfile; + void __iomem *tx_shmem_addr; + void *rx_shmem_addr; + + struct msg *msg; + u32 mem_tx_physic; + u32 mem_rx_physic; + u64 mem_tx; + u64 mem_rx; + + u16 clk_div; + int module; + bool global_cs; bool dma_en; unsigned long paddr; @@ -90,7 +187,31 @@ struct phytium_spi { void *rx_end; u8 n_bytes; int dma_mapped; + struct clk *clk; irqreturn_t 
(*transfer_handler)(struct phytium_spi *fts); + + int cmd_err; + u32 cur_tx_tail; + struct completion cmd_completion; + + u8 flags; + u8 spi_write_flag; + u8 flash_erase; + u8 flash_read; + u8 flash_write; + u8 read_sr; + u8 flash_cmd; + + bool debug_enabled; + bool alive_enabled; + struct timer_list timer; + u32 runtimes; // for debug + u64 ddr_paddr; + char *log; + u32 log_size; + void (*watchdog)(struct phytium_spi *fts); + void (*handle_debug_err)(struct phytium_spi *fts); + /* DMA info */ u32 current_freq; /* frequency in hz */ struct dma_chan *txchan; @@ -102,12 +223,15 @@ struct phytium_spi { dma_addr_t dma_addr; /* phy address of the Data register */ const struct phytium_spi_dma_ops *dma_ops; struct completion dma_completion; + + bool dma_get_ddrdata; }; static inline u32 phytium_readl(struct phytium_spi *fts, u32 offset) { return __raw_readl(fts->regs + offset); } + static inline u16 phytium_readw(struct phytium_spi *fts, u32 offset) { return __raw_readw(fts->regs + offset); @@ -188,10 +312,42 @@ static inline void spi_shutdown_chip(struct phytium_spi *fts) spi_set_clk(fts, 0); fts->current_freq = 0; } + +static inline u32 phytium_read_regfile(struct phytium_spi *fts, u32 reg_off) +{ + return readl_relaxed(fts->regfile + reg_off); +} + +static inline void phytium_write_regfile(struct phytium_spi *fts, u32 reg_off, u32 val) +{ + writel_relaxed(val, fts->regfile + reg_off); +} + +extern void spi_phytium_default(struct phytium_spi *fts); +extern void spi_phytium_set_cmd8(struct phytium_spi *fts, u16 sub_cmd, u8 data); +extern void spi_phytium_set_cmd16(struct phytium_spi *fts, u16 sub_cmd, u16 data); +extern void spi_phytium_set_cmd32(struct phytium_spi *fts, u16 sub_cmd, u32 data); +extern void spi_phytium_data_cmd_write(struct phytium_spi *fts, u16 sub_cmd); +extern void spi_phytium_data_cmd_read(struct phytium_spi *fts, u16 sub_cmd); +extern void spi_phytium_write_pre(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode, u8 tmode, + u8 flags, u8 
spi_write_flag); +extern int spi_phytium_flash_erase(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode, + u8 tmode, u8 flags, u8 cmd); +extern int spi_phytium_flash_write(struct phytium_spi *fts, u8 cmd); +extern int spi_phytium_write(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode, + u8 tmode, u8 flags, u8 spi_write_flag); +extern int spi_phytium_read(struct phytium_spi *fts, u8 cs, u8 dfs, u8 mode, + u8 tmode, u8 flags); +extern int spi_phyt_add_host(struct device *dev, struct phytium_spi *fts); +extern void spi_phyt_remove_host(struct phytium_spi *fts); +extern int spi_phyt_suspend_host(struct phytium_spi *fts); +extern int spi_phyt_resume_host(struct phytium_spi *fts); + extern int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts); extern void phytium_spi_remove_host(struct phytium_spi *fts); extern int phytium_spi_suspend_host(struct phytium_spi *fts); extern int phytium_spi_resume_host(struct phytium_spi *fts); extern void phytium_spi_dmaops_set(struct phytium_spi *fts); extern int phytium_spi_check_status(struct phytium_spi *fts, bool raw); + #endif /* PHYTIUM_SPI_HEADER_H */ diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index f9aef39cac2e93dbb488b62f44236c49eecbf338..af78adf909d858ddcef907c07a56a084de1e5805 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -78,4 +78,5 @@ source "drivers/staging/qlge/Kconfig" source "drivers/staging/vme_user/Kconfig" +source "drivers/staging/phytium-npu/Kconfig" endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index ffa70dda481d36c526a11275a2b7896957a4edc3..a6f79c989bcfd45081d62d6760d466c615db9abc 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_PI433) += pi433/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_QLGE) += qlge/ +obj-$(CONFIG_PHYTIUM_NPU) += phytium-npu/ diff --git a/drivers/staging/phytium-npu/Kconfig 
b/drivers/staging/phytium-npu/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..6f15304154139bbea2a9cf5415b5c33e67c26113 --- /dev/null +++ b/drivers/staging/phytium-npu/Kconfig @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig PHYTIUM_NPU + tristate "Phytium neural network processing unit" + depends on ARCH_PHYTIUM + help + Say Y here if your want support for Phytium NPU controller framework. + This is common support for devices that embed the Phytium NPU IP. + + To compile this driver as a module, choose M here: the module will be + called phytium_npu. + +if PHYTIUM_NPU +choice + prompt "Select the Phytium NPU Hardware" + default NPU_PLATFORM + help + You need to select a suitable NPU controller in the current board. + This option allows you to decide which Phytium NPU will be built + in system. + If in doubt, select 'N' + config NPU_PLATFORM + tristate "driver runs on platform" + help + Say Y here if you want to support for IO Mapped Phytium NPU controller. + This support is for devices that have the Phytium NPU controller IP. + To compile this driver as a module, choose M here: the module will be + called phytium_npu_platform. + config NPU_PCI + tristate "driver runs on PCI hardware" + help + Say Y here if you want to support for Phytium NPU controller connected + to the PCI bus. This support is for devices that have the Phytium NPU + controller IP embedded into a PCI device. + To compile this driver as a module, choose M here: the module will be + called phytium_npu_pci. 
+endchoice +endif diff --git a/drivers/staging/phytium-npu/Makefile b/drivers/staging/phytium-npu/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ecbcb639754a5e17653232ee87224f06050a98d6 --- /dev/null +++ b/drivers/staging/phytium-npu/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +ccflags-y += -I$(src)/include/ +ccflags-y += -I$(srctree)/drivers/staging/phytium-npu/include/ + +ifdef CONFIG_NPU_PLATFORM + ifeq ($(CONFIG_NPU_PLATFORM),m) + EXTRA_CFLAGS += -DPHYTIUM_NPU_PLATFORM + endif + + ifeq ($(CONFIG_NPU_PLATFORM),y) + EXTRA_CFLAGS += -DPHYTIUM_NPU_PLATFORM + endif +endif + +obj-$(CONFIG_PHYTIUM_NPU) += phytium_npu.o +phytium_npu-y := phytium_npu_common.o phytium_npu_dev.o \ +phytium_npu_dev_mmu.o phytium_npu_mm_mmu.o phytium_npu_workqueue.o \ +phytium_npu_session.o phytium_npu_uapi.o phytium_npu_debug.o + +obj-$(CONFIG_NPU_PLATFORM) += phytium_npu_platform.o +obj-$(CONFIG_NPU_PCI) += phytium_npu_pci.o diff --git a/drivers/staging/phytium-npu/Makefile.testing b/drivers/staging/phytium-npu/Makefile.testing new file mode 100644 index 0000000000000000000000000000000000000000..da579693a6becfec5304e5e4f888ec3ce0ac0be4 --- /dev/null +++ b/drivers/staging/phytium-npu/Makefile.testing @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +export CONFIG_PHYTIUM_NPU := y +export CONFIG_NPU_PLATFORM := y +export CONFIG_PHYTIUM_NPU_DMA_HEAP := y +include Makefile +ARCH := arm64 +CROSS_COMPILE := aarch64-linux-gnu- + +#------------------------------------ +# for internal use +#------------------------------------ + +ifeq ($(KERNELRELEASE),) + +#KDIR ?= /lib/modules/$(shell uname -r)/build +KSRC := /home/chengquan/code/phytium-linux-kernel-master/ +PWD := $(shell pwd) + +# kernel warning levels: as high as possible: +# W=12 and W=123 seem to warn about linux kernel headers, so use W=1. +KWARN := W=1 + +ifneq (,$(shell which sparse)) +# C=1: use 'sparse' for extra warnings, if it is avail. 
+SPARSE := C=1 +endif + +all: modules +modules: + $(MAKE) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE)-C $(KSRC) M=$(shell pwd) modules + +clean: + $(MAKE) -C $(KDIR) M=$(PWD) clean +check: + cd $(KDIR); scripts/checkpatch.pl -f `find $(PWD)/include -name "*\.[ch]"` + +endif # KERNELRELEASE diff --git a/drivers/staging/phytium-npu/include/phytium_npu.h b/drivers/staging/phytium-npu/include/phytium_npu.h new file mode 100644 index 0000000000000000000000000000000000000000..434be712094ad54be1c3ec761f971f85dbf4d8aa --- /dev/null +++ b/drivers/staging/phytium-npu/include/phytium_npu.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL */ +/* + * Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#ifndef __PHYTIUM_NPU_H__ +#define __PHYTIUM_NPU_H__ +#include +#include +#include +#include +#include "phytium_npu_mmu.h" + +#define NPU_FTN300_PCI_DRV_NAME "NPU_FTN300_pci" +#define NPU_FTN300_PLAT_DRV_NAME "NPU_FTN300_platform" +#define NPU_FTN300_DRIVER_VERSION "1.0.1" + +#define NPU_SUPPORT_SESSION_NUM 128 +#ifndef TRUE +#define TRUE 1 +#endif +#ifndef FALSE +#define FALSE 0 +#endif + +#define NPU_STREAM_NONE 0x0 +#define NPU_STREAM_IN_HW 0x1 +#define NPU_STREAM_SCHEDULED 0x09 +#define NPU_STREAM_BUFF_IN_HW 0x10 +#define NPU_STREAM_BUFF_NO_READY 0x11 +#define NPU_STREAM_BUFF_BUSY 0x12 + +#define NPU_STREAM_INFER_NONE 0x0 +#define NPU_STREAM_INFER_WAIT 0x2 +#define NPU_STREAM_INFER_WORK 0x3 +#define NPU_STREAM_INFER_DONE 0x4 + +#define NPU_MMU_CONTEXT_NUM 2 +#define NPU_MMU_CONTEXT_MODULE_ID 0 +#define NPU_MMU_CONTEXT_IO_ID 1 + +#define NEED_NOTHING 0x0 +#define NEED_SCHEDULE 0x1 +#define NEED_QUEUE_STREAM 0x2 + +#define NPU_AUTO_SUSPEND_TIMEOUT 5000 +#define NPU_ACPI_POWER_TYPE 0x1 +#define GET_TIME_NS(ts) ktime_get_real_ts64(ts) +/* state of the NPU device: power up or down */ +enum npu_power_state { + NPU_STATE_OFF, + NPU_STATE_ON +}; + +struct phytium_npu_stream; + +struct phytium_npu_debugfs { + u32 debug_mode; /* register|mem file */ + /* 
perf|band|crc|PB_stream|PB_layer|PB_pass|crc_stream|crc_layer|crc|pass */ + u32 debug_type; + u32 debug_fd; /* debug perf or band memory fd */ + u32 debug_crc_fd; /* debug crc memory fd */ + u32 debug_size; + u64 debug_addr; + u64 debug_crc_addr; + struct iosys_map debug_map; + struct iosys_map debug_crc_map; + struct dentry *sess_dbgfs_dir; +}; + +struct phytium_npu_session { + struct phytium_npu_dev *npu_dev; + struct list_head sess_list_entry; + struct list_head sched_list_entry; + struct list_head stream_list; + struct list_head response_list; + struct list_head map_mmu_list; + struct phytium_npu_mmu_context mmu_ctx[NPU_MMU_CONTEXT_NUM]; + struct npu_mmu_ctx *share_mctx; //mctx is belong to sesson + struct phytium_npu_debugfs dbgfs; + wait_queue_head_t response_wq; + int id; +}; + +struct phytium_npu_dev { + struct platform_device *pdev; + struct device *dev; + struct miscdevice miscdev; + struct list_head sessions_list; + struct list_head sched_sess_list; + struct dentry *dbgfs_root_dir; + struct phytium_npu_stream *activated_stream; /* stream had work on hardware */ + struct phytium_npu_stream *queued_stream; /* stream wait for activate */ + struct workqueue_struct *stream_wq; + struct work_struct stream_work; + struct delayed_work rt_delay_work; + struct npu_mmu_config_global nmmu_config; + struct mutex mutex_lock; /* protect stream */ + spinlock_t spin_irq_lock; /* protect interrupt */ + int used_mmu_context_id[4]; + u32 irq_status; + int sessions_count; + int is_cache_stream_on; + int is_use_repeat; + int is_platform_dev; + int irq; + int power_status; + int load_status; + int voltage_val; + int clock_freq; + void __iomem *reg_base; + void __iomem *power_reg_base; + struct timespec64 ts; + struct timespec64 te; + struct timespec64 tspan; +}; + +struct npu_user_stream_rsp { + struct phytium_npu_session *session; + struct list_head stream_rsp_list_entry; + int rsp_size; + struct npu_user_rsp ursp; +}; + +struct phytium_npu_stream { + struct 
phytium_npu_session *session; + struct list_head stream_list_entry; + struct npu_user_stream_rsp *rsp; + int stream_status; /* NONE|IN HARDWARE */ + int infer_status; /* NONE|QUEUED|WORK|DONE */ + int user_repeat_count; /* REPEAT COUNT */ + int all_buf_ready; /* NONE|READY */ + int is_rollback; /* NONE|ROLLBACK */ + struct npu_user_submit_stream nustream; +}; + +//extern shared function +int phytium_npu_session_init(struct phytium_npu_dev *npu, struct phytium_npu_session *session); +struct phytium_npu_session *phytium_npu_session_create(struct device *dev); +int phytium_npu_register_dev(struct device *dev, void *plat_data, void __iomem *reg_base); +int phytium_npu_register_misc(struct phytium_npu_dev *npudev); +int phytium_npu_unregister_misc(struct phytium_npu_dev *npudev); +int phytium_npu_session_release(struct phytium_npu_dev *npu, struct phytium_npu_session *session); +struct npu_mctx_map *phytium_npu_find_mmu_ctx_map(struct phytium_npu_session *sess, int dma_buf_fd); +struct dma_buf *phytium_npu_check_and_get_dma_buf(struct phytium_npu_session *sess, int dma_buf_fd); +struct phytium_npu_dev *phytium_npu_get_npudev(void); +int phytium_npu_mmu_dev_init(struct phytium_npu_dev *npudev, int page_size); +int phytium_npu_create_new_mmu_context(struct phytium_npu_dev *npu, + struct phytium_npu_session *session); +int phytium_npu_mmu_map_init(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + struct npu_memory_map *usr_mmap); +int phytium_npu_import_dmabuf(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + struct npu_memory_map *usr_mmap); +int phytium_npu_release_mmu_context(struct phytium_npu_dev *npu, + struct phytium_npu_session *session); +int phytium_npu_mmu_unmap(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + struct npu_memory_unmap *usr_unmmap); +void phytium_npu_mmu_output_status(struct phytium_npu_dev *npudev); +int phytium_npu_mmu_config_dev_mmu(struct phytium_npu_session *sess); +int 
phytium_npu_mmu_dev_flush_tlb(struct phytium_npu_dev *npudev, int ctxid); +int phytium_npu_write_stream(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + const char *stream_buf, size_t size); +int phytium_npu_delete_stream(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + struct npu_delete_stream *stream); +int phytium_npu_handle_irq(struct device *dev); +u32 phytium_npu_get_irq_status(struct phytium_npu_dev *npudev); +u32 phytium_npu_get_irq_enable_status(struct phytium_npu_dev *npudev); +void phytium_npu_clear_msi_irq(struct phytium_npu_dev *npudev); +void phytium_npu_clear_irq_status(struct phytium_npu_dev *npudev, u32 event); +void phytium_npu_config_preload(struct phytium_npu_dev *npudev); +void phytium_npu_config_start_inference(struct phytium_npu_dev *npudev, + struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream); +int phytium_npu_handle_thread_irq(struct device *dev); +int phytium_npu_hw_reset_self(struct phytium_npu_dev *npudev); +void phytium_npu_config_hl_wdt(struct phytium_npu_dev *npudev); +void phytium_npu_config_event(struct phytium_npu_dev *npudev, u32 events, int is_enable); +u32 phytium_npu_get_axi_err_status(struct phytium_npu_dev *npudev); +void phytium_npu_output_mem_wdt_err(struct phytium_npu_dev *npudev); +void do_work(struct work_struct *work); +void phytium_npu_schedule_stream_queues(struct phytium_npu_dev *npu, bool asynchronous); +int phytium_npu_check_stream_buf_is_ready(struct phytium_npu_stream *nstream); +void phytium_npu_set_stream_buf_status_with_fd(struct phytium_npu_session *sess, + int fd, int status); +void phytium_npu_repeat_stream(struct phytium_npu_session *sess, int stream_id, int is_repeat); + +void phytium_npu_change_schedule_session(struct phytium_npu_session *sess, + struct phytium_npu_dev *npu, + int is_change); +int phytium_npu_config_dma_address(struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream); +void phytium_npu_config_clock(struct phytium_npu_dev 
*sess, int is_enable); +int phytium_npu_submit_stream(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream); +int phytium_npu_prepare_hw_4_queued_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream); +int phytium_npu_try_excute_queued_stream(struct phytium_npu_dev *npu); +int phytium_npu_rollback_stream(struct phytium_npu_dev *npu); +int phytium_npu_update_activated_stream_4_err_mmu(struct phytium_npu_dev *npu); +int phytium_npu_session_release_all(struct phytium_npu_dev *npudev); +int phytium_npu_unregister_dev(struct device *dev); +int phytiun_npu_check_debug_fs_cfg(struct phytium_npu_session *sess); +void phytium_npu_debug_set_hw_register(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess); +void phytium_npu_debugfs_init(struct phytium_npu_dev *npu); +void phytium_npu_debugfs_session_init(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess); +void phytium_npu_debug_show_register(struct phytium_npu_dev *npu, struct phytium_npu_session *sess); +void phytium_npu_debugfs_session_remove(struct phytium_npu_session *sess); +void phytium_npu_debugfs_remove(struct phytium_npu_dev *npu); +void phytium_npu_try_resume_work(struct phytium_npu_dev *npudev); +int phytium_npu_schedule_suspend(struct phytium_npu_dev *npudev, u32 delay_ms); +int phytium_npu_common_resume(struct device *dev); +int phytium_npu_common_suspend(struct device *dev); +void phytium_npu_get_time_span(struct timespec64 *start, struct timespec64 *end, + struct timespec64 *span); +#endif diff --git a/drivers/staging/phytium-npu/include/phytium_npu_leopard_reg.h b/drivers/staging/phytium-npu/include/phytium_npu_leopard_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..fd3f04158004a26fb6275b1cda4bd37b3f87f0e0 --- /dev/null +++ b/drivers/staging/phytium-npu/include/phytium_npu_leopard_reg.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL */ +/* + * 
Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#ifndef __PHYTIUM_NPU_LEOPARD_NEW_REG_H__ +#define __PHYTIUM_NPU_LEOPARD_NEW_REG_H__ + +#define NPU_SYS_CLK_STATUS (0x0888U) +#define NPU_SYS_MWDT (0x08C8U) +#define NPU_SYS_HWDT (0x08D0U) +#define NPU_SYS_RTMC (0x08D8U) +#define NPU_SYS_RTMD (0x08E0U) +#define NPU_SYS_CLK_STATUS0 (0x0808U) +/* + * Register NPU_PERF + */ +#define NPU_PERF_READ (0x0A80U) +#define NPU_PERF_WRITE (0x0A88U) +#define NPU_PERF_WRITE_DS (0x0A90U) +#define NPU_PERF_READS (0x0A98U) +#define NPU_PERF_WRITES (0x0AA0U) +#define NPU_PERF_READ_IS (0x0AA8U) +#define NPU_PERFWRITE_IS (0x0AB0U) +#define NPU_PERF_RD_BS1 (0x0AB8U) +#define NPU_PERF_WR_BS1 (0x0AC0U) +#define NPU_PERF_RD_BS2 (0x0AC8U) +#define NPU_PERF_WR_BS2 (0x0AD0U) +#define NPU_PERF_RD_BS3 (0x0AD8U) +#define NPU_PERF_WR_BS3 (0x0AE0U) +#define NPU_PERF_RD_BS4 (0x0AE8U) +#define NPU_PERF_WR_BS4 (0x0AF0U) +#define NPU_PERF_RESET (0x0AF8U) +#define NPU_PERF_ENABLE (0x0A00U) +#define NPU_SYS_MMU_STATUS (0x0A08U) + +#define NPU_MDBG_S1 (0x0A10U) +#define NPU_MDBG_S2 (0x0A18U) +#define NPU_MDBG_IDLE (0x0A20U) +#define NPU_MDBG_STATUS3 (0x0A28U) +#define NPU_MDBG_FAULT_STOP_STATUS (0x0A30U) +#define NPU_MDBG_STATUS_DEBUG (0x0A38U) +#define NPU_OUTSTANDING_READ (0x0A40U) +#define NPU_PAGE_FAULT_STALL (0x0A48U) + +//SLC REG +#define NPU_CACHE_RESET (0x0A50U) +#define NPU_CACHE_REQ_CNT_EN (0x0A58U) +#define NPU_CACHE_CMDREQ_RD (0x0A60U) +#define NPU_CACHE_CMDBCK_REQ_WR (0x0A68U) +#define NPU_CACHE_CMDCRC_REQ_WR (0x0A70U) +#define NPU_CACHE_CMDDBG_REQ_WR (0x0A78U) +#define NPU_CACHE_CMDREQ_FENCE (0x0B80U) +#define NPU_CACHE_IREQ0_RD (0x0B88U) +#define NPU_CACHE_MM_REQ_RD (0x0B90U) +#define NPU_CACHE_MM_REQ_WR (0x0B98U) +#define NPU_CACHE_CREQ_RD (0x0BA8U) +#define NPU_CACHE_AREQ_RD (0x0BB0U) +#define NPU_CACHE_OPK_REQ_WR (0x0BB8U) +#define NPU_CACHE_CMDREQ_RD_WORD (0x0BC0U) +#define NPU_CACHE_CMDBCK_REQ_WR_WORD (0x0BC8U) +#define NPU_CACHE_CMDCRC_REQ_WR_WORD (0x0BD0U) +#define 
NPU_CACHE_CMDDBG_REQ_WR_WORD (0x0BD8U) +#define NPU_CACHE_CREQ_FENCE_WORD (0x0BE0U) +#define NPU_CACHE_IREQ0_RD_WORD (0x0BE8U) +#define NPU_CACHE_MM_REQ_RD_WORD (0x0BF0U) +#define NPU_CACHE_MM_REQ_WR_WORD (0x0BF8U) +#define NPU_CACHE_CREQ_RD_WORD (0x0B08U) +#define NPU_CACHE_AREQ_RD_WORD (0x0B10U) +#define NPU_CACHE_OPK_REQ_WR_WORD (0x0B18U) +#define NPU_CACHE_MMU_REQ_RD (0x0B20U) +#define NPU_CACHE_MMU_REQ_RD_WORD (0x0B28U) +#define NPU_CACHE_EWO_REQ_RD (0x0B30U) +#define NPU_CACHE_EWO_REQ_RD_WORD (0x0B38U) +#define NPU_FIFO_WORD_COUNT (0x0B40U) + +#define NPU_SYS_CLK_CTRL (0x2880U) +#define NPU_SYS_BUS_RESET_CTRL (0x2888U) +#define NPU_SYS_RESET_CTRL (0x2890U) +#define NPU_SYS_CMDMH_CONTROL (0x2898U) +#define NPU_SYS_IMH_CONTROL (0x28A0U) +#define NPU_SYS_CMH_CONTROL (0x28A8U) +#define NPU_SYS_AMH_CONTROL (0x28B0U) +#define NPU_SYS_OMH_CONTROL (0x28B8U) +#define NPU_SYS_ELEMENTOPS_MH_CONTROL (0x28C0U) +#define NPU_SYS_MM_MH_CONTROL (0x28C8U) + +#define NPU_SYS_H0_MEM_SIZE (0x28D8U) +#define NPU_SYS_H0_OCM_ADDR (0x2810U) +#define NPU_SYS_H0_OCM_SIZE (0x2818U) + +#define NPU_SYS_MEM_WDT_CM (0x2998U) +#define NPU_SYS_MEM_WDT_CTRL (0x29A0U) +#define NPU_SYS_HWDT_CTRL (0x29A8U) +#define NPU_SYS_HWDT_CM (0x29B0U) +#define NPU_SYS_IDLE_HYSTERESIS_COUNT (0x29C0U) +#define NPU_SYS_SOCIF_WAKEUP_ENABLE (0x29C8U) + +#define NPU_SYS_RESET_CLK_CTRL (0x29D0U) +#define NPU_SYS_CLK_CTRL0 (0x29D8U) +#define NPU_SYS_AXI_EXACCESS (0x29E8U) +#define NPU_SYS_REGBANK_REQUEST_INVALID (0x29F0U) +#define NPU_SYS_CMD_PRIORITY_LIMITS_LEGACY (0x2900U) +#define NPU_SYS_CMD_SECURITY_CONTROL (0x2908U) +#define NPU_SYS_ARB_STALL_RATIO (0x2A80U) + +#define NPU_SYS_DATAPATH_STALL_RFE (0x2A88U) +#define NPU_SYS_DATAPATH_STALL_RBE (0x2A90U) +#define NPU_SYS_ARB_MP (0x2A98U) + +#define NPU_SYS_MMU_PSIZE_RONE (0xEBD0U) +#define NPU_SYS_MMU_PSIZE_RTWO (0xEBD8U) +#define NPU_SYS_MMU_PSIZE_RTHREE (0xEBE0U) +#define NPU_SYS_MMU_PSIZE_RFOUR (0xEBE8U) + +#define NPU_SYS_MEM_CTRL (0xEA80U) +#define 
NPU_SYS_MEM_FAULT_STOP_CTRL (0xEAC8U) +#define NPU_SYS_ACE_QOS_CTRL (0xEB90U) +#define NPU_SYS_ACE_PRI_MAP_CTRL (0xEB98U) +#define NPU_SYS_ACE_CTRL (0xEBA0U) +#define NPU_SYS_ACE_STATUS (0xEBB0U) + +#define NPU_SYS_L1_GLB_CTRL (0xEC80U) +#define NPU_SYS_CONTEXT_MAPPING2 (0xF808U) +#define NPU_SYS_CONTEXT_MAPPING3 (0xF810U) +#define NPU_SYS_MEM_FIX (0xF858U) +#define NPU_SYS_PWR_MAN_HYSTERESIS (0xF980U) + +#define NPU_CH0_CONTROL (0x10880U) +#define NPU_CH0_STATUS (0x10888U) + +#define NPU_CH0_CMD_BASE_ADDRESS (0x108A0U) +#define NPU_CH0_ADDR_USED (0x108B8U) +#define NPU_CH0_ADDR0 (0x108C0U) +#define NPU_CH0_ADDR1 (0x108C8U) +#define NPU_CH0_ADDR2 (0x108D0U) +#define NPU_CH0_ADDR3 (0x108D8U) +#define NPU_CH0_ADDR4 (0x108E0U) +#define NPU_CH0_ADDR5 (0x108E8U) +#define NPU_CH0_ADDR6 (0x108F0U) +#define NPU_CH0_ADDR7 (0x108F8U) +#define NPU_CH0_WRITEBACK_CONTROL (0x10800U) +#define NPU_CH0_VHA_EVENT_ENABLE (0x10808U) +#define NPU_CH0_VHA_EVENT_STATUS (0x10810U) +#define NPU_CH0_VHA_EVENT_CLEAR (0x10818U) +#define NPU_CH0_CRC_CONTROL (0x10980U) +#define NPU_CH0_CRC_ADDRESS (0x10988U) +#define NPU_CH0_DEBUG_ADDRESS (0x10990U) +#define NPU_CH0_DEBUG_SIZE (0x10998U) +#define NPU_CH0_DEBUG_CONTROL (0x109A0U) +#define NPU_CH0_DEBUG_STATUS (0x109A8U) +#define NPU_CH0_PRELOAD_CONTROL (0x109B0U) +#define NPU_CH0_ADDR8 (0x109C0U) +#define NPU_CH0_ADDR9 (0x109C8U) +#define NPU_CH0_ADDR10 (0x109D0U) +#define NPU_CH0_ADDR11 (0x109D8U) +#define NPU_CH0_ADDR12 (0x109E0U) +#define NPU_CH0_ADDR13 (0x109E8U) +#define NPU_CH0_ADDR14 (0x109F0U) +#define NPU_CH0_ADDR15 (0x109F8U) + +#define NPU_CH0_PERFORMANCE (0x10920U) +#define NPU_CH0_MMU_CTRL_INVAL (0x1E880U) +#define NPU_CH0_MMU_MAPPING_CONTEXT (0x1E888U) +#define NPU_CH0_MMU_MAPPING_ADDR (0x1E890U) +#define NPU_CH0_MMU_ERR_S1 (0x1E898U) +#define NPU_CH0_MMU_ERR_S2 (0x1E8A0U) +#define NPU_CH0_MMU_CTRL_BS (0x1E8C0U) + +#define MMU_CTRL_BYPASS_EN (0X1U) +#define MMU_CTRL_BYPASS_DISABLE (0X0U) +#define MMU_CTRL_ALL_CTX_BIT (11U) +#define 
MMU_CTRL_ALL_CTX_EN (0X00800U) +#define MMU_CTRL_CTX_SHIFT (3) /* 3-10bit */ +#define MMU_CTRL_INVAL_PC (0X0004U) +#define MMU_CTRL_INVAL_PD (0X0002U) +#define MMU_CTRL_INVAL_PT (0X0001U) + +#define MMU_FS1_LEVEL_SHIFT (62U) +#define MMU_FS1_LEVEL_MSK (0X3U) +#define MMU_FS1_REQ_ID_SHIFT (56U) +#define MMU_FS1_REQ_ID_MSK (0X7FU) +#define MMU_FS1_CONTEXT_SHIFT (48U) +#define MMU_FS1_CONTEXT_MSK (0XFFU) +#define MMU_FS1_ADDRESS_SHIFT (4U) +#define MMU_FS1_ADDRESS_MSK (0XFFFFFFFFFFFU) +#define MMU_FS1_RNW_SHIFT (3U) +#define MMU_FS1_RNW_MSK (0X1) +#define MMU_FS1_TYPE_SHIFT (1U) +#define MMU_FS1_TYPE_MSK (0X3U) +#define MMU_FS1_FAULT_SHIFT (0U) +#define MMU_FS1_FAULT_MSK (0X1) + +#define MMU_FS2_WRITEBACK_SHIFT (29U) +#define MMU_FS2_WRITEBACK_MSK (0X1U) +#define MMU_FS2_CLEANUNIQUE_SHIFT (28U) +#define MMU_FS2_CLEANUNIQUE_MSK (0X1U) +#define MMU_FS2_BANK_SHIFT (24U) +#define MMU_FS2_BANK_MSK (0XFU) +#define MMU_FS2_TLB_ENTRY_SHIFT (16U) +#define MMU_FS2_TLB_ENTRY_MSK (0XFFU) +#define MMU_FS2_FBM_FAULT_SHIFT (10U) +#define MMU_FS2_FBM_FAULT_MSK (0X1) +#define MMU_FS2_BIF_ID_SHIFT (0U) +#define MMU_FS2_BIF_ID_MSK (0X3FFU) + +#define MMU_PS_CONFIG_SHIFT (38U) +#define MMU_PS_END_ADDR_SHIFT (19U) +#define MMU_PS_BASE_ADDR_SHIFT (0U) +#define MMU_PS_ADDR_ALIGNSHIFT (21U) +#define MMU_PS_END_ADDR_DELT_SHIFT (21 - 19) + +#define SYS_CLOCK_AUTO 0x20 +#define MAIN_CLOCK_AUTO 0xA2AA2AAA2A000220 + +#define NPU_SOFT_RESET_CONFIG 0xC0000107 +#define NPU_HW_BE_READY 0x200000 +/* 100ms @800MHz */ +#define NPU_HL_WDT_CYCLES 0xfffffff +/* 1ms @800MHz MEM*/ +#define NPU_MEM_WDT_CYCLES 0xffffffff + +#define NPU_HW_START_EN 0x1 + +#define NPU_INFERENCE_COMPLETE_EVENT 0x1 +#define NPU_INFERENCT_ERR_EVENT 0x2 +#define NPU_MEM_WDT_EVENT 0x8 +#define NPU_WDT_EVENT 0x80008 +#define NPU_AXI_EVENT 0x40000 +#define NPU_MMU_EVENT 0x10000 +#define NPU_ERR_EVENT 0x100000 +#define NPU_ALL_EVENT (NPU_INFERENCE_COMPLETE_EVENT | NPU_INFERENCT_ERR_EVENT | \ + NPU_AXI_EVENT | NPU_MMU_EVENT | \ + 
NPU_WDT_EVENT | NPU_ERR_EVENT) + +#define NPU_PRE_CBUF 0x10 +#define NPU_PRE_RD_256 0x01000000 +#define NPU_PRE_WR_256 0x00200000 +#define NPU_PRELOAD_CFG (NPU_PRE_CBUF | NPU_PRE_RD_256 | NPU_PRE_WR_256) + +#endif /* __PHYTIUM_NPU_LEOPARD_NEW_REG_H__ */ diff --git a/drivers/staging/phytium-npu/include/phytium_npu_mmu.h b/drivers/staging/phytium-npu/include/phytium_npu_mmu.h new file mode 100644 index 0000000000000000000000000000000000000000..be14efb63d51343ed6892f3d677144f8cfb20ba7 --- /dev/null +++ b/drivers/staging/phytium-npu/include/phytium_npu_mmu.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#ifndef __PHYTIUM_NPU_MMU_H__ +#define __PHYTIUM_NPU_MMU_H__ + +#include +#include +#include "linux/dma-fence.h" +#include "phytium_npu_uapi.h" + +#define PHYT_MMU_PC_MASK 0xFFC0000000 +#define PHYT_MMU_PC_SHIFT 30 +#define PHYT_MMU_PD_MASK 0x3FE00000 +#define PHYT_MMU_PD_SHIFT 21 +#define PHYT_MMU_PT_MASK 0x1FF000 +#define PHYT_MMU_PT_SHIFT 12 +#define PC_SHIFT 8 +#define PHYT_MMU_ENTRY_VALID 0x1 +#define PHYT_MMU_ENTRY_INVALID 0x0 +#define PHYT_MMU_PC_ENTRY_MASK 0xFFFFFFF0 + +#define PHYT_MMU_PTE_AXCACHE_MASK 0x3C00000000000000UL +#define PHYT_MMU_PTE_AXCACHE_SHIFT 58 +#define PHYT_MMU_PTE_PARITY_SHIFT 62 + +#define PHYT_MMU_AXICACHE_ATTR 0x1C00000000000000 +/* Page catalogue address shift */ +#define PHYT_MMU_PC_ADDR_SHIFT 12 +#define PHYT_MMU_DDR_VIRT_BASE 0x40000000 +#define PHYT_MMU_ALLOC_PAGE_SIZE 0x1000 + +#define PHYT_MMU_PD_PT_RECORD 512 +#define MAX_INFERENCE_CTX 4 +#define NUM_FENCES_PER_BUF 10 +#define MAX_NUM_DEVS 8 + +/* MMUv3 PTE entry flags */ +enum mmu_map_flag { + PHYT_MMU_PTE_FLAG_NONE = 0x0, + PHYT_MMU_PTE_FLAG_VALID = 0x1, + PHYT_MMU_PTE_FLAG_READ_ONLY = 0x2, + PHYT_MMU_PTE_FLAG_CACHE_COHERENCY = 0x4, +}; + +struct npu_mmu_config_global { + u8 mode_bypass; + u8 width; + u8 is_use_mmu_pte; + u8 default_ctx_id; + u32 page_size; +}; + +struct npu_mmu_config { + u32 addr_width; /* 
physical */ + bool bypass_hw; /* MMU bypass mode */ + bool use_pte_parity; /* enables parity calculation for PTEs */ + enum mem_attr new_attr; + int page_size; +}; + +struct mmu_page_desc { + u64 phys; + u64 cpu_addr; + u64 npu_addr_base; +}; + +struct pd_ext_info { + u32 valid_pd; + struct mmu_page_desc *pd_desc[512]; +}; + +struct npu_mmu_pt { + struct npu_mmu_pd *mpd; + struct mmu_page_desc desc; + u32 valid; +}; + +struct npu_mmu_pd { + struct npu_mmu_catalogue *mpc; + struct mmu_page_desc desc; + struct npu_mmu_pt *pt; + u64 pt_page_addr[512]; + u32 ref_count; +}; + +struct npu_mmu_catalogue { + struct npu_mmu_ctx *mctx; + struct npu_mmu_pd **pd; + struct mmu_page_desc desc; + struct pd_ext_info pd_ext; + u32 ref_count; +}; + +struct npu_mctx_map { + struct npu_mmu_ctx *mctx; + struct list_head mctx_map_entry; //add to npu_mmu_ctx maplist + struct dma_buf *dma_buf; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + int dma_buf_fd; + u32 map_type; + u64 virt_addr; //map mmu virtual address from user +}; + +struct npu_mmu_ctx { + struct phytium_npu_mmu_context *pnmctx; + struct npu_mmu_catalogue *mmu_pc; + struct list_head maplist; +}; + +struct phytium_npu_mmu_context { + struct device *dev; + u32 npu_id; + u32 map_type; + u32 context_id; + u32 pc_base_phys_addr; + u64 virt_base; + u64 curr_virt_base; + struct npu_mmu_ctx *mctx; +}; + +static inline u64 phytium_npu_read64(void *addr) +{ + return (u64)readl_relaxed((void __iomem *)addr) | + ((u64)readl_relaxed((void __iomem *)addr + 4) << 32); +} + +#define REGWRITE64(N, K, V) \ + writeq_relaxed(V, (void __iomem *)((N)->reg_base + (K))) +#define REGWRITE32(N, K, V) \ + writel_relaxed(V, (void __iomem *)((N)->reg_base + (K))) + +#define REGREAD32(N, K) \ + readl_relaxed((void __iomem *)((N)->reg_base + (K))) + +#define REGREAD64(N, K) \ + readq_relaxed((void __iomem *)((N)->reg_base + (K))) + +/* Virtual memory space for dma buf in the kernel - OCM & device debug buffers */ +#define NPU_VA_BASE_ADDR 
0x2000000ULL +#define NPU_VA_SIZE 0xc0000000ULL //SIZE 2G, 2M-3G + +#endif diff --git a/drivers/staging/phytium-npu/include/phytium_npu_reg.h b/drivers/staging/phytium-npu/include/phytium_npu_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..3663c68ae5fa0e7b3114c4de728ec392a06ca79a --- /dev/null +++ b/drivers/staging/phytium-npu/include/phytium_npu_reg.h @@ -0,0 +1,241 @@ +/* SPDX-License-Identifier: GPL */ +/* + * Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#ifndef __PHYTIUM_NPU_X100_REG_H__ +#define __PHYTIUM_NPU_X100_REG_H__ + +#define NPU_SYS_CLK_STATUS (0x0008U) +#define NPU_SYS_MWDT (0x0048U) +#define NPU_SYS_HWDT (0x0050U) +#define NPU_SYS_RTMC (0x0058U) +#define NPU_SYS_RTMD (0x0060U) +#define NPU_SYS_CLK_STATUS0 (0x0088U) +/* + * Register NPU_PERF + */ +#define NPU_PERF_READ (0x0200U) +#define NPU_PERF_WRITE (0x0208U) +#define NPU_PERF_WRITE_DS (0x0210U) +#define NPU_PERF_READS (0x0218U) +#define NPU_PERF_WRITES (0x0220U) +#define NPU_PERF_READ_IS (0x0228U) +#define NPU_PERFWRITE_IS (0x0230U) +#define NPU_PERF_RD_BS1 (0x0238U) +#define NPU_PERF_WR_BS1 (0x0240U) +#define NPU_PERF_RD_BS2 (0x0248U) +#define NPU_PERF_WR_BS2 (0x0250U) +#define NPU_PERF_RD_BS3 (0x0258U) +#define NPU_PERF_WR_BS3 (0x0260U) +#define NPU_PERF_RD_BS4 (0x0268U) +#define NPU_PERF_WR_BS4 (0x0270U) +#define NPU_PERF_RESET (0x0278U) +#define NPU_PERF_ENABLE (0x0280U) +#define NPU_SYS_MMU_STATUS (0x0288U) + +#define NPU_MDBG_S1 (0x0290U) +#define NPU_MDBG_S2 (0x0298U) +#define NPU_MDBG_IDLE (0x02A0U) +#define NPU_MDBG_STATUS3 (0x02A8U) +#define NPU_MDBG_FAULT_STOP_STATUS (0x02B0U) +#define NPU_MDBG_STATUS_DEBUG (0x02B8U) +#define NPU_OUTSTANDING_READ (0x02C0U) +#define NPU_PAGE_FAULT_STALL (0x02C8U) + +//SLC REG +#define NPU_CACHE_RESET (0x02D0U) +#define NPU_CACHE_REQ_CNT_EN (0x02D8U) +#define NPU_CACHE_CMDREQ_RD (0x02E0U) +#define NPU_CACHE_CMDBCK_REQ_WR (0x02E8U) +#define NPU_CACHE_CMDCRC_REQ_WR (0x02F0U) +#define NPU_CACHE_CMDDBG_REQ_WR (0x02F8U) 
+#define NPU_CACHE_CMDREQ_FENCE (0x0300U) +#define NPU_CACHE_IREQ0_RD (0x0308U) +#define NPU_CACHE_MM_REQ_RD (0x0310U) +#define NPU_CACHE_MM_REQ_WR (0x0318U) +#define NPU_CACHE_CREQ_RD (0x0328U) +#define NPU_CACHE_AREQ_RD (0x0330U) +#define NPU_CACHE_OPK_REQ_WR (0x0338U) +#define NPU_CACHE_CMDREQ_RD_WORD (0x0340U) +#define NPU_CACHE_CMDBCK_REQ_WR_WORD (0x0348U) +#define NPU_CACHE_CMDCRC_REQ_WR_WORD (0x0350U) +#define NPU_CACHE_CMDDBG_REQ_WR_WORD (0x0358U) +#define NPU_CACHE_CREQ_FENCE_WORD (0x0360U) +#define NPU_CACHE_IREQ0_RD_WORD (0x0368U) +#define NPU_CACHE_MM_REQ_RD_WORD (0x0370U) +#define NPU_CACHE_MM_REQ_WR_WORD (0x0378U) +#define NPU_CACHE_CREQ_RD_WORD (0x0388U) +#define NPU_CACHE_AREQ_RD_WORD (0x0390U) +#define NPU_CACHE_OPK_REQ_WR_WORD (0x0398U) +#define NPU_CACHE_MMU_REQ_RD (0x03A0U) +#define NPU_CACHE_MMU_REQ_RD_WORD (0x03A8U) +#define NPU_CACHE_EWO_REQ_RD (0x03B0U) +#define NPU_CACHE_EWO_REQ_RD_WORD (0x03B8U) +#define NPU_FIFO_WORD_COUNT (0x03C0U) + +#define NPU_SYS_CLK_CTRL (0x2000U) +#define NPU_SYS_BUS_RESET_CTRL (0x2008U) +#define NPU_SYS_RESET_CTRL (0x2010U) +#define NPU_SYS_CMDMH_CONTROL (0x2018U) +#define NPU_SYS_IMH_CONTROL (0x2020U) +#define NPU_SYS_CMH_CONTROL (0x2028U) +#define NPU_SYS_AMH_CONTROL (0x2030U) +#define NPU_SYS_OMH_CONTROL (0x2038U) +#define NPU_SYS_ELEMENTOPS_MH_CONTROL (0x2040U) +#define NPU_SYS_MM_MH_CONTROL (0x2048U) + +#define NPU_SYS_H0_MEM_SIZE (0x2058U) +#define NPU_SYS_H0_OCM_ADDR (0x2090U) +#define NPU_SYS_H0_OCM_SIZE (0x2098U) + +#define NPU_SYS_MEM_WDT_CM (0x2118U) +#define NPU_SYS_MEM_WDT_CTRL (0x2120U) +#define NPU_SYS_HWDT_CTRL (0x2128U) +#define NPU_SYS_HWDT_CM (0x2130U) +#define NPU_SYS_IDLE_HYSTERESIS_COUNT (0x2140U) +#define NPU_SYS_SOCIF_WAKEUP_ENABLE (0x2148U) + +#define NPU_SYS_RESET_CLK_CTRL (0x2150U) +#define NPU_SYS_CLK_CTRL0 (0x2158U) +#define NPU_SYS_AXI_EXACCESS (0x2168U) +#define NPU_SYS_REGBANK_REQUEST_INVALID (0x2170U) +#define NPU_SYS_CMD_PRIORITY_LIMITS_LEGACY (0x2180U) +#define 
NPU_SYS_CMD_SECURITY_CONTROL (0x2188U) +#define NPU_SYS_ARB_STALL_RATIO (0x2200U) + +#define NPU_SYS_DATAPATH_STALL_RFE (0x2208U) +#define NPU_SYS_DATAPATH_STALL_RBE (0x2210U) +#define NPU_SYS_ARB_MP (0x2218U) + +#define NPU_SYS_MMU_PSIZE_RONE (0xE350U) +#define NPU_SYS_MMU_PSIZE_RTWO (0xE358U) +#define NPU_SYS_MMU_PSIZE_RTHREE (0xE360U) +#define NPU_SYS_MMU_PSIZE_RFOUR (0xE368U) + +#define NPU_SYS_MEM_CTRL (0xE200U) +#define NPU_SYS_MEM_FAULT_STOP_CTRL (0xE248U) +#define NPU_SYS_ACE_QOS_CTRL (0xE310U) +#define NPU_SYS_ACE_PRI_MAP_CTRL (0xE318U) +#define NPU_SYS_ACE_CTRL (0xE320U) +#define NPU_SYS_ACE_STATUS (0xE330U) + +#define NPU_SYS_L1_GLB_CTRL (0xE400U) +#define NPU_SYS_CONTEXT_MAPPING2 (0xF088U) +#define NPU_SYS_CONTEXT_MAPPING3 (0xF090U) +#define NPU_SYS_MEM_FIX (0xF0D8U) +#define NPU_SYS_PWR_MAN_HYSTERESIS (0xF100U) + +#define NPU_CH0_CONTROL (0x10000U) +#define NPU_CH0_STATUS (0x10008U) +#define NPU_CH0_S2 (0x10010U) + +#define NPU_CH0_CMD_BASE_ADDRESS (0x10020U) +#define NPU_CH0_ADDR_USED (0x10038U) +#define NPU_CH0_ADDR0 (0x10040U) +#define NPU_CH0_ADDR1 (0x10048U) +#define NPU_CH0_ADDR2 (0x10050U) +#define NPU_CH0_ADDR3 (0x10058U) +#define NPU_CH0_ADDR4 (0x10060U) +#define NPU_CH0_ADDR5 (0x10068U) +#define NPU_CH0_ADDR6 (0x10070U) +#define NPU_CH0_ADDR7 (0x10078U) +#define NPU_CH0_WRITEBACK_CONTROL (0x10080U) +#define NPU_CH0_VHA_EVENT_ENABLE (0x10088U) +#define NPU_CH0_VHA_EVENT_STATUS (0x10090U) +#define NPU_CH0_VHA_EVENT_CLEAR (0x10098U) +#define NPU_CH0_CRC_CONTROL (0x10100U) +#define NPU_CH0_CRC_ADDRESS (0x10108U) +#define NPU_CH0_DEBUG_ADDRESS (0x10110U) +#define NPU_CH0_DEBUG_SIZE (0x10118U) +#define NPU_CH0_DEBUG_CONTROL (0x10120U) +#define NPU_CH0_DEBUG_STATUS (0x10128U) +#define NPU_CH0_PRELOAD_CONTROL (0x10130U) +#define NPU_CH0_ADDR8 (0x10140U) +#define NPU_CH0_ADDR9 (0x10148U) +#define NPU_CH0_ADDR10 (0x10150U) +#define NPU_CH0_ADDR11 (0x10158U) +#define NPU_CH0_ADDR12 (0x10160U) +#define NPU_CH0_ADDR13 (0x10168U) +#define NPU_CH0_ADDR14 
(0x10170U) +#define NPU_CH0_ADDR15 (0x10178U) + +#define NPU_CH0_PERFORMANCE (0x101A0U) +#define NPU_CH0_MMU_CTRL_INVAL (0x1E000U) +#define NPU_CH0_MMU_MAPPING_CONTEXT (0x1E008U) +#define NPU_CH0_MMU_MAPPING_ADDR (0x1E010U) +#define NPU_CH0_MMU_ERR_S1 (0x1E018U) +#define NPU_CH0_MMU_ERR_S2 (0x1E020U) +#define NPU_CH0_MMU_CTRL_BS (0x40020U) + +#define MMU_CTRL_BYPASS_EN (0X1U) +#define MMU_CTRL_BYPASS_DISABLE (0X0U) +#define MMU_CTRL_ALL_CTX_BIT (11U) +#define MMU_CTRL_ALL_CTX_EN (0X00800U) +#define MMU_CTRL_CTX_SHIFT (3) /* 3-10bit */ +#define MMU_CTRL_INVAL_PC (0X0004U) +#define MMU_CTRL_INVAL_PD (0X0002U) +#define MMU_CTRL_INVAL_PT (0X0001U) + +#define MMU_FS1_LEVEL_SHIFT (62U) +#define MMU_FS1_LEVEL_MSK (0X3U) +#define MMU_FS1_REQ_ID_SHIFT (56U) +#define MMU_FS1_REQ_ID_MSK (0X7FU) +#define MMU_FS1_CONTEXT_SHIFT (48U) +#define MMU_FS1_CONTEXT_MSK (0XFFU) +#define MMU_FS1_ADDRESS_SHIFT (4U) +#define MMU_FS1_ADDRESS_MSK (0XFFFFFFFFFFFU) +#define MMU_FS1_RNW_SHIFT (3U) +#define MMU_FS1_RNW_MSK (0X1) +#define MMU_FS1_TYPE_SHIFT (1U) +#define MMU_FS1_TYPE_MSK (0X3U) +#define MMU_FS1_FAULT_SHIFT (0U) +#define MMU_FS1_FAULT_MSK (0X1) + +#define MMU_FS2_WRITEBACK_SHIFT (29U) +#define MMU_FS2_WRITEBACK_MSK (0X1U) +#define MMU_FS2_CLEANUNIQUE_SHIFT (28U) +#define MMU_FS2_CLEANUNIQUE_MSK (0X1U) +#define MMU_FS2_BANK_SHIFT (24U) +#define MMU_FS2_BANK_MSK (0XFU) +#define MMU_FS2_TLB_ENTRY_SHIFT (16U) +#define MMU_FS2_TLB_ENTRY_MSK (0XFFU) +#define MMU_FS2_FBM_FAULT_SHIFT (10U) +#define MMU_FS2_FBM_FAULT_MSK (0X1) +#define MMU_FS2_BIF_ID_SHIFT (0U) +#define MMU_FS2_BIF_ID_MSK (0X3FFU) + +#define MMU_PS_CONFIG_SHIFT (38U) +#define MMU_PS_END_ADDR_SHIFT (19U) +#define MMU_PS_BASE_ADDR_SHIFT (0U) +#define MMU_PS_ADDR_ALIGNSHIFT (21U) +#define MMU_PS_END_ADDR_DELT_SHIFT (21 - 19) + +#define SYS_CLOCK_AUTO 0x20 +#define MAIN_CLOCK_AUTO 0xA2AA2AAA2A000220 + +#define NPU_SOFT_RESET_CONFIG 0xC0000107 +#define NPU_HW_BE_READY 0x200000 +/* 100ms @800MHz */ +#define NPU_HL_WDT_CYCLES 
0xfffffff +/* 1ms @800MHz MEM*/ +#define NPU_MEM_WDT_CYCLES 0xffffffff + +#define NPU_HW_START_EN 0x1 + +#define NPU_INFERENCE_COMPLETE_EVENT 0x1 +#define NPU_INFERENCT_ERR_EVENT 0x2 +#define NPU_MEM_WDT_EVENT 0x8 +#define NPU_WDT_EVENT 0x80008 +#define NPU_AXI_EVENT 0x40000 +#define NPU_MMU_EVENT 0x10000 +#define NPU_ERR_EVENT 0x100000 +#define NPU_ALL_EVENT (NPU_INFERENCE_COMPLETE_EVENT | NPU_INFERENCT_ERR_EVENT | \ + NPU_AXI_EVENT | NPU_MMU_EVENT | \ + NPU_WDT_EVENT | NPU_ERR_EVENT) + +#define NPU_PRE_CBUF 0x10 +#define NPU_PRE_RD_256 0x01000000 +#define NPU_PRE_WR_256 0x00200000 +#define NPU_PRELOAD_CFG (NPU_PRE_CBUF | NPU_PRE_RD_256 | NPU_PRE_WR_256) + +#endif /* __PHYTIUM_NPU_X100_REG_H__ */ diff --git a/drivers/staging/phytium-npu/include/phytium_npu_uapi.h b/drivers/staging/phytium-npu/include/phytium_npu_uapi.h new file mode 100644 index 0000000000000000000000000000000000000000..27f538ea8ed5f759bc9c86c4016e9bb6c7653d4c --- /dev/null +++ b/drivers/staging/phytium-npu/include/phytium_npu_uapi.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#ifndef _PHYTIUM_NPU_USER_API_H_ +#define _PHYTIUM_NPU_USER_API_H_ + +#include +#include +#include +#include +#include +#include +#include + +/* Cache attributes mask */ +#define MEM_ATTR_CACHE_MASK 0xf +#define NPU_RSP_OK 0 +enum { + /* System errors */ + NPU_RSP_ERROR_HW_SYS_AXI_ERROR_SHF = 0, + NPU_RSP_ERROR_HW_SYS_MMU_PAGE_FAULT_SHF, + NPU_RSP_ERROR_HW_SYS_SYS_MEM_WDT_SHF, + NPU_RSP_ERROR_HW_SYS_AXI_MEMORY_PARITY_ERROR_SHF, + NPU_RSP_ERROR_HW_SYS_MMU_PARITY_ERROR_SHF, + NPU_RSP_ERROR_HW_SYS_RAM_CORRECTION_SHF, + NPU_RSP_ERROR_HW_SYS_RAM_DETECTION_SHF, + NPU_RSP_ERROR_HW_SYS_LSYNC_INV_REQ_SHF, + NPU_RSP_ERROR_HW_SYS_LOGIC_ERROR_SHF, + NPU_RSP_ERROR_SW_SYS_EVNT_PARITY_ERROR_SHF, + NPU_RSP_ERROR_SW_WDT_EXPIRED_SHF, + /* CNN core status errors. 
*/ + NPU_RSP_ERROR_HW_CORE_LOGIC_ERROR_SHF, + NPU_RSP_ERROR_HW_RAM_CORRECTION_SHF, + NPU_RSP_ERROR_HW_RAM_DETECTION_SHF, + NPU_RSP_ERROR_HW_CORE_SYNC_ERROR_SHF, + NPU_RSP_ERROR_HW_CORE_WDT_SHF, + NPU_RSP_ERROR_HW_CORE_MEM_WDT_SHF, + NPU_RSP_ERROR_HW_CORE_CNN_ERROR_SHF, + /* Interconnect status errors. */ + NPU_RSP_ERROR_HW_LOCKSTEP_ERROR_SHF, + NPU_RSP_ERROR_HW_IC_LOGIC_ERROR_SHF, + NPU_RSP_ERROR_HW_SOCIF_READ_MISMATCH_SHF, + NPU_RSP_ERROR_HW_SOCIF_READ_UNRESPONSIVE_SHF, + NPU_RSP_ERROR_SW_IC_PARITY_ERROR_SHF, + /* Workload submit errors. */ + NPU_RSP_ERROR_SW_SKIP_CMD_SHF, + NPU_RSP_ERROR_SW_KICK_BIT_READ_BACK_FAILURE_SHF, + NPU_RSP_ERROR_SW_HW_BUSY_SHF, + NPU_RSP_ERROR_SW_INVALID_CMD_INFO_SHF, + NPU_RSP_ERROR_SW_INVALID_CMD_TYPE_SHF, + NPU_RSP_ERROR_SW_MMU_SETUP_FAILURE_SHF, + + /* Special flag to mark critical errors */ + NPU_RSP_ERROR_CRITICAL_SHF +}; + +#define NPU_RSP_ERROR(err) ((1ull << (NPU_RSP_ERROR_##err##_SHF) | \ + (1ull << NPU_RSP_ERROR_CRITICAL_SHF))) +#define NPU_RSP_WARNING(err) (1ull << (NPU_RSP_ERROR_##err##_SHF)) + +#define NPU_RSP_SET_ERROR(err_flags) ((err_flags) |= (1ull << NPU_RSP_ERROR_CRITICAL_SHF)) +#define NPU_RSP_IS_CRITICAL(err) ((((err) >> NPU_RSP_ERROR_CRITICAL_SHF) & 0x1) ? 
true : false) + +/* memory attributes */ +enum mem_attr { + MEM_ATTR_CACHED = 0x00000001, + MEM_ATTR_UNCACHED = 0x00000002, + MEM_ATTR_WRITECOMBINE = 0x00000004, + + /* Special */ + MEM_ATTR_SECURE = 0x00000010, + MEM_ATTR_NOMAP = 0x00000020, + MEM_ATTR_NOSYNC = 0x00000040, + + /* Internal */ + MEM_ATTR_MMU = 0x10000000, + MEM_ATTR_OCM = 0x20000000, +}; + +enum debug_mode { + DEBUG_MODE_NONE = 0, + DEBUG_MODE_REGISTER, + DEBUG_MODE_MEMORY_FILE, +}; + +enum debug_type { + DEBUG_TYPE_NONE = 0, + DEBUG_TYPE_REG_PERF = 0x1, + DEBUG_TYPE_REG_BAND = 0x2, + DEBUG_TYPE_BAND_STREAM = 0x10, + DEBUG_TYPE_BAND_LAYER = 0x20, + DEBUG_TYPE_BAND_PASS = 0x30, + DEBUG_TYPE_PERF_STREAM = 0x100, + DEBUG_TYPE_PERF_LAYER = 0x200, + DEBUG_TYPE_PERF_PASS = 0x300, + DEBUG_TYPE_CRC_STREAM = 0x1000, + DEBUG_TYPE_CRC_LAYER = 0x2000, + DEBUG_TYPE_CRC_PASS = 0x3000, +}; + +#define NPU_OCM_MAX_NUM_PAGES 128 +#define NPU_MAX_DMA_ADDRS 16 + +// represents OCM types, +#define NPU_LOCAL_OCM 0 /* Local OCM */ +#define NPU_SHARED_OCM 1 /* Shared OCM */ +#define NPU_OCM_TYPE_MAX 2 + +struct npu_info { + u64 pid; + u64 version; + u32 is_have_mmu; + u32 mmu_page_size; + u32 mefficiency; + bool use_debug; + u8 core_num; + u32 l1_size; + u32 l3_size; + u32 l3_percore_size; + u32 clock_freq; +} __aligned(8); + +/* compute stream sent to device */ +enum npu_compute_type { + NPU_INVALID = 0x000, + NPU_STREAM_SUBMIT = 0x201, + NPU_STREAM_DEBUG +}; + +/* optional flags for commands */ +#define NPU_COMPUTEFLAG_NOTIFY 0x0001 /* send response when cmd complete */ +#define NPU_CHECK_CRC 0x0002 /* check the combined CRCs */ +#define NPU_EXEC_TIME_SET 0x0004 /* execution time is valid */ + +struct npu_excute_stream { + u16 sflags; /* NPU_STREAM FLAG_xxx */ + u16 stype; /* enum stream_type */ + u32 sid; /* arbitrary id for stream */ + u8 sp1; /* priority */ + u8 sp2; /* padding */ + u8 all; /* total number of buffers */ + u8 in; /* number of input buffers */ + u32 t; + u32 fd[NPU_MAX_DMA_ADDRS]; /* dma-buf fd */ +}; + 
+struct npu_user_submit_stream { + struct npu_excute_stream estream; + s32 stream_off; + s32 stream_fd; /* bufid of stream buffer */ + s32 stream_size; + u32 bufoffsets[NPU_MAX_DMA_ADDRS]; /* offsets into inbufs and outbufs buffers */ + u32 bufsizes[NPU_MAX_DMA_ADDRS]; /* sizes of the inbufs and outbufs buffers */ + u8 idx[NPU_MAX_DMA_ADDRS]; /* register to be used for inbufs and outbufs */ + u32 cycles; /* estimated number of cycles for this command */ + u32 num; /* number of subsegments in subseg_info array */ +} __aligned(8); + +/* + * response from kernel module to user. + */ +struct npu_user_rsp { + u64 rsp_err_flags; + u32 sid; /* arbitrary id to identify stream */ + u32 err_no; /* 0 if successful, else -ve */ + u32 session_id; +}; + +#define MAX_NPU_USER_RSP_SIZE (sizeof(struct npu_user_rsp)) +/* + * response returned after CNN_SUBMIT. + */ +struct npu_user_cnn_submit_rsp { + struct npu_user_rsp msg; + u64 last_proc_us; /* processing time in us, measured with system clock */ + u32 mem_usage; /* device memory used */ + u32 hw_cycles; /* hardware cycles used */ +} __aligned(8); + +#define MAX_NPU_UCNN_RSP_SIZE (sizeof(struct npu_user_cnn_submit_rsp)) + +struct npu_free_buffer { + s32 fd; /* [IN] fd of dma buffer to free */ +}; + +enum npu_map_type { + NPU_MAP_TYPE_NONE = 0x0, + NPU_MAP_TYPE_BUF, + NPU_MAP_TYPE_INFERENCE, + NPU_MAP_TYPE_RO = 0x4, +}; + +struct npu_map2cache { + u64 vaddr; /* [IN] Device virtual address of a mapping */ + u32 fd; /* [IN] fd of dma buffer to map to VHA */ + u32 page_size; /* [IN] Page size */ + u32 num_pages; /* [IN] The number of pages to be mapped */ + u32 page_idxs[128];//驱动定义最大page128--516K + /* [IN] Indexes of pages to be mapped */ +} __aligned(8); + +struct npu_memory_map { + u32 map_type; /* [IN] Mapping flags, see NPU_map_type */ + s32 fd; /* [IN] fd of dma buffer if necessary */ + u64 vaddr; /* [IN] Device virtual address to map     */ +} __aligned(8); + +struct npu_memory_unmap { + s32 fd; /* [IN] fd of dma buffer if 
necessary*/ +}; + +enum npu_buf_status { + NPU_BUF_UNUPDATE, + NPU_BUF_UPDATED_BY_SW, + NPU_BUF_UPDATED_BY_HW +}; + +#define NPU_SYNC_NONE (-1) + +struct npu_set_buffer { + s32 fd; + u32 set_state; /* enum npu_buf_status */ + int input_sync_fd; /* input sync to attach */ + bool is_output_sync; /* output sync signal */ +} __aligned(8); + +enum npu_sync_op { + NPU_SYNC_OP_CREATE_OUT, /* create output sync_fd */ + NPU_SYNC_OP_MERGE_IN, /* merge input sync_fds */ + NPU_SYNC_OP_RELEASE /* release syncs */ +}; + +/* parameters to manage sync_fds */ +#define NPU_SYNC_MAX_DMA_IDS (NPU_MAX_DMA_ADDRS) +#define NPU_SYNC_MAX_IN_SYNC_FDS (NPU_MAX_DMA_ADDRS) + +struct npu_sync_buffer { + enum npu_sync_op op; + u32 num; /* [IN] number of output buffers */ + int fd[NPU_SYNC_MAX_DMA_IDS]; /* [IN] list of output buffer ids */ + int syncfd; /* [OUT] output sync_fd/sync_fd for merged input sync_fds */ +} __aligned(8); + +struct npu_delete_stream { + u32 stream_id; /* [IN] masked ID of commands to be deleted */ + u32 stream_id_mask; /* [IN] mask for command IDs to be deleted */ + bool is_res; /* [IN] if true, respond to this cancel request */ +} __aligned(8); + +struct npu_repeat_stream { + u32 stream_id; /* [IN] masked ID of commands to be deleted */ + u32 stream_id_mask; /* [IN] mask for command IDs to be deleted */ + bool is_repeat; /* [IN] if true, repeat this stream*/ +} __aligned(8); + +struct npu_debug_perf { + u32 debug_mode; /* register|mem file */ + u32 debug_type; /* perf|band|crc */ + u32 debug_fd; /* debug perf or band memory fd */ + u32 debug_crc_fd; /* debug crc memory fd */ +}; + +#define NPU_INFO _IOR('p', 0xB0, struct npu_info) +#define NPU_MAP2CACHE _IOW('p', 0xB1, struct npu_map2cache) +#define NPU_MEMORY_IMPORT _IOW('p', 0xB2, struct npu_memory_map) +#define NPU_MEMORY_UNMAP _IOW('p', 0xB3, struct npu_memory_unmap) +#define NPU_SET_BUF _IOW('p', 0xB4, struct npu_set_buffer) +#define NPU_SYNC_BUF _IOWR('p', 0xB5, struct npu_sync_buffer) +#define NPU_DELET_STREAM 
_IOW('p', 0xB6, struct npu_delete_stream) +#define NPU_REPEAT_STREAM _IOW('p', 0xB7, struct npu_repeat_stream) +#define NPU_DEBUG_PERF _IOW('p', 0xB8, struct npu_debug_perf) + +/* + * npu scope context + */ +struct npu_trace_ctx { + u32 model_id; /* model id */ + u32 frame_id; /* inference id */ + u32 dev_id; /* device id */ + u32 osid; /* device id */ + u32 pid; /* process id */ + u32 tid; /* thread id */ +}; + +/* Event information, available from NPU_info */ +struct npu_timing_data { + u32 evt_type; /* event type */ + u32 seqno; /* continually increments */ + u32 dev_id; /* device id */ + u32 timestamp_lo; /* in microseconds */ + u32 timestamp_hi; + u32 type; /* either SUBMIT or COMPLETE or ERROR */ + u32 cycles; /* HW cycle count */ + u32 pid; /* process id */ +}; + +enum npu_scope_evt_type { + NPU_EVENT_TIMING, + NPU_EVENT_NUM +}; + +enum npu_timing_data_type { + NPU_EVENT_TYPE_ENQUEUE, + NPU_EVENT_TYPE_SUBMIT, + NPU_EVENT_TYPE_COMPLETE, + NPU_EVENT_TYPE_ERROR, + NPU_EVENT_TYPE_NUM +}; + +#endif diff --git a/drivers/staging/phytium-npu/phytium_npu_common.c b/drivers/staging/phytium-npu/phytium_npu_common.c new file mode 100644 index 0000000000000000000000000000000000000000..ec8e07dabcb0b35a4c8c63b822df261c931083ab --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_common.c @@ -0,0 +1,705 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ +#include "phytium_npu.h" +#include "phytium_npu_mmu.h" +#include "linux/phytium_npu_dma_buf_heap.h" +#include "linux/dma-fence.h" +#include "linux/dma-buf.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef PHYTIUM_NPU_PLATFORM +#include "phytium_npu_leopard_reg.h" +#else +#include "phytium_npu_reg.h" +#endif + +struct phytium_npu_dev *gnpu_dev; +#define AP_CPPC2_STAT 0x0 +#define AP_CPPC2_SET 0x4 +#define CPPC2_DATA0 0x18 +#define CPPC2_DATA1 0x1C +#define CONFIG_POWER_D0 0x110400 +#define CONFIG_POWER_ON_D1 0x40000008 +#define CONFIG_POWER_OFF_D1 0x40000000 +#define TIMEOUT 50000 //test for X1 +#define PERIOD_T 10 + +#define NPU_MMU_40BIT 40 +#define MMU_DEFAULT_TRANS 0 +#define MMU_DEFAULT_CTX_ID 0 +#define MMU_DEFAULT_PGSIZE 4096 + +struct phytium_npu_dev *phytium_npu_get_npudev(void) +{ + return gnpu_dev; +} + +static void phytium_npu_mmu_config_init(struct phytium_npu_dev *npu_dev) +{ + npu_dev->nmmu_config.width = NPU_MMU_40BIT; + npu_dev->nmmu_config.mode_bypass = MMU_DEFAULT_TRANS; + npu_dev->nmmu_config.is_use_mmu_pte = FALSE; + npu_dev->nmmu_config.default_ctx_id = MMU_DEFAULT_CTX_ID; + npu_dev->nmmu_config.page_size = MMU_DEFAULT_PGSIZE; +} + +static void _phytium_npu_poweroff_dtb(struct phytium_npu_dev *npudev) +{ + u32 stat; + int rc; + + if (!npudev->power_reg_base) { + pr_err("not find power management reg"); + return; + } + rc = readl_relaxed_poll_timeout(npudev->power_reg_base + AP_CPPC2_STAT, + stat, (stat == 0), PERIOD_T, TIMEOUT); + if (rc == -ETIMEDOUT) { + pr_err("Timeout when read CPPC stat: %#08x\n", stat); + return; + } + rc = readl_relaxed_poll_timeout(npudev->power_reg_base + CPPC2_DATA0, + stat, (stat & 0x1), PERIOD_T, TIMEOUT * 50); + if (rc == -ETIMEDOUT) { + pr_err("Timeout when config channel status: %#08x\n", stat); + return; + } + + writel_relaxed(CONFIG_POWER_D0, npudev->power_reg_base + CPPC2_DATA0); + writel_relaxed(CONFIG_POWER_OFF_D1, npudev->power_reg_base + 
CPPC2_DATA1); + writel_relaxed(0x1, npudev->power_reg_base + AP_CPPC2_SET); + + rc = readl_relaxed_poll_timeout(npudev->power_reg_base + CPPC2_DATA0, + stat, (stat & 0x1), PERIOD_T, TIMEOUT * 50); + if (rc == -ETIMEDOUT) { + pr_err("Timeout when read config channel status: %#08x\n", stat); + return; + } + stat = readl_relaxed(npudev->power_reg_base + CPPC2_DATA1); + if (!(stat & 0xFFFFFFF)) + pr_debug("Power off"); +} + +static void _phytium_npu_powerup_dtb(struct phytium_npu_dev *npudev) +{ + u32 stat; + int rc; + + if (!npudev->power_reg_base) { + pr_err("not find power management reg"); + return; + } + rc = readl_relaxed_poll_timeout(npudev->power_reg_base + AP_CPPC2_STAT, + stat, (stat == 0), PERIOD_T, TIMEOUT); + if (rc == -ETIMEDOUT) { + pr_err("Timeout when read CPPC stat: %#08x\n", stat); + return; + } + rc = readl_relaxed_poll_timeout(npudev->power_reg_base + CPPC2_DATA0, + stat, (stat & 0x1), PERIOD_T, TIMEOUT); + if (rc == -ETIMEDOUT) { + pr_err("Timeout when read config channel status: %#08x\n", stat); + return; + } + + writel_relaxed(CONFIG_POWER_D0, npudev->power_reg_base + CPPC2_DATA0); + writel_relaxed(CONFIG_POWER_ON_D1, npudev->power_reg_base + CPPC2_DATA1); + writel_relaxed(0x1, npudev->power_reg_base + AP_CPPC2_SET); + + rc = readl_relaxed_poll_timeout(npudev->power_reg_base + CPPC2_DATA0, + stat, (stat & 0x1), PERIOD_T, TIMEOUT * 5); + if (rc == -ETIMEDOUT) { + pr_err("Timeout when read config channel status: %#08x\n", stat); + return; + } + stat = readl_relaxed(npudev->power_reg_base + CPPC2_DATA1); + if ((stat & 0xFFFFFFF) == 0x8) + pr_debug("Power on"); +} + +static void _phytium_npu_power_manager_acpi(struct phytium_npu_dev *npudev, int on) +{ + acpi_handle handle = ACPI_HANDLE(npudev->dev); + union acpi_object args[3]; + acpi_status status; + unsigned long long rv; + + struct acpi_object_list arg_list = { + .pointer = args, + .count = ARRAY_SIZE(args), + }; + + if (!handle) + return; + + pr_debug("set npu power %d\n", on); + args[0].type = 
ACPI_TYPE_INTEGER; + args[0].integer.value = NPU_ACPI_POWER_TYPE; + args[1].type = ACPI_TYPE_INTEGER; + args[1].integer.value = 0; + args[2].type = ACPI_TYPE_INTEGER; + args[2].integer.value = 0; + + if (on) { + status = acpi_evaluate_integer(handle, "PPWO", &arg_list, &rv); + if (ACPI_FAILURE(status)) + pr_err("NO PS0 Method\n"); + if (rv) + pr_err("Failed to power on\n"); + pr_debug("acpi_evaluate_integer PPWO ok\n"); + } else { + status = acpi_evaluate_integer(handle, "PPWD", &arg_list, &rv); + if (ACPI_FAILURE(status)) + pr_err("NO PS3 Method\n"); + + if (rv) + pr_err("Failed to power off\n"); + pr_debug("acpi_evaluate_integer PPWD OK\n"); + } +} + +static void _phytium_npu_get_voltage_acpi(struct phytium_npu_dev *npudev, int *voltage) +{ + acpi_handle handle = ACPI_HANDLE(npudev->dev); + union acpi_object args[3]; + acpi_status status; + unsigned long long rv; + + struct acpi_object_list arg_list = { + .pointer = args, + .count = ARRAY_SIZE(args), + }; + + if (!handle) + return; + + pr_debug("Get NPU voltage\n"); + args[0].type = ACPI_TYPE_INTEGER; + args[0].integer.value = NPU_ACPI_POWER_TYPE; + args[1].type = ACPI_TYPE_INTEGER; + args[1].integer.value = 0; + args[2].type = ACPI_TYPE_INTEGER; + args[2].integer.value = 0; + + status = acpi_evaluate_integer(handle, "PGDV", &arg_list, &rv); + if (ACPI_FAILURE(status)) + pr_err("NO PGDV Method\n"); + if (rv < 0) + pr_err("Failed to get voltage\n"); + pr_debug("Get PGDV ok,voltage:%llu\n", rv); + if (rv & 0x80000000) + *voltage = -1; + else + *voltage = (int)rv; +} + +void _phytium_npu_voltage_read(struct phytium_npu_dev *npudev) +{ + if (!npudev->voltage_val) + _phytium_npu_get_voltage_acpi(npudev, &npudev->voltage_val); + pr_debug("voltage value:%d", npudev->voltage_val); +} + +static void _phytium_npu_poweroff(struct phytium_npu_dev *npudev) +{ + if (npudev->power_reg_base) + _phytium_npu_poweroff_dtb(npudev); + else + _phytium_npu_power_manager_acpi(npudev, FALSE); +} + +static void _phytium_npu_powerup(struct 
phytium_npu_dev *npudev) +{ + if (npudev->power_reg_base) + _phytium_npu_powerup_dtb(npudev); + else + _phytium_npu_power_manager_acpi(npudev, TRUE); +} + +static int phytium_npu_resume(struct phytium_npu_dev *npudev) +{ + _phytium_npu_powerup(npudev); + phytium_npu_hw_reset_self(npudev); + _phytium_npu_voltage_read(npudev); + pr_debug("NPU set power up Ok, start npu hw reset"); + return 0; +} + +static int phytium_npu_suspend(struct phytium_npu_dev *npudev) +{ + phytium_npu_config_clock(npudev, FALSE); + _phytium_npu_poweroff(npudev); + + return 0; +} + +void phytium_npu_try_resume_work(struct phytium_npu_dev *npudev) +{ + dev_dbg(npudev->dev, "%s: resume npu!\n", __func__); + if (npudev->power_status == NPU_STATE_ON) { + if (cancel_delayed_work_sync(&npudev->rt_delay_work)) + pr_debug("cancel_delay_work success"); + else + pr_debug("cancel delay work failed"); + } else if (npudev->power_status == NPU_STATE_OFF) { + phytium_npu_resume(npudev); + npudev->power_status = NPU_STATE_ON; + npudev->load_status = 100; + } +} + +static void phytium_npu_runtime_suspend_work(struct work_struct *work) +{ + struct phytium_npu_dev *npudev = + container_of(work, struct phytium_npu_dev, rt_delay_work.work); + + mutex_lock(&npudev->mutex_lock); + dev_dbg(npudev->dev, "%s: runtime expired!\n", __func__); + + if (npudev->power_status == NPU_STATE_ON) { + phytium_npu_suspend(npudev); + npudev->power_status = NPU_STATE_OFF; + npudev->load_status = NPU_STATE_OFF; + } else { + pr_info("NPU is poweroff"); + } + mutex_unlock(&npudev->mutex_lock); +} + +int phytium_npu_schedule_suspend(struct phytium_npu_dev *npudev, uint32_t delay_ms) +{ + unsigned long cur_time = jiffies + msecs_to_jiffies(delay_ms); + int ret; + + ret = schedule_delayed_work(&npudev->rt_delay_work, cur_time - jiffies); + if (!ret) { + mod_delayed_work(system_wq, &npudev->rt_delay_work, cur_time - jiffies); + pr_debug("Restart delay work"); + } + return ret; +} + +static int phytium_npu_common_init(struct phytium_npu_dev 
*npu) +{ + int ret = 0; + + /* config power state */ + npu->power_status = NPU_STATE_OFF; + npu->load_status = NPU_STATE_OFF; + npu->voltage_val = 0; + phytium_npu_try_resume_work(npu); + phytium_npu_mmu_config_init(npu); + phytium_npu_mmu_dev_init(npu, npu->nmmu_config.page_size); + + INIT_DELAYED_WORK(&npu->rt_delay_work, phytium_npu_runtime_suspend_work); + INIT_WORK(&npu->stream_work, do_work); + npu->stream_wq = alloc_workqueue("npu-stream", WQ_HIGHPRI | WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE); + if (!npu->stream_wq) { + ret = -ENOMEM; + return ret; + } + npu->is_cache_stream_on = TRUE; + mutex_init(&npu->mutex_lock); + spin_lock_init(&npu->spin_irq_lock); + phytium_npu_hw_reset_self(npu); + phytium_npu_config_preload(npu); + phytium_npu_debugfs_init(npu); + phytium_npu_schedule_suspend(npu, NPU_AUTO_SUSPEND_TIMEOUT); + return ret; +} + +int phytium_npu_register_dev(struct device *dev, void *plat_data, void __iomem *reg_base) +{ + int ret; + + struct phytium_npu_dev *npu = devm_kzalloc(dev, sizeof(struct phytium_npu_dev), GFP_KERNEL); + + if (!npu) { + ret = -ENOMEM; + goto free_dev; + } + gnpu_dev = npu; + npu->reg_base = reg_base; + npu->power_reg_base = plat_data; + npu->activated_stream = NULL; + npu->queued_stream = NULL; + INIT_LIST_HEAD(&npu->sessions_list); + INIT_LIST_HEAD(&npu->sched_sess_list); + dev_set_drvdata(dev, npu); + npu->dev = dev; + ret = phytium_npu_register_misc(npu); + if (ret) { + dev_err(dev, "%s: failed to add npu dev!", __func__); + goto free_dev; + } + + phytium_npu_common_init(npu); + + return ret; + +free_dev: + gnpu_dev = NULL; + devm_kfree(dev, npu); + return ret; +} +EXPORT_SYMBOL(phytium_npu_register_dev); + +int phytium_npu_unregister_dev(struct device *dev) +{ + struct phytium_npu_dev *npudev = dev_get_drvdata(dev); + int ret; + + WARN_ON(!npudev); + if (!npudev) { + pr_err("npudev is NULL"); + return -EINVAL; + } + + flush_workqueue(npudev->stream_wq); + destroy_workqueue(npudev->stream_wq); + 
cancel_delayed_work_sync(&npudev->rt_delay_work); + phytium_npu_session_release_all(npudev); + phytium_npu_debugfs_remove(npudev); + ret = phytium_npu_unregister_misc(npudev); + if (ret) { + pr_err("unregister misc npu err"); + return ret; + } + devm_kfree(dev, npudev); + return ret; +} +EXPORT_SYMBOL(phytium_npu_unregister_dev); + +static int phytium_npu_response_stream(struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream, + int sid, int err_no, int errflag) +{ + struct npu_user_stream_rsp *nustream_rsp; + struct npu_user_submit_stream *nustream = &nstream->nustream; + int size, alloc_size; + + alloc_size = sizeof(*nustream_rsp); + if (nstream->nustream.estream.stype == NPU_STREAM_SUBMIT) { + size = MAX_NPU_UCNN_RSP_SIZE; + alloc_size += (MAX_NPU_UCNN_RSP_SIZE - MAX_NPU_USER_RSP_SIZE); + } else { + size = MAX_NPU_USER_RSP_SIZE; + } + + if (nustream->estream.stype & NPU_COMPUTEFLAG_NOTIFY) { + if (!nstream->rsp) { + nustream_rsp = kzalloc(alloc_size, GFP_KERNEL); + if (!nustream_rsp) + return -ENOMEM; + + nstream->rsp = nustream_rsp; + INIT_LIST_HEAD(&nustream_rsp->stream_rsp_list_entry); + } + + nustream_rsp->ursp.sid = sid; + nustream_rsp->ursp.err_no = err_no; + nustream_rsp->ursp.rsp_err_flags = errflag; + nustream_rsp->ursp.session_id = sess->id; + nustream_rsp->rsp_size = size; + nustream_rsp->session = sess; + + list_add_tail(&nustream_rsp->stream_rsp_list_entry, &sess->response_list); + pr_debug("session id:%d response stream id:%#x, err_no:%d errflag:%x", + sess->id, sid, err_no, errflag); + wake_up(&sess->response_wq); + if (!sess->npu_dev->is_use_repeat) { + nustream_rsp->session = NULL; + nstream->rsp = NULL; + list_del(&nstream->stream_list_entry); + kfree(nstream); + pr_debug("free stream id:%#x", sid); + } + } + + return 0; +} + +void phytium_npu_repeat_stream(struct phytium_npu_session *sess, int stream_id, int is_repeat) +{ + struct phytium_npu_stream *curr_stream, *next_stream; + struct npu_user_submit_stream *nustream; + int 
is_find = 0; + + list_for_each_entry_safe(curr_stream, next_stream, &sess->stream_list, stream_list_entry) { + nustream = &curr_stream->nustream; + if (nustream->estream.sid == stream_id && is_repeat) { + curr_stream->stream_status = NPU_STREAM_NONE; + curr_stream->infer_status = NPU_STREAM_INFER_NONE; + is_find = 1; + } + } + + if (is_find) + pr_debug("find stream id %d and repeat %s", stream_id, + is_repeat ? "TRUE" : "FALSE"); +} + +static inline void +phytium_npu_set_dma_buf_status(struct npu_unified_heap_buffer *buffer, int status) +{ + buffer->buffer_status = status; +} + +void phytium_npu_set_stream_buf_status_with_fd(struct phytium_npu_session *sess, int fd, int status) +{ + struct npu_mctx_map *nmmap; + + nmmap = phytium_npu_find_mmu_ctx_map(sess, fd); + if (nmmap) { + phytium_npu_set_dma_buf_status(nmmap->dma_buf->priv, status); + pr_debug("Set dma buffer %d status %d", fd, status); + } +} + +static void phytium_npu_update_stream_buf_status(struct phytium_npu_stream *nstream) +{ + struct npu_user_submit_stream *nustream = &nstream->nustream; + struct npu_excute_stream *estream = &nustream->estream; + struct npu_mctx_map *nmmap; + size_t i; + + for (i = 0; i < estream->in; i++) { + nmmap = phytium_npu_find_mmu_ctx_map(nstream->session, estream->fd[i]); + if (nmmap) { + phytium_npu_set_dma_buf_status(nmmap->dma_buf->priv, NPU_BUF_UPDATED_BY_HW); + pr_debug("Stream complete and set dma buffer %d status %d", + estream->fd[i], NPU_BUF_UPDATED_BY_HW); + } + } +} + +int phytium_npu_check_stream_buf_is_ready(struct phytium_npu_stream *nstream) +{ + struct npu_user_submit_stream *nustream = &nstream->nustream; + struct npu_excute_stream *estream = &nustream->estream; + struct npu_mctx_map *nmmap; + struct npu_unified_heap_buffer *buffer; + size_t i; + + if (nstream->all_buf_ready) + return TRUE; + + for (i = 0; i < estream->in; i++) { + nmmap = phytium_npu_find_mmu_ctx_map(nstream->session, estream->fd[i]); + if (nmmap) { + buffer = nmmap->dma_buf->priv; + if 
(buffer->buffer_status == NPU_BUF_UNUPDATE) + return FALSE; + } else { + return FALSE; + } + } + nstream->all_buf_ready = TRUE; + pr_debug("Set stream %d all buf ready status %d", estream->fd[i], nstream->all_buf_ready); + return TRUE; +} + +int phytium_npu_prepare_hw_4_queued_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream) +{ + phytium_npu_mmu_config_dev_mmu(sess); + phytium_npu_config_dma_address(sess, nstream); + return 0; +} + +int phytium_npu_submit_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream) +{ + GET_TIME_NS(&npu->ts); + phytium_npu_try_resume_work(npu); + phytium_npu_config_clock(npu, TRUE); + phytium_npu_config_hl_wdt(npu); + phytium_npu_mmu_config_dev_mmu(sess); + phytium_npu_config_dma_address(sess, nstream); + phytium_npu_clear_irq_status(npu, NPU_ALL_EVENT); + phytium_npu_debug_set_hw_register(npu, npu->activated_stream->session); + phytium_npu_config_event(npu, NPU_ALL_EVENT, TRUE); /* enable all event */ + phytium_npu_config_start_inference(npu, sess, nstream); + GET_TIME_NS(&npu->te); + phytium_npu_get_time_span(&npu->ts, &npu->te, &npu->tspan); + GET_TIME_NS(&npu->ts); + return 0; +} + +static void phytium_npu_inference_complete(struct phytium_npu_dev *npudev) +{ + struct phytium_npu_stream *nstream = npudev->activated_stream; + struct phytium_npu_session *sess = nstream->session; + + if ((npudev->irq_status != NPU_INFERENCE_COMPLETE_EVENT) || !nstream) + return; + phytium_npu_update_stream_buf_status(nstream); + nstream->infer_status = NPU_STREAM_INFER_DONE; + GET_TIME_NS(&npudev->te); + phytium_npu_get_time_span(&npudev->ts, &npudev->te, &npudev->tspan); + phytium_npu_response_stream(sess, nstream, nstream->nustream.estream.sid, NPU_RSP_OK, 0); + phytium_npu_debug_show_register(npudev, sess); + phytium_npu_schedule_suspend(npudev, NPU_AUTO_SUSPEND_TIMEOUT); +} + +int phytium_npu_common_resume(struct device *dev) +{ + 
struct phytium_npu_dev *npudev = dev_get_drvdata(dev); + + mutex_lock(&npudev->mutex_lock); + _phytium_npu_powerup(npudev); + phytium_npu_schedule_stream_queues(npudev, TRUE); + mutex_unlock(&npudev->mutex_lock); + + return 0; +} +EXPORT_SYMBOL(phytium_npu_common_resume); + +int phytium_npu_common_suspend(struct device *dev) +{ + struct phytium_npu_dev *npudev = dev_get_drvdata(dev); + + mutex_lock(&npudev->mutex_lock); + phytium_npu_rollback_stream(npudev); + phytium_npu_config_clock(npudev, FALSE); + if (!pm_runtime_status_suspended(dev)) + pm_runtime_put_sync(dev); + _phytium_npu_poweroff(npudev); + mutex_unlock(&npudev->mutex_lock); + return 0; +} +EXPORT_SYMBOL(phytium_npu_common_suspend); + +/* top half */ +int phytium_npu_handle_irq(struct device *dev) +{ + struct phytium_npu_dev *npu_dev = dev_get_drvdata(dev); + int status; + int irq_mask = phytium_npu_get_irq_enable_status(npu_dev); + + GET_TIME_NS(&npu_dev->te); + phytium_npu_get_time_span(&npu_dev->ts, &npu_dev->te, &npu_dev->tspan); + GET_TIME_NS(&npu_dev->ts); + status = phytium_npu_get_irq_status(npu_dev); + if (!status) { + pr_debug("No irq here."); + return IRQ_NONE; + } + //clear msi irq status with PCI interface + if (!npu_dev->is_platform_dev) + phytium_npu_clear_msi_irq(npu_dev); + + if (!(status & irq_mask)) + return IRQ_NONE; + + phytium_npu_clear_irq_status(npu_dev, status); + spin_lock(&npu_dev->spin_irq_lock); + npu_dev->irq_status = status & irq_mask; + spin_unlock(&npu_dev->spin_irq_lock); + dev_dbg(npu_dev->dev, "irq status (%#x), bottom status:%#x", status, npu_dev->irq_status); + return IRQ_WAKE_THREAD; +} +EXPORT_SYMBOL(phytium_npu_handle_irq); + +/* bottom half */ +int phytium_npu_handle_thread_irq(struct device *dev) +{ + struct phytium_npu_dev *npu_dev = dev_get_drvdata(dev); + struct phytium_npu_stream *nstream = npu_dev->activated_stream; + int err = 0, err_flag = 0, status; + + mutex_lock(&npu_dev->mutex_lock); + spin_lock(&npu_dev->spin_irq_lock); + status = 
npu_dev->irq_status; + spin_unlock(&npu_dev->spin_irq_lock); + if (status & (NPU_AXI_EVENT | NPU_ERR_EVENT)) { + /* must reset hw */ + err = npu_dev->irq_status & (NPU_AXI_EVENT | NPU_ERR_EVENT); + err_flag |= NPU_RSP_ERROR_HW_SYS_AXI_ERROR_SHF; + phytium_npu_config_event(npu_dev, 0, 0); /* disable event*/ + phytium_npu_clear_irq_status(npu_dev, NPU_ALL_EVENT); + phytium_npu_hw_reset_self(npu_dev); + } + + if (status & NPU_WDT_EVENT) { + err |= status & NPU_WDT_EVENT; + err_flag |= NPU_RSP_ERROR_SW_WDT_EXPIRED_SHF; + } + + if (status & NPU_MMU_EVENT) { + err |= status & NPU_MMU_EVENT; + err_flag |= NPU_RSP_ERROR_HW_SYS_MMU_PAGE_FAULT_SHF; + phytium_npu_mmu_output_status(npu_dev); + } + + if (err) { + dev_err(npu_dev->dev, "NPU hardware err %#x ", err); + if (err & NPU_AXI_EVENT) + dev_err(npu_dev->dev, "AXI ERR %#x", + phytium_npu_get_axi_err_status(npu_dev)); + if (err & NPU_MEM_WDT_EVENT) + phytium_npu_output_mem_wdt_err(npu_dev); + + if (err & NPU_ERR_EVENT) { + //TODO. will do rollback + phytium_npu_rollback_stream(npu_dev); + if (nstream) + phytium_npu_response_stream(nstream->session, nstream, + nstream->nustream.estream.sid, + NPU_RSP_ERROR_CRITICAL_SHF, + err_flag); + goto schedule_stream; + } + + if (err & NPU_MMU_EVENT && nstream) { + phytium_npu_response_stream(nstream->session, nstream, + nstream->nustream.estream.sid, + NPU_RSP_ERROR_HW_SYS_MMU_PAGE_FAULT_SHF, + err_flag); + phytium_npu_update_activated_stream_4_err_mmu(npu_dev); + phytium_npu_schedule_suspend(npu_dev, NPU_AUTO_SUSPEND_TIMEOUT); + } + } + + if (status & NPU_INFERENCT_ERR_EVENT) { + phytium_npu_rollback_stream(npu_dev); + if (nstream) + phytium_npu_response_stream(nstream->session, nstream, + nstream->nustream.estream.sid, + NPU_RSP_ERROR_HW_CORE_CNN_ERROR_SHF, + err_flag); + goto schedule_stream; + } + + if (status & NPU_INFERENCE_COMPLETE_EVENT) { + pr_debug("DEBUG:NPU infers complete here %#x", npu_dev->irq_status); + phytium_npu_inference_complete(npu_dev); + 
phytium_npu_try_excute_queued_stream(npu_dev); + } + +schedule_stream: + + phytium_npu_schedule_stream_queues(npu_dev, FALSE); + mutex_unlock(&npu_dev->mutex_lock); + return IRQ_HANDLED; +} +EXPORT_SYMBOL(phytium_npu_handle_thread_irq); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium NPU driver common lib"); +MODULE_IMPORT_NS(DMA_BUF); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/phytium-npu/phytium_npu_debug.c b/drivers/staging/phytium-npu/phytium_npu_debug.c new file mode 100644 index 0000000000000000000000000000000000000000..3c21aec6798ffa1591ff62eddc74e7026a5286b9 --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_debug.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#include +#include +#include +#include "phytium_npu.h" +#include "phytium_npu_mmu.h" +#include "linux/phytium_npu_dma_buf_heap.h" +#ifdef PHYTIUM_NPU_PLATFORM +#include "phytium_npu_leopard_reg.h" +#else +#include "phytium_npu_reg.h" +#endif + +#define DEBUG_BAND_COUNT_RESET 0x3fffffff +#define DEBUG_BAND_EN 0x1 +#define DEBUG_PERFORMANCE_COUNT_RESET 0x1 +#define DEBUG_PERFORMANCE_EN 0x1 +#define DEBUG_B_OR_C_STREAM 0x1 +#define DEBUG_B_OR_C_LAYER 0x2 +#define DEBUG_B_OR_C_PASS 0x3 +#define DEBUG_PERF_S 0x4 +#define DEBUG_PERF_L 0x8 +#define DEBUG_PERF_P 0xC +#define DEBUG_FILE_MODE (DEBUG_TYPE_BAND_STREAM | DEBUG_TYPE_BAND_LAYER \ + | DEBUG_TYPE_BAND_PASS | DEBUG_TYPE_PERF_LAYER \ + | DEBUG_TYPE_PERF_PASS | DEBUG_TYPE_PERF_STREAM) +#define DEBUG_FILE_CRC_MODE (DEBUG_TYPE_CRC_STREAM | DEBUG_TYPE_CRC_LAYER \ + | DEBUG_TYPE_CRC_PASS) +#define DEBUG_FILE_MODE_BAND (DEBUG_TYPE_BAND_STREAM | DEBUG_TYPE_BAND_LAYER \ + | DEBUG_TYPE_BAND_PASS) +#define DEBUG_FILE_MODE_PERF (DEBUG_TYPE_PERF_STREAM | DEBUG_TYPE_PERF_LAYER \ + | DEBUG_TYPE_PERF_PASS) +int is_debug_get_time_span; + +void phytium_npu_get_time_span(struct timespec64 *start, + struct timespec64 
*end, + struct timespec64 *span) +{ + if (!is_debug_get_time_span) + return; + if (!start || !end || !span) + return; + span->tv_sec = end->tv_sec - start->tv_sec; + if (span->tv_sec > 0) { + span->tv_sec--; + end->tv_nsec += 1000000000; + } + /* Calculate the nanoseconds span. */ + span->tv_nsec = end->tv_nsec - start->tv_nsec; + if (span->tv_nsec > 1000000000) { + span->tv_sec++; + span->tv_nsec -= 1000000000; + } + pr_info("@@@@ scheduled span: %llu (time-ns)\n", + (uint64_t)span->tv_sec * 1000000000ULL + + (uint64_t)span->tv_nsec); +} + +int phytiun_npu_check_debug_fs_cfg(struct phytium_npu_session *sess) +{ + struct phytium_npu_debugfs *dbgfs = &sess->dbgfs; + struct npu_mctx_map *ncmap; + struct npu_unified_heap_buffer *buffer; + + if (dbgfs->debug_mode <= DEBUG_MODE_REGISTER) + return 0; + if (dbgfs->debug_mode == DEBUG_MODE_MEMORY_FILE) { + if (dbgfs->debug_type & DEBUG_FILE_MODE) { + ncmap = phytium_npu_find_mmu_ctx_map(sess, dbgfs->debug_fd); + if (!ncmap) { + pr_err("Not find debug fd %d from debug command!", dbgfs->debug_fd); + return -EFAULT; + } + buffer = ncmap->dma_buf->priv; + if (buffer) { + dbgfs->debug_size = buffer->req_len; + dbgfs->debug_addr = ncmap->virt_addr; + } else { + pr_err("not find buffer structure"); + } + pr_debug("Get debug file info debug_size:%d, debug_addr:%llx ", + dbgfs->debug_size, dbgfs->debug_addr); + } + if (dbgfs->debug_type & DEBUG_FILE_CRC_MODE) { + ncmap = phytium_npu_find_mmu_ctx_map(sess, dbgfs->debug_crc_fd); + if (!ncmap) { + pr_err("Not find debug crc fd %d from debug command!", + dbgfs->debug_fd); + return -EFAULT; + } + dbgfs->debug_crc_addr = ncmap->virt_addr; + pr_debug("Get debug crc file info debug_crc_addr:%llx ", + dbgfs->debug_crc_addr); + } + phytium_npu_debugfs_session_init(sess->npu_dev, sess); + } + return 0; +} + +void phytium_npu_debug_show_register(struct phytium_npu_dev *npu, struct phytium_npu_session *sess) +{ + struct phytium_npu_debugfs *dbg = &sess->dbgfs; + + if (dbg->debug_mode != 
DEBUG_MODE_REGISTER) + return; + + if (dbg->debug_type & DEBUG_TYPE_REG_BAND) { + pr_info("**** debug band info ****"); + pr_info("request count enable %#x", REGREAD32(npu, NPU_CACHE_REQ_CNT_EN)); + pr_info("stream read request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_CMDREQ_RD), + REGREAD32(npu, NPU_CACHE_CMDREQ_RD_WORD)); + pr_info("stream bck write request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_CMDBCK_REQ_WR), + REGREAD32(npu, NPU_CACHE_CMDBCK_REQ_WR_WORD)); + pr_info("stream crc write request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_CMDCRC_REQ_WR), + REGREAD32(npu, NPU_CACHE_CMDCRC_REQ_WR_WORD)); + pr_info("stream dbg request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_CMDDBG_REQ_WR), + REGREAD32(npu, NPU_CACHE_CMDDBG_REQ_WR_WORD)); + pr_info("stream fence request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_CMDREQ_FENCE), + REGREAD32(npu, NPU_CACHE_CREQ_FENCE_WORD)); + pr_info("input read request0 counter %d, word %d", + REGREAD32(npu, NPU_CACHE_IREQ0_RD), + REGREAD32(npu, NPU_CACHE_IREQ0_RD_WORD)); + pr_info("mem read request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_MM_REQ_RD), + REGREAD32(npu, NPU_CACHE_MM_REQ_RD_WORD)); + pr_info("mem write request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_MM_REQ_WR), + REGREAD32(npu, NPU_CACHE_MM_REQ_WR_WORD)); + pr_info("coefficient read request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_CREQ_RD), + REGREAD32(npu, NPU_CACHE_CREQ_RD_WORD)); + pr_info("a segment read request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_AREQ_RD), + REGREAD32(npu, NPU_CACHE_AREQ_RD_WORD)); + pr_info("opk write request counter %d, word %d", + REGREAD32(npu, NPU_CACHE_OPK_REQ_WR), + REGREAD32(npu, NPU_CACHE_OPK_REQ_WR_WORD)); + } + + if (dbg->debug_type & DEBUG_TYPE_REG_PERF) { + pr_info("**** debug performance info ****"); + pr_info("write data to memory count:%d", REGREAD32(npu, NPU_PERF_WRITE_DS)); + pr_info("read from memory count: %d, write %d", + REGREAD32(npu, NPU_PERF_READ), + 
REGREAD32(npu, NPU_PERF_WRITE)); + pr_info("read stream from memory count:%d, write:%d", + REGREAD32(npu, NPU_PERF_READS), + REGREAD32(npu, NPU_PERF_WRITES)); + pr_info("read id from memory count:%d, write %d", + REGREAD32(npu, NPU_PERF_READ_IS), + REGREAD32(npu, NPU_PERFWRITE_IS)); + pr_info("read burst s1 count:%d, write %d", + REGREAD32(npu, NPU_PERF_RD_BS1), + REGREAD32(npu, NPU_PERF_WR_BS1)); + pr_info("read burst s2 count:%d write %d", + REGREAD32(npu, NPU_PERF_RD_BS2), + REGREAD32(npu, NPU_PERF_WR_BS2)); + pr_info("read burst s3 count:%d write %d", + REGREAD32(npu, NPU_PERF_RD_BS3), + REGREAD32(npu, NPU_PERF_WR_BS3)); + pr_info("read burst s4 count:%d write %d", + REGREAD32(npu, NPU_PERF_RD_BS4), + REGREAD32(npu, NPU_PERF_WR_BS4)); + } +} + +void phytium_npu_debug_set_hw_register(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess) +{ + struct phytium_npu_debugfs *dbg = &sess->dbgfs; + int band_dbg = 0, perf_dbg = 0; + + if (dbg->debug_mode == DEBUG_MODE_NONE) + return; + + if (dbg->debug_mode == DEBUG_MODE_REGISTER) { + if (dbg->debug_type & DEBUG_TYPE_REG_BAND) { + REGWRITE32(npu, NPU_CACHE_RESET, DEBUG_BAND_COUNT_RESET); + REGWRITE32(npu, NPU_CACHE_REQ_CNT_EN, DEBUG_BAND_EN); + } else if (dbg->debug_type & DEBUG_TYPE_REG_PERF) { + REGWRITE32(npu, NPU_PERF_RESET, DEBUG_PERFORMANCE_COUNT_RESET); + REGWRITE32(npu, NPU_PERF_ENABLE, DEBUG_PERFORMANCE_EN); + } else { + pr_err("Err:Not set debug type [%s]", __func__); + } + } else if (dbg->debug_mode == DEBUG_MODE_MEMORY_FILE) { + if (dbg->debug_type & DEBUG_FILE_MODE_BAND) { + switch (dbg->debug_type & 0xF0) { + case DEBUG_TYPE_BAND_STREAM: + band_dbg |= DEBUG_B_OR_C_STREAM; + break; + case DEBUG_TYPE_BAND_LAYER: + band_dbg |= DEBUG_B_OR_C_LAYER; + break; + case DEBUG_TYPE_BAND_PASS: + band_dbg |= DEBUG_B_OR_C_PASS; + break; + } + } + + if (dbg->debug_type & DEBUG_FILE_MODE_PERF) { + if (dbg->debug_type & DEBUG_TYPE_PERF_STREAM) { + switch (dbg->debug_type & 0xF00) { + case DEBUG_TYPE_PERF_STREAM: + 
perf_dbg |= DEBUG_PERF_S; + break; + case DEBUG_TYPE_PERF_LAYER: + perf_dbg |= DEBUG_PERF_L; + break; + case DEBUG_TYPE_PERF_PASS: + perf_dbg |= DEBUG_PERF_P; + break; + } + } + } + + if (dbg->debug_type & DEBUG_FILE_MODE) { + REGWRITE32(npu, NPU_CH0_DEBUG_ADDRESS, dbg->debug_addr); + REGWRITE32(npu, NPU_CH0_DEBUG_SIZE, dbg->debug_size); + REGWRITE32(npu, NPU_CH0_DEBUG_CONTROL, perf_dbg | band_dbg); + pr_debug("set npu hw debug addr:%llx, size:%u, control :%x\n", + dbg->debug_addr, dbg->debug_size, perf_dbg | band_dbg); + } + + if (dbg->debug_type & DEBUG_FILE_CRC_MODE) { + band_dbg = 0; + if (dbg->debug_type & DEBUG_FILE_CRC_MODE) { + switch (dbg->debug_type & 0xF000) { + case DEBUG_TYPE_CRC_STREAM: + band_dbg |= DEBUG_B_OR_C_STREAM; + break; + case DEBUG_TYPE_CRC_LAYER: + band_dbg |= DEBUG_B_OR_C_LAYER; + break; + case DEBUG_TYPE_CRC_PASS: + band_dbg |= DEBUG_B_OR_C_PASS; + break; + } + REGWRITE32(npu, NPU_CH0_CRC_CONTROL, band_dbg); + REGWRITE32(npu, NPU_CH0_CRC_ADDRESS, dbg->debug_crc_addr); + pr_debug("set npu hw debug addr:%llx, control :%x\n", + dbg->debug_crc_addr, band_dbg); + } + } + } +} + +void phytium_npu_debugfs_init(struct phytium_npu_dev *npu) +{ + npu->dbgfs_root_dir = NULL; +#ifdef CONFIG_DEBUG_FS + /* create npu0 directory in /sys/kernel/debug/ */ + npu->dbgfs_root_dir = debugfs_create_dir(npu->miscdev.name, NULL); + if (!npu->dbgfs_root_dir) { + pr_err("%s: Failed to create debug %s directory", __func__, npu->miscdev.name); + return; + } + debugfs_create_x32("load", 0444, npu->dbgfs_root_dir, + &npu->load_status); + debugfs_create_x32("config_cluster", 0444, npu->dbgfs_root_dir, + &npu->power_status); + debugfs_create_x32("clk", 0444, npu->dbgfs_root_dir, + &npu->clock_freq); + debugfs_create_x32("voltage", 0444, npu->dbgfs_root_dir, + &npu->voltage_val); +#endif +} + +void phytium_npu_debugfs_remove(struct phytium_npu_dev *npu) +{ +#ifdef CONFIG_DEBUG_FS + pr_debug("remove debugfs root directory"); + 
debugfs_remove_recursive(npu->dbgfs_root_dir); +#endif +} + +static ssize_t +phytium_npu_debug_perf_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) +{ + struct phytium_npu_session *sess = (struct phytium_npu_session *)file->private_data; + struct phytium_npu_debugfs *dbg = &sess->dbgfs; + struct dma_buf *pbuf; + struct npu_unified_heap_buffer *buffer; + + if (dbg->debug_mode != DEBUG_MODE_MEMORY_FILE) + return 0; + if (!(dbg->debug_type & DEBUG_FILE_MODE)) + return 0; + pr_debug("%s[%d]: user read file debug_perf.bin", __func__, __LINE__); + pbuf = phytium_npu_check_and_get_dma_buf(sess, dbg->debug_fd); + if (pbuf && !dbg->debug_map.vaddr) { + if (dma_buf_vmap(pbuf, &dbg->debug_map)) { + pr_err("%s: Failed to vmap debug dma_buf", __func__); + return 0; + } + } else { + return 0; + } + buffer = pbuf->priv; + pr_debug("dbg->debug_map.vaddr:%#llx %lld, ppos:%lld, readlen:%ld", + (u64)dbg->debug_map.vaddr, (u64)buffer->req_len, *ppos, len); + pr_debug("%s, fd:%d,count:%ld", __func__, dbg->debug_fd, file_count(pbuf->file)); + return simple_read_from_buffer(buf, len, ppos, dbg->debug_map.vaddr, buffer->req_len); +} + +static int phytium_npu_debug_perf_release(struct inode *inode, struct file *file) +{ + struct phytium_npu_session *sess = (struct phytium_npu_session *)file->private_data; + struct phytium_npu_debugfs *dbg = &sess->dbgfs; + struct dma_buf *pbuf; + + pbuf = phytium_npu_check_and_get_dma_buf(sess, dbg->debug_fd); + if (pbuf) { + dma_buf_vunmap(pbuf, &dbg->debug_map); + pr_debug("%s[%d]: vunmap debug dma_buf fd:%d", __func__, __LINE__, dbg->debug_fd); + pr_info("%s[%d]: vunmap debug dma_buf fd:%d", __func__, __LINE__, dbg->debug_fd); + } + pr_debug("%s, fd:%d,count:%ld", __func__, dbg->debug_fd, file_count(pbuf->file)); + memset(&dbg->debug_map, 0, sizeof(dbg->debug_map)); + return 0; +} + +static const struct file_operations _npu_debug_perf_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = phytium_npu_debug_perf_read, + 
.release = phytium_npu_debug_perf_release, +}; + +static ssize_t +phytium_npu_debug_perf_crc_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) +{ + struct phytium_npu_session *sess = (struct phytium_npu_session *)file->private_data; + struct phytium_npu_debugfs *dbg = &sess->dbgfs; + struct dma_buf *pbuf; + struct npu_unified_heap_buffer *buffer; + + if (dbg->debug_mode != DEBUG_MODE_MEMORY_FILE) + return 0; + if (!(dbg->debug_type & DEBUG_FILE_CRC_MODE)) + return 0; + + pr_debug("%s[%d]: user read file debug_perf.crc", __func__, __LINE__); + pbuf = phytium_npu_check_and_get_dma_buf(sess, dbg->debug_crc_fd); + if (pbuf) { + if (!dbg->debug_crc_map.vaddr) { + if (dma_buf_vmap(pbuf, &dbg->debug_crc_map)) { + pr_err("%s: Failed to vmap debug crc dma_buf", __func__); + return 0; + } + } + } else { + return 0; + } + buffer = pbuf->priv; + pr_debug("%s dbg->debug_map.vaddr:%#llx %ld, ppos:%lld, readlen:%ld", __func__, + (u64)dbg->debug_crc_map.vaddr, buffer->req_len, (u64)*ppos, len); + pr_debug("%s, fd:%d,count:%ld", __func__, dbg->debug_crc_fd, file_count(pbuf->file)); + return simple_read_from_buffer(buf, len, ppos, dbg->debug_crc_map.vaddr, buffer->req_len); +} + +static int phytium_npu_debug_perf_crc_release(struct inode *inode, struct file *file) +{ + struct phytium_npu_session *sess = (struct phytium_npu_session *)file->private_data; + struct phytium_npu_debugfs *dbg = &sess->dbgfs; + struct dma_buf *pbuf; + + pbuf = phytium_npu_check_and_get_dma_buf(sess, dbg->debug_crc_fd); + if (pbuf) { + dma_buf_vunmap(pbuf, &dbg->debug_crc_map); + pr_debug("%s[%d]: vunmap debug crc dma_buf fd:%d", __func__, + __LINE__, dbg->debug_crc_fd); + } + pr_info("%s, fd:%d,count:%ld", __func__, dbg->debug_crc_fd, file_count(pbuf->file)); + memset(&dbg->debug_crc_map, 0, sizeof(dbg->debug_crc_map)); + return 0; +} + +static const struct file_operations _npu_debug_perf_crc_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = phytium_npu_debug_perf_crc_read, 
+ .release = phytium_npu_debug_perf_crc_release, +}; + +void phytium_npu_debugfs_session_init(struct phytium_npu_dev *npu, struct phytium_npu_session *sess) +{ +#ifdef CONFIG_DEBUG_FS + char sess_dir_name[20]; + struct phytium_npu_debugfs *dbg = &sess->dbgfs; + + if (dbg->debug_mode != DEBUG_MODE_MEMORY_FILE) + return; + + snprintf(sess_dir_name, sizeof(sess_dir_name) - 1, "session%d", sess->id); + /* create sessionX directory in /sys/kernel/debug/npu0 */ + if (npu->dbgfs_root_dir) { + dbg->sess_dbgfs_dir = debugfs_create_dir(sess_dir_name, npu->dbgfs_root_dir); + if (!dbg->sess_dbgfs_dir) { + pr_err("%s: Failed to create debug %s directory", __func__, sess_dir_name); + return; + } + + if (dbg->debug_type & DEBUG_FILE_MODE) { + if (!debugfs_create_file("debug_perf.bin", 0444, dbg->sess_dbgfs_dir, + sess, &_npu_debug_perf_fops)) + pr_err("%s: Failed to create debug_perf.bin file", __func__); + } + if (dbg->debug_type & DEBUG_FILE_CRC_MODE) { + if (!debugfs_create_file("debug_perf.crc", 0444, dbg->sess_dbgfs_dir, + sess, &_npu_debug_perf_crc_fops)) + pr_err("%s: Failed to create debug_perf.crc file", __func__); + } + } +#endif +} + +void phytium_npu_debugfs_session_remove(struct phytium_npu_session *sess) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(sess->dbgfs.sess_dbgfs_dir); + pr_debug("%s[%d]:will remove the directory of debugfs", __func__, __LINE__); +#endif +} diff --git a/drivers/staging/phytium-npu/phytium_npu_dev.c b/drivers/staging/phytium-npu/phytium_npu_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..3c13262db1744b9b2d839536a3a949fed15eeb19 --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_dev.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include "phytium_npu_mmu.h" +#include "phytium_npu.h" +#ifdef PHYTIUM_NPU_PLATFORM +#include "phytium_npu_leopard_reg.h" +#else +#include "phytium_npu_reg.h" +#endif +#define POLL_PERIOD 100 +#define POLL_TIMEOUT 100000 + +static void phytium_npu_set_bit(struct phytium_npu_dev *npudev, u32 offset, u32 bit) +{ + u32 value; + + value = REGREAD32(npudev, offset); + value |= bit; + REGWRITE32(npudev, offset, value); +} + +void phytium_npu_config_clock(struct phytium_npu_dev *npudev, int is_enable) +{ + u32 status; + int rc; + + if (is_enable) { + REGWRITE64(npudev, NPU_SYS_CLK_CTRL0, SYS_CLOCK_AUTO); + REGWRITE64(npudev, NPU_SYS_CLK_CTRL, MAIN_CLOCK_AUTO); + } else { + //wait for the clock being idle + rc = readl_relaxed_poll_timeout(npudev->reg_base + NPU_SYS_CLK_STATUS, + status, !status, POLL_PERIOD, POLL_TIMEOUT); + + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout sysclk: %#08x\n", + __func__, __LINE__, status); + + rc = readl_relaxed_poll_timeout(npudev->reg_base + NPU_MDBG_IDLE, + status, 0xFFFF, POLL_PERIOD, POLL_TIMEOUT); + + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mdbg: %#08x\n", + __func__, __LINE__, status); + REGWRITE64(npudev, NPU_SYS_CLK_CTRL0, 0); + REGWRITE64(npudev, NPU_SYS_CLK_CTRL, 0); + } +} + +static inline void phytium_npu_config_soft_reset(struct phytium_npu_dev *npudev) +{ + u32 status; + int rc; + + REGWRITE64(npudev, NPU_SYS_RESET_CTRL, NPU_SOFT_RESET_CONFIG); + + rc = readl_relaxed_poll_timeout(npudev->reg_base + NPU_SYS_RESET_CTRL, + status, !status, POLL_PERIOD, POLL_TIMEOUT); + + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout reset: %#08x\n", __func__, __LINE__, status); +} + +int phytium_npu_hw_reset_self(struct phytium_npu_dev *npudev) +{ + u32 status; + int rc; + + phytium_npu_config_soft_reset(npudev); + + rc = readl_relaxed_poll_timeout(npudev->reg_base + NPU_CH0_VHA_EVENT_STATUS, + status, status & NPU_HW_BE_READY, + POLL_PERIOD, POLL_TIMEOUT); + + if (rc == -ETIMEDOUT) + pr_debug("%s %d, 
timeout soft reset: %#08x\n", __func__, __LINE__, status); + + phytium_npu_set_bit(npudev, NPU_CH0_VHA_EVENT_CLEAR, NPU_HW_BE_READY); + return 0; +} + +int phytium_npu_config_dma_address(struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream) +{ + struct phytium_npu_dev *npudev = sess->npu_dev; + struct npu_user_submit_stream *nusstream = &nstream->nustream; + struct npu_excute_stream *nestream = &nusstream->estream; + struct npu_mctx_map *nmmap; + int i, address_used = 0; + + nmmap = phytium_npu_find_mmu_ctx_map(sess, nusstream->stream_fd); + if (!nmmap) { + pr_err("Not find stream fd %d", nusstream->stream_fd); + return -EFAULT; + } + REGWRITE64(npudev, NPU_CH0_CMD_BASE_ADDRESS, nmmap->virt_addr); + + for (i = 0; i < nestream->all; i++) { + if (!nestream->fd[i]) + continue; + nmmap = phytium_npu_find_mmu_ctx_map(sess, nestream->fd[i]); + if (!nmmap) { + pr_err("Not find stream buf fd %d", nestream->fd[i]); + return -EFAULT; + } + if (nusstream->stream_fd == nestream->fd[i]) + continue; + if (i < 8) + REGWRITE64(npudev, NPU_CH0_ADDR0 + nusstream->idx[i] * 8, nmmap->virt_addr); + else + REGWRITE64(npudev, NPU_CH0_ADDR8 + nusstream->idx[i] * 8, nmmap->virt_addr); + address_used |= 0x1 << nusstream->idx[i]; + if (!(nmmap->map_type & NPU_MAP_TYPE_INFERENCE)) + address_used |= 0x1 << (nusstream->idx[i] + 16); + } + REGWRITE64(npudev, NPU_CH0_ADDR_USED, address_used); + pr_debug("DEV set channel used value:%#x", address_used); + return 0; +} + +u32 phytium_npu_get_irq_status(struct phytium_npu_dev *npudev) +{ + return REGREAD32(npudev, NPU_CH0_VHA_EVENT_STATUS); +} + +u32 phytium_npu_get_irq_enable_status(struct phytium_npu_dev *npudev) +{ + return REGREAD32(npudev, NPU_CH0_VHA_EVENT_ENABLE); +} + +void phytium_npu_config_hl_wdt(struct phytium_npu_dev *npudev) +{ + //monitor the event for pass layer (1), layer group (2) + REGWRITE64(npudev, NPU_SYS_HWDT_CM, NPU_HL_WDT_CYCLES); + REGWRITE64(npudev, NPU_SYS_HWDT_CTRL, 0x1); + REGWRITE64(npudev, 
NPU_SYS_HWDT, 0); +} + +void phytium_npu_config_mem_wdt(struct phytium_npu_dev *npudev) +{ + //monitor the event for memory access. same to hl wdt mode + REGWRITE64(npudev, NPU_SYS_MEM_WDT_CM, NPU_MEM_WDT_CYCLES); + REGWRITE64(npudev, NPU_SYS_MEM_WDT_CTRL, 0x1); + REGWRITE64(npudev, NPU_SYS_MWDT, 0); +} + +void phytium_npu_config_preload(struct phytium_npu_dev *npudev) +{ + REGWRITE32(npudev, NPU_CH0_PRELOAD_CONTROL, NPU_PRELOAD_CFG); +} + +void phytium_npu_config_start_inference(struct phytium_npu_dev *npudev, + struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream) +{ + u32 ctrl, priority = 0; + + ctrl = min(2048, nstream->nustream.stream_size); + ctrl = (ctrl / 32 - 1) << 1; + ctrl |= NPU_HW_START_EN; + ctrl |= (priority << 8); + ctrl |= (sess->mmu_ctx[NPU_MMU_CONTEXT_MODULE_ID].context_id << 12); + REGWRITE32(npudev, NPU_CH0_CONTROL, ctrl); + pr_debug("%s start npu :%#x", __func__, ctrl); +} + +void phytium_npu_config_event(struct phytium_npu_dev *npudev, u32 events, int is_enable) +{ + if (is_enable) + REGWRITE32(npudev, NPU_CH0_VHA_EVENT_ENABLE, events); + else + REGWRITE32(npudev, NPU_CH0_VHA_EVENT_ENABLE, 0); + pr_debug("enable irq :%#x", is_enable ? 
events : 0); +} + +void phytium_npu_clear_msi_irq(struct phytium_npu_dev *npudev) +{ + REGWRITE32(npudev, 0x100000 + 0x420, 0x1); +} + +void phytium_npu_clear_irq_status(struct phytium_npu_dev *npudev, u32 event) +{ + REGWRITE32(npudev, NPU_CH0_VHA_EVENT_CLEAR, event); +} + +u32 phytium_npu_get_axi_err_status(struct phytium_npu_dev *npudev) +{ + return REGREAD32(npudev, NPU_SYS_ACE_STATUS); +} + +void phytium_npu_output_mem_wdt_err(struct phytium_npu_dev *npudev) +{ + dev_dbg(npudev->dev, "mem wdt compare match %#x", REGREAD32(npudev, NPU_SYS_MEM_WDT_CM)); + dev_dbg(npudev->dev, "mem wdt time %#x", REGREAD32(npudev, NPU_SYS_MWDT)); + dev_dbg(npudev->dev, "mem bif outstanding %#x", REGREAD32(npudev, NPU_OUTSTANDING_READ)); +} diff --git a/drivers/staging/phytium-npu/phytium_npu_dev_mmu.c b/drivers/staging/phytium-npu/phytium_npu_dev_mmu.c new file mode 100644 index 0000000000000000000000000000000000000000..39f544750f790ee2e1f1f139817d56bcd5dfb1a8 --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_dev_mmu.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ +#include "phytium_npu_mmu.h" +#include "phytium_npu.h" +#include +#ifdef PHYTIUM_NPU_PLATFORM +#include "phytium_npu_leopard_reg.h" +#else +#include "phytium_npu_reg.h" +#endif + +int pg_size[6] = {4096, 16384, 65536, 262144, 1048576, 2097152}; + +static u64 mmu_get_status(u64 status, u64 mask, u64 shift) +{ + return status >> shift & mask; +} + +void phytium_npu_mmu_output_status(struct phytium_npu_dev *npudev) +{ + const char levels[][5] = {"PT", "PD", "PC", "BASE"}; + u64 map_addr = 0; + + u64 s1 = REGREAD64(npudev, NPU_CH0_MMU_ERR_S1); + u64 s2 = REGREAD64(npudev, NPU_CH0_MMU_ERR_S2); + u32 fault = mmu_get_status(s1, MMU_FS1_FAULT_MSK, MMU_FS1_FAULT_SHIFT); + + if (!fault) + return; + + u32 level = mmu_get_status(s1, MMU_FS1_LEVEL_MSK, MMU_FS1_LEVEL_SHIFT); + u32 req_id = mmu_get_status(s1, MMU_FS1_REQ_ID_MSK, MMU_FS1_REQ_ID_SHIFT); + u32 ctxid = mmu_get_status(s1, MMU_FS1_CONTEXT_MSK, MMU_FS1_CONTEXT_SHIFT); + u64 addr = mmu_get_status(s1, MMU_FS1_ADDRESS_MSK, MMU_FS1_ADDRESS_SHIFT); + u32 rnw = mmu_get_status(s1, MMU_FS1_RNW_MSK, MMU_FS1_RNW_SHIFT); + u32 type = mmu_get_status(s1, MMU_FS1_TYPE_MSK, MMU_FS1_TYPE_SHIFT); + + u32 wtb = mmu_get_status(s2, MMU_FS2_WRITEBACK_MSK, MMU_FS2_WRITEBACK_SHIFT); + u32 unique = mmu_get_status(s2, MMU_FS2_CLEANUNIQUE_MSK, MMU_FS2_CLEANUNIQUE_SHIFT); + u32 bank = mmu_get_status(s2, MMU_FS2_BANK_MSK, MMU_FS2_BANK_SHIFT); + u32 tlb_entry = mmu_get_status(s2, MMU_FS2_TLB_ENTRY_MSK, MMU_FS2_TLB_ENTRY_SHIFT); + u32 fbm = mmu_get_status(s2, MMU_FS2_FBM_FAULT_MSK, MMU_FS2_FBM_FAULT_SHIFT); + u32 bif_id = mmu_get_status(s2, MMU_FS2_BIF_ID_MSK, MMU_FS2_BIF_ID_SHIFT); + + REGWRITE64(npudev, NPU_CH0_MMU_MAPPING_CONTEXT, ctxid); + map_addr = REGREAD64(npudev, NPU_CH0_MMU_MAPPING_ADDR); + + pr_debug("%s: *NPU MMU fault: status1:%#llx status2:%#llx\n", __func__, s1, s2); + + pr_warn("%s: NPU MMU fault while %s @ %#llx\n", __func__, + (rnw) ? 
"reading" : "writing", addr << 4); + pr_warn("%s: level:%s Requestor:%#x Context:%#x Type:%s\n", + __func__, levels[level], req_id, ctxid, + (type == 0) ? "VALID" : (type == 2) ? "READ-ONLY" : "UNKNOWN"); + pr_warn("%s: unique:%d bif_id:%#x tlb_entry:%#x slc_bank:%#x\n", __func__, + unique, bif_id, tlb_entry, bank); + pr_warn("%s: current mapping@context%d:%#llx\n", __func__, ctxid, map_addr << 12); + pr_warn("%s: fbm:%u wtb:%u\n", __func__, fbm, wtb); +} + +int phytium_npu_mmu_dev_flush_tlb(struct phytium_npu_dev *npudev, int ctxid) +{ + int value = MMU_CTRL_INVAL_PC | MMU_CTRL_INVAL_PD | MMU_CTRL_INVAL_PT; + + if (ctxid) + value |= ctxid << MMU_CTRL_CTX_SHIFT; + + REGWRITE32(npudev, NPU_CH0_MMU_CTRL_INVAL, value); + pr_debug("%s flush tlb value %#x", __func__, value); + return 0; +} + +static inline void +phytium_npu_mmu_setup_dev_config(struct phytium_npu_dev *npudev, int ctxid, u32 pc_phys) +{ + REGWRITE64(npudev, NPU_CH0_MMU_CTRL_BS, MMU_CTRL_BYPASS_DISABLE); + REGWRITE64(npudev, NPU_CH0_MMU_MAPPING_CONTEXT, ctxid); + REGWRITE64(npudev, NPU_CH0_MMU_MAPPING_ADDR, pc_phys); +} + +int phytium_npu_mmu_config_dev_mmu(struct phytium_npu_session *sess) +{ + struct phytium_npu_dev *npudev = sess->npu_dev; + size_t i; + + for (i = 0; i < ARRAY_SIZE(sess->mmu_ctx); i++) { + phytium_npu_mmu_setup_dev_config(npudev, sess->mmu_ctx[i].context_id, + sess->mmu_ctx[i].pc_base_phys_addr); + pr_debug("%s ctx %ld hw context id %d phys %x", __func__, i, + sess->mmu_ctx[i].context_id, + sess->mmu_ctx[i].pc_base_phys_addr); + phytium_npu_mmu_dev_flush_tlb(npudev, sess->mmu_ctx[i].context_id); + } + return 0; +} + +static int phytium_npu_get_page_size_config(int page_size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pg_size); i++) { + if (page_size == pg_size[i]) + return i; + } + + return 0;//default 4K +} + +int phytium_npu_mmu_dev_init(struct phytium_npu_dev *npudev, int page_size) +{ + u64 ps_config = phytium_npu_get_page_size_config(page_size); + u64 value = 0; + + /* range 2M - 3G 
*/ + pr_debug("%s NPU MMU set NPU va range0:%#llx-%#llx\n", __func__, + NPU_VA_BASE_ADDR, NPU_VA_SIZE); + value = ps_config << MMU_PS_CONFIG_SHIFT; + value |= NPU_VA_BASE_ADDR >> MMU_PS_ADDR_ALIGNSHIFT; + value |= (NPU_VA_BASE_ADDR + NPU_VA_SIZE) >> MMU_PS_END_ADDR_DELT_SHIFT; + REGWRITE64(npudev, NPU_SYS_MMU_PSIZE_RONE, value); + pr_debug("%s NPU_SYS_MMU_PSIZE_RONE:%#llx\n", __func__, value); + return 0; +} diff --git a/drivers/staging/phytium-npu/phytium_npu_mm_mmu.c b/drivers/staging/phytium-npu/phytium_npu_mm_mmu.c new file mode 100644 index 0000000000000000000000000000000000000000..990b6c67489f1fbce3a6811af4b8dcd5e9db54dc --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_mm_mmu.c @@ -0,0 +1,745 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/phytium_npu_dma_buf_heap.h" +#include "phytium_npu.h" +#include "phytium_npu_mmu.h" +#include "phytium_npu_uapi.h" + +#define PD_NUM 512 +#define PT_NUM 512 +#define MMU_DEFAULT_ID 0 +struct npu_mmu_config gconfig;//global variable +//#define DEBUG_MMU +#ifdef DEBUG_MMU +#define PRDEBUG(format, ...) pr_debug(format, ##__VA_ARGS__) +#else +#define PRDEBUG(format, ...) 
+#endif + +static int phytium_npu_release_mmu_source(struct phytium_npu_dev *npudev, + struct phytium_npu_session *sess); + +int phytium_npu_create_new_mmu_context(struct phytium_npu_dev *npu, + struct phytium_npu_session *session) +{ + struct npu_mmu_ctx *ctx; + struct npu_mmu_catalogue *pc; + struct phytium_npu_mmu_context *pnmctx; + size_t i; + + WARN_ON(!session); + if (!session) + return -EINVAL; + + ctx = devm_kzalloc(npu->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->mmu_pc = devm_kzalloc(npu->dev, sizeof(*pc), GFP_KERNEL); + if (!ctx->mmu_pc) + return -ENOMEM; + session->share_mctx = ctx; + for (i = 0; i < ARRAY_SIZE(session->mmu_ctx); i++) { + pnmctx = &session->mmu_ctx[i]; + ctx->mmu_pc->mctx = session->share_mctx; + ctx->pnmctx = pnmctx; + INIT_LIST_HEAD(&ctx->maplist); + pnmctx->mctx = ctx; + pnmctx->context_id = MMU_DEFAULT_ID; + pnmctx->npu_id = 0; + pnmctx->virt_base = PHYT_MMU_DDR_VIRT_BASE; + pnmctx->curr_virt_base = PHYT_MMU_DDR_VIRT_BASE; + pnmctx->dev = npu->dev; + } + return 0; +} + +int phytium_npu_release_mmu_context(struct phytium_npu_dev *npu, struct phytium_npu_session *sess) +{ + struct npu_mmu_ctx *ctx; + struct phytium_npu_mmu_context *pnmctx; + size_t i; + + WARN_ON(!sess); + if (!sess) + return -EINVAL; + + phytium_npu_release_mmu_source(npu, sess);//release mmu source first + + ctx = sess->share_mctx; + if (!ctx) { + dev_err(npu->dev, "ERR:Mmu context is lost."); + } else { + pr_debug("%s,free mmu ctx:%p\n", __func__, ctx); + devm_kfree(npu->dev, ctx->mmu_pc); + devm_kfree(npu->dev, ctx); + sess->share_mctx = NULL; + } + + for (i = 0; i < ARRAY_SIZE(sess->mmu_ctx); i++) { + pnmctx = &sess->mmu_ctx[i]; + pnmctx->mctx = NULL; + pr_debug("%s[%d]:release point mmu context %lu", __func__, __LINE__, i); + } + return 0; +} + +static int phytium_npu_mmu_get_dma_buf(struct npu_mctx_map *nmap) +{ + if (!nmap) + return -EINVAL; + + nmap->dma_buf = dma_buf_get(nmap->dma_buf_fd); + if (IS_ERR(nmap->dma_buf)) + return 
PTR_ERR(nmap->dma_buf); + pr_debug("Get dma_buf(%d) ok.", nmap->dma_buf_fd); + + return 0; +} + +#define CONTEXT_ID_SHIFT 4 +#define GET_CONTEXT_ID(X, D) ((X)->id % 4 + (D) * CONTEXT_ID_SHIFT) + +int phytium_npu_mmu_map_init(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct npu_memory_map *usr_mmap) +{ + struct npu_mctx_map *ncmap; + int ctxid; + + ncmap = devm_kzalloc(npu->dev, sizeof(*ncmap), GFP_KERNEL); + if (!ncmap) + return -ENOMEM; + + PRDEBUG("alloc ncmap :%p for fd %d", ncmap, usr_mmap->fd); + ncmap->dma_buf_fd = usr_mmap->fd; + ncmap->map_type = usr_mmap->map_type; + ncmap->virt_addr = usr_mmap->vaddr; + INIT_LIST_HEAD(&ncmap->mctx_map_entry); + if (usr_mmap->map_type & NPU_MAP_TYPE_INFERENCE) + ctxid = NPU_MMU_CONTEXT_MODULE_ID; + else if (usr_mmap->map_type & NPU_MAP_TYPE_BUF) + ctxid = NPU_MMU_CONTEXT_IO_ID; + else + ctxid = NPU_MMU_CONTEXT_IO_ID; + + sess->mmu_ctx[ctxid].map_type = usr_mmap->map_type; + + sess->mmu_ctx[ctxid].context_id = GET_CONTEXT_ID(sess, ctxid); + + ncmap->mctx = sess->mmu_ctx[ctxid].mctx; + list_add(&ncmap->mctx_map_entry, &sess->mmu_ctx[ctxid].mctx->maplist); + phytium_npu_mmu_get_dma_buf(ncmap); + PRDEBUG("%s type ctxid %d mmu hw contex id %u", __func__, + ctxid, sess->mmu_ctx[ctxid].context_id); + return 0; +} + +static inline int phytium_npu_mmu_map_destroy(struct phytium_npu_dev *npu, + struct npu_mctx_map *ncmap) +{ + list_del(&ncmap->mctx_map_entry); + devm_kfree(npu->dev, ncmap); + PRDEBUG("free ncmap :%p", ncmap); + return 0; +} + +struct npu_mctx_map *phytium_npu_find_mmu_ctx_map(struct phytium_npu_session *sess, int dma_buf_fd) +{ + struct npu_mctx_map *ncmap = NULL, *tmp = NULL; + struct phytium_npu_mmu_context *pnmctx; + size_t i; + + WARN_ON(!sess); + for (i = 0; i < ARRAY_SIZE(sess->mmu_ctx); i++) { + pnmctx = &sess->mmu_ctx[i]; + if (!pnmctx->mctx) { + pr_err("pnmctx->mctx is NULL in mmu_ctx[%lu],%s", i, __func__); + continue; + } + + list_for_each_entry_safe(ncmap, tmp, 
&pnmctx->mctx->maplist, mctx_map_entry) { + if (ncmap->dma_buf_fd == dma_buf_fd) + return ncmap; + } + } + + return NULL; +} + +struct dma_buf *phytium_npu_check_and_get_dma_buf(struct phytium_npu_session *sess, int dma_buf_fd) +{ + struct npu_mctx_map *ncmap = NULL, *tmp = NULL; + struct phytium_npu_mmu_context *pnmctx; + size_t i; + + WARN_ON(!sess); + for (i = 0; i < ARRAY_SIZE(sess->mmu_ctx); i++) { + pnmctx = &sess->mmu_ctx[i]; + if (!pnmctx->mctx) { + pr_err("pnmctx->mctx is NULL in mmu_ctx[%ld],%s", i, __func__); + continue; + } + + list_for_each_entry_safe(ncmap, tmp, &pnmctx->mctx->maplist, mctx_map_entry) { + if (ncmap->dma_buf_fd == dma_buf_fd) + goto find_ctx_map; + } + ncmap = NULL; + } + +find_ctx_map: + if (ncmap) + return ncmap->dma_buf; + return NULL; +} + +static int phytium_npu_get_mmu_base_addr(struct phytium_npu_session *sess) +{ + struct phytium_npu_mmu_context *nmctx = &sess->mmu_ctx[NPU_MMU_CONTEXT_MODULE_ID]; + + if (nmctx->virt_base == nmctx->curr_virt_base) + return nmctx->virt_base; + + return nmctx->curr_virt_base; +} + +static u32 phytium_npu_get_mmu_pc_entry(u64 virt) +{ + return (virt & PHYT_MMU_PC_MASK) >> PHYT_MMU_PC_SHIFT; +} + +static u32 phytium_npu_get_mmu_pd_entry(u64 virt) +{ + return (virt & PHYT_MMU_PD_MASK) >> PHYT_MMU_PD_SHIFT; +} + +/* TODO :support other pagesize */ +static u32 phytium_npu_get_mmu_entry(u64 virt) +{ + return (virt & PHYT_MMU_PT_MASK) >> PHYT_MMU_PT_SHIFT; +} + +static int phytium_npu_get_pc_entry_valid(struct npu_mmu_catalogue *mmu_pc, u64 virt) +{ + u32 *ptr = (u32 *)mmu_pc->desc.cpu_addr; + + return ptr[phytium_npu_get_mmu_pc_entry(virt)] & PHYT_MMU_ENTRY_VALID; +} + +static u64 +phytium_npu_get_pd_phy_addr_in_pc_entry(struct npu_mmu_catalogue *mmu_pc, u64 virt) +{ + u32 *ptr = (u32 *)mmu_pc->desc.cpu_addr; + u64 pd_cpu_addr; + + pd_cpu_addr = ptr[phytium_npu_get_mmu_pc_entry(virt)]; + pd_cpu_addr = (pd_cpu_addr & PHYT_MMU_PC_ENTRY_MASK) << PC_SHIFT; + + return pd_cpu_addr; +} + +static int 
phytium_npu_prepare_mmu_source(struct phytium_npu_mmu_context *nmctx, + struct npu_mmu_catalogue *mmu_pc, + u64 virt) +{ + struct npu_mmu_pd *pd; + struct npu_mmu_pt *pt; + u32 entry, pd_entry; + u64 *addr; + int ret = 0; + static int is_new_pd_entry; + dma_addr_t dma_addr; + + struct phytium_npu_dev *npu = phytium_npu_get_npudev(); + + if (!mmu_pc->desc.phys) { + addr = dma_alloc_coherent(npu->dev, PHYT_MMU_ALLOC_PAGE_SIZE, + &dma_addr, GFP_KERNEL); + if (!addr) + return -ENOMEM; + + nmctx->pc_base_phys_addr = dma_addr >> PHYT_MMU_PC_ADDR_SHIFT; + PRDEBUG("create page catalogue phys address %#llx, Write context:%#x", + dma_addr, nmctx->pc_base_phys_addr); + /* create page directory , page catalog store address info himself*/ + mmu_pc->desc.cpu_addr = (u64)addr; + mmu_pc->desc.phys = dma_addr; + mmu_pc->desc.npu_addr_base = virt; + } + + if (!mmu_pc->pd) { + /* page directory is NULL */ + mmu_pc->pd = kcalloc(PD_NUM, sizeof(struct npu_mmu_pd *), GFP_KERNEL); + if (!mmu_pc->pd) { + ret = -ENOMEM; + goto no_pd; + } + PRDEBUG("create page directory virt address %p", mmu_pc->pd); + } + + pd_entry = phytium_npu_get_mmu_pd_entry(virt); + if (!mmu_pc->pd[pd_entry]) { + mmu_pc->pd[pd_entry] = kzalloc(sizeof(*mmu_pc->pd[pd_entry]), GFP_KERNEL); + if (!mmu_pc->pd[pd_entry]) { + ret = -ENOMEM; + goto no_pd_entry; + } + is_new_pd_entry = 1; + PRDEBUG("create page directory entry %d virt address %p", + pd_entry, mmu_pc->pd[pd_entry]); + } + + pd = mmu_pc->pd[pd_entry]; + + if (!pd->desc.phys) { + if (!phytium_npu_get_pc_entry_valid(mmu_pc, virt)) { + pd->desc.cpu_addr = (u64)dma_alloc_coherent(npu->dev, + PHYT_MMU_ALLOC_PAGE_SIZE, + &dma_addr, GFP_KERNEL); + pd->desc.phys = dma_addr; + mmu_pc->pd_ext.pd_desc[mmu_pc->pd_ext.valid_pd] = + kzalloc(sizeof(*mmu_pc->pd_ext.pd_desc[mmu_pc->pd_ext.valid_pd]), + GFP_KERNEL); + memcpy(mmu_pc->pd_ext.pd_desc[mmu_pc->pd_ext.valid_pd], + &pd->desc, sizeof(pd->desc)); + mmu_pc->pd_ext.valid_pd++; + } else { + pd->desc.phys = 
phytium_npu_get_pd_phy_addr_in_pc_entry(mmu_pc, virt); + pd->desc.cpu_addr = (u64)phys_to_virt(pd->desc.phys); + } + + if (!pd->desc.cpu_addr) { + pr_err("No mem for page directory."); + ret = -ENOMEM; + goto no_pd_store_page; + } + pd->desc.npu_addr_base = virt; + PRDEBUG("create page directory entry desc virt address %llx", pd->desc.cpu_addr); + } + + if (!pd->pt) { + /* page table is NULL */ + pd->pt = kzalloc(sizeof(*pd->pt), GFP_KERNEL); + PRDEBUG("create page table address %p", pd->pt); + if (!pd->pt) { + ret = -ENOMEM; + goto no_pt; + } + } + + entry = phytium_npu_get_mmu_entry(virt); + pt = pd->pt; + + if (!pt->desc.phys) { + if (is_new_pd_entry) { + pt->desc.cpu_addr = (u64)dma_alloc_coherent(npu->dev, + PHYT_MMU_ALLOC_PAGE_SIZE, + &dma_addr, GFP_KERNEL); + is_new_pd_entry = 0; + pd->pt_page_addr[pd_entry] = pt->desc.cpu_addr; + PRDEBUG("create page table entry desc virt address %#llx", + pt->desc.cpu_addr); + } else { + pt->desc.cpu_addr = pd->pt_page_addr[pd_entry]; + PRDEBUG("reuse page table entry desc virt address %#llx, pd->page:%#llx", + pt->desc.cpu_addr, pd->desc.cpu_addr); + } + + if (!pt->desc.cpu_addr) { + pr_err("No mem for page table."); + ret = -ENOMEM; + goto no_pt_entry; + } + pt->desc.phys = virt_to_phys((u64 *)pt->desc.cpu_addr); + pt->desc.npu_addr_base = virt; + } + return 0; + +no_pt_entry: + kfree(pd->pt); +no_pt: + kfree((u64 *)pd->desc.cpu_addr); +no_pd_store_page: + kfree(mmu_pc->pd[pd_entry]); +no_pd_entry: + kfree(mmu_pc->pd); +no_pd: + kfree((u64 *)mmu_pc->desc.cpu_addr); + mmu_pc->desc.cpu_addr = 0; + mmu_pc->desc.phys = 0; + return ret; +} + +static int phytium_npu_write_mmu_pt(struct npu_mmu_pt *mmu_pt, int entry, + u64 phys, int flags) +{ + u64 *ptr = (u64 *)mmu_pt->desc.cpu_addr; + + ptr[entry] = phys | flags; + PRDEBUG("pt entry %d value %llx", entry, ptr[entry]); + return 0; +} + +static int phytium_npu_write_mmu_pd(struct npu_mmu_pd *mmu_pd, int entry, + struct npu_mmu_pt *mmu_pt, int flags) +{ + u64 *ptr = (u64 
*)mmu_pd->desc.cpu_addr; + + ptr[entry] = mmu_pt->desc.phys | flags; + PRDEBUG("pd entry %d value %llx", entry, ptr[entry]); + return 0; +} + +static int phytium_npu_write_mmu_pc(struct npu_mmu_catalogue *mmu_pc, int entry, + struct npu_mmu_pd *mmu_pd, int flags) +{ + u32 *ptr = (u32 *)mmu_pc->desc.cpu_addr; + + ptr[entry] = (mmu_pd->desc.phys >> PC_SHIFT) | flags; + PRDEBUG("pc entry %d value %llx", entry, ptr[entry]); + return 0; +} + +static void phytium_npu_debug_mmu_tlb_info(struct phytium_npu_mmu_context *nmctx, u64 virt) +{ + struct npu_mmu_catalogue *mmu_pc = nmctx->mctx->mmu_pc; + u32 *pc_ptr; + u64 *ptr2, *ptr3; + + pc_ptr = (u32 *)mmu_pc->desc.cpu_addr; + /* debug info */ + PRDEBUG("------mmu pc info-------"); + PRDEBUG("pc_base_phys_addr:%#x, virt_base:%#llx", nmctx->pc_base_phys_addr, + nmctx->virt_base); + PRDEBUG("map to virt addr %#llx", virt); + PRDEBUG("pc phys:%#llx, cpu_addr:%#llx, nvaddr base:%#llx, store pd phy addr:%#x", + mmu_pc->desc.phys, mmu_pc->desc.cpu_addr, mmu_pc->desc.npu_addr_base, + pc_ptr[phytium_npu_get_mmu_pc_entry(virt)]); + + struct npu_mmu_pd *mmu_pd = mmu_pc->pd[phytium_npu_get_mmu_pd_entry(virt)]; + + ptr2 = (u64 *)mmu_pd->desc.cpu_addr; + PRDEBUG("pd phys:%#llx, cpu_addr:%#llx, nvaddr base:%#llx, store pt phy addr:%#llx", + mmu_pd->desc.phys, mmu_pd->desc.cpu_addr, mmu_pd->desc.npu_addr_base, + ptr2[phytium_npu_get_mmu_pd_entry(virt)]); + struct npu_mmu_pt *mmu_pt = mmu_pd->pt; + + ptr3 = (u64 *)mmu_pt->desc.cpu_addr; + PRDEBUG("pt phys:%#llx, cpu_addr:%#llx, nvaddr base:%#llx," + mmu_pt->desc.phys, mmu_pt->desc.cpu_addr, mmu_pt->desc.npu_addr_base); + PRDEBUG("store user addr:%#llx, entry:%d", + ptr3[phytium_npu_get_mmu_entry(virt)], phytium_npu_get_mmu_entry(virt)); +} + +static int phytium_npu_mmu_unmap_addr(struct phytium_npu_mmu_context *pnmctx, + struct npu_mctx_map *ncmap, u64 virt, + int pagenum) +{ + struct npu_mmu_pd *pd; + struct npu_mmu_pt *pt; + int entry, pd_entry, pt_entry; + u64 unmap_virt; + struct 
npu_mmu_ctx *nmctx = pnmctx->mctx; + struct npu_mmu_catalogue *mmu_pc; + int i; + + if (!nmctx) { + pr_err("mpu mmu ctx is NULL point, pnmctx:%p,ncmap:%p", pnmctx, ncmap); + return -EINVAL; + } + mmu_pc = nmctx->mmu_pc; + if (!mmu_pc) { + pr_err("mpu mmu catalogue is NULL point"); + return -EINVAL; + } + + unmap_virt = pagenum * PHYT_MMU_ALLOC_PAGE_SIZE + virt; + + entry = phytium_npu_get_mmu_pc_entry(unmap_virt); + pd_entry = phytium_npu_get_mmu_pd_entry(unmap_virt); + pt_entry = phytium_npu_get_mmu_entry(unmap_virt); + PRDEBUG("pc entry:%d, pd entry:%d pt entry %d", entry, pd_entry, pt_entry); + + if (!mmu_pc->ref_count) { + pr_info("mmu page catalogue is NULL!"); + return -11; + } + + mmu_pc->ref_count ? mmu_pc->ref_count-- : mmu_pc->ref_count; + + pd = mmu_pc->pd[pd_entry]; + if (pd) { + pd->ref_count ? pd->ref_count-- : pd->ref_count; + PRDEBUG("pd valid:%lld", pd->ref_count); + pt = pd->pt; + if (pt) { + if (pt->valid == 1) { + dma_free_coherent(pnmctx->dev, PHYT_MMU_ALLOC_PAGE_SIZE, + (u64 *)pt->desc.cpu_addr, pt->desc.phys); + + pt->desc.cpu_addr = 0; + pt->desc.phys = 0; + PRDEBUG("**free only pt cpu address :%p", pt); + pt->valid ? pt->valid-- : pt->valid; + pt = NULL; + } else if (pt->valid) { + phytium_npu_write_mmu_pt(pt, pt_entry, 0, PHYT_MMU_ENTRY_INVALID); + pt->valid ? 
pt->valid-- : pt->valid; + PRDEBUG("pt valid:%d", pt->valid); + } + } + + if (!pd->ref_count) { + pd->desc.cpu_addr = 0; + pd->desc.phys = 0; + kfree(pd->pt); //free page table point + pd->pt = NULL; + kfree(pd); + mmu_pc->pd[pd_entry] = NULL; + } + } + + if (!mmu_pc->ref_count) { + for (i = 0; i < mmu_pc->pd_ext.valid_pd; i++) { + dma_free_coherent(pnmctx->dev, PHYT_MMU_ALLOC_PAGE_SIZE, + (u64 *)mmu_pc->pd_ext.pd_desc[i]->cpu_addr, + mmu_pc->pd_ext.pd_desc[i]->phys); + kfree(mmu_pc->pd_ext.pd_desc[i]); + mmu_pc->pd_ext.pd_desc[i] = NULL; + } + + dma_free_coherent(pnmctx->dev, PHYT_MMU_ALLOC_PAGE_SIZE, + (u64 *)mmu_pc->desc.cpu_addr, mmu_pc->desc.phys); + + mmu_pc->desc.cpu_addr = 0; + mmu_pc->desc.phys = 0; + PRDEBUG("**free only pd :%p", mmu_pc->pd); + + kfree(mmu_pc->pd); + mmu_pc->pd = NULL; + } + + return 0; +} + +static int phytium_npu_mmu_map_addr(struct phytium_npu_mmu_context *pnmctx, + struct npu_mctx_map *ncmap, + u64 virt, u64 phys, + int pagenum, int ptflags) +{ + struct npu_mmu_catalogue *mmu_pc = pnmctx->mctx->mmu_pc; + struct npu_mmu_pd *pd; + struct npu_mmu_pt *pt; + int ret, entry, pd_entry; + u64 map_virt; + + map_virt = pagenum * PHYT_MMU_ALLOC_PAGE_SIZE + virt; + ret = phytium_npu_prepare_mmu_source(pnmctx, mmu_pc, map_virt); + if (ret) { + pr_err("Err for preparing mmu source."); + return -EINVAL; + } + entry = phytium_npu_get_mmu_pc_entry(map_virt); + pd_entry = phytium_npu_get_mmu_pd_entry(map_virt); + pd = mmu_pc->pd[pd_entry]; + phytium_npu_write_mmu_pc(mmu_pc, entry, pd, PHYT_MMU_ENTRY_VALID); + mmu_pc->ref_count++; + + entry = phytium_npu_get_mmu_entry(map_virt); + pt = pd->pt; + + phytium_npu_write_mmu_pd(pd, pd_entry, pt, PHYT_MMU_ENTRY_VALID); + pd->ref_count++; + phytium_npu_write_mmu_pt(pt, entry, phys, ptflags); + pt->valid++; + phytium_npu_debug_mmu_tlb_info(pnmctx, map_virt); + return 0; +} + +static void phytium_npu_mmu_get_ctx_pc_base_address(struct phytium_npu_mmu_context *pnmctx) +{ + struct npu_mmu_ctx *mctx = pnmctx->mctx; 
+ + pnmctx->pc_base_phys_addr = mctx->mmu_pc->desc.phys >> PHYT_MMU_PC_ADDR_SHIFT; +} + +static int phytium_npu_mmu_map_sg(struct phytium_npu_session *sess, struct npu_mctx_map *ncmap, + struct sg_table *sgt) +{ + struct scatterlist *sg; + struct phytium_npu_mmu_context *pnmctx; + u64 phys_addr; + int len, i, flags = 0; + + /* the driver will use self virtual address when + * the user's config was not included virt_addr + */ + if (!ncmap->virt_addr) + ncmap->virt_addr = phytium_npu_get_mmu_base_addr(sess); + PRDEBUG("%s: current virt base %llx\n", __func__, ncmap->virt_addr); + if (ncmap->map_type & NPU_MAP_TYPE_INFERENCE) + pnmctx = &sess->mmu_ctx[NPU_MMU_CONTEXT_MODULE_ID]; + else + pnmctx = &sess->mmu_ctx[NPU_MMU_CONTEXT_IO_ID]; + flags = ncmap->map_type & NPU_MAP_TYPE_RO ? 0x2 : 0; + flags |= PHYT_MMU_ENTRY_VALID; + + if (unlikely(!pnmctx->pc_base_phys_addr)) + phytium_npu_mmu_get_ctx_pc_base_address(pnmctx); + + for_each_sgtable_dma_sg(sgt, sg, i) { + phys_addr = sg_dma_address(sg); + len = sg_dma_len(sg); + if (!PAGE_ALIGNED(phys_addr) || !PAGE_ALIGNED(len)) + PRDEBUG("%s:ERROR addr %llx or len %x is not page aligned!\n", + __func__, phys_addr, len); + + phytium_npu_mmu_map_addr(pnmctx, ncmap, ncmap->virt_addr, phys_addr, i, flags); + } + + return 0; +} + +static int phytium_npu_mmu_unmap_sg(struct npu_mctx_map *ncmap, struct sg_table *sgt) +{ + struct scatterlist *sg; + struct phytium_npu_mmu_context *pnmctx = ncmap->mctx->pnmctx; + int i; + + if (!ncmap->mctx || !ncmap->dma_buf) { + pr_err("ncmap->mctx or ncmap->dma_buf is NULL"); + return 0; + } + if (!pnmctx) { + PRDEBUG("----------------------------------------------------."); + return 0; + } + pr_debug("unmap mmu base virt:%#llx.pnmctx:%p,ncmap:%p", ncmap->virt_addr, pnmctx, ncmap); + for_each_sgtable_dma_sg(sgt, sg, i) { + PRDEBUG("unmap mmu virt:%#llx.", ncmap->virt_addr + i * 4096); + phytium_npu_mmu_unmap_addr(pnmctx, ncmap, ncmap->virt_addr, i); + } + if (ncmap->attach && ncmap->sgt && 
ncmap->dma_buf) { + dma_buf_unmap_attachment(ncmap->attach, ncmap->sgt, DMA_BIDIRECTIONAL); + dma_buf_detach(ncmap->dma_buf, ncmap->attach); + dma_buf_put(ncmap->dma_buf); + pr_debug("-----put dma buf fd:%d", ncmap->dma_buf_fd); + } else { + pr_err("who is null ? attach:%s, sgt:%s, dam_buf:%s", ncmap->attach ? "OK" : NULL, + ncmap->sgt ? "OK" : NULL, ncmap->dma_buf ? "OK" : NULL); + } + return 0; +} + +int phytium_npu_mmu_unmap(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + struct npu_memory_unmap *usr_unmmap) +{ + struct npu_mctx_map *ncmap; + + pr_debug("%s, unmap buf fd:%d", __func__, usr_unmmap->fd); + ncmap = phytium_npu_find_mmu_ctx_map(sess, usr_unmmap->fd); + if (!ncmap) { + dev_err(npu->dev, "Not find npu mmu ctx form sess(%d).", sess->id); + return -EINVAL; + } + + phytium_npu_mmu_unmap_sg(ncmap, ncmap->sgt); + phytium_npu_mmu_map_destroy(npu, ncmap); + ncmap = NULL; + + return 0; +} + +int phytium_npu_import_dmabuf(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct npu_memory_map *usr_mmap) +{ + struct dma_buf_attachment *attach; + struct sg_table *sgt; + struct npu_mctx_map *ncmap; + struct dma_buf *buf; + int err; + + ncmap = phytium_npu_find_mmu_ctx_map(sess, usr_mmap->fd); + if (!ncmap) { + dev_err(npu->dev, "Not find npu mmu ctx form sess(%d).", sess->id); + return -EINVAL; + } + + buf = ncmap->dma_buf; + attach = dma_buf_attach(buf, npu->dev); + if (IS_ERR(attach)) { + err = PTR_ERR(attach); + goto free; + } + + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto detach; + } + /* map to mmu tlb */ + if (!npu->nmmu_config.mode_bypass) { + err = phytium_npu_mmu_map_sg(sess, ncmap, sgt); + if (err < 0) + goto detach; + } + ncmap->dma_buf = buf; + ncmap->attach = attach; + ncmap->sgt = sgt; + ncmap->virt_addr = usr_mmap->vaddr; + + return 0; +detach: + if (!IS_ERR_OR_NULL(sgt)) + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); + + 
dma_buf_detach(buf, attach); + dma_buf_put(buf); + pr_debug("put dma buf fd:%d", usr_mmap->fd); +free: + kfree(attach); + + return err; +} + +static int phytium_npu_release_mmu_source(struct phytium_npu_dev *npudev, + struct phytium_npu_session *sess) +{ + struct phytium_npu_mmu_context *pnmctx; + struct npu_mctx_map *ncmap = NULL, *tmp; + size_t i; + + WARN_ON(!sess); + + for (i = 0; i < ARRAY_SIZE(sess->mmu_ctx); i++) { + pnmctx = &sess->mmu_ctx[i]; + if (!pnmctx->mctx) { + pr_err("pnmctx->mctx is NULL in mmu_ctx[%ld],%s", i, __func__); + continue; + } + list_for_each_entry_safe(ncmap, tmp, &pnmctx->mctx->maplist, mctx_map_entry) { + phytium_npu_mmu_unmap_sg(ncmap, ncmap->sgt); + phytium_npu_mmu_map_destroy(npudev, ncmap); + ncmap = NULL; + } + } + + return 0; +} diff --git a/drivers/staging/phytium-npu/phytium_npu_pci.c b/drivers/staging/phytium-npu/phytium_npu_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..c1ffeb78fb0155888e937a0186b86acf4ab82acb --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_pci.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* NPU pci driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "phytium_npu.h" +#include "phytium_npu_reg.h" + +#define PHYTIUM_PCI_VENDOR_ID 0x1DB7 +#define PHYTIUM_PCI_DEVICE_ID 0xDC24 +#define PCI_BAR_DEV 0 +/* PCI and PCI-E do not support 64bit devices on BAR1: 64bit uses 2 bars */ +#define PCI_BAR_RAM 2 +#define NUM_PCI_BARS 3 + +static const struct pci_device_id pci_pci_ids[] = { + { PCI_DEVICE(PHYTIUM_PCI_VENDOR_ID, PHYTIUM_PCI_DEVICE_ID), .class = 0xb, }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, pci_pci_ids); + +static irqreturn_t phytium_npu_thread_irq(int irq, void *dev_id) +{ + struct pci_dev *pci_dev = (struct pci_dev *)dev_id; + + return phytium_npu_handle_thread_irq(&pci_dev->dev); +} + +static irqreturn_t phytium_npu_handle_isr(int irq, void *dev_id) +{ + struct pci_dev *pci_dev = (struct pci_dev *)dev_id; + + if (!pci_dev) + return IRQ_NONE; + + return phytium_npu_handle_irq(&pci_dev->dev); +} + +static int phytium_npu_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct phytium_npu_dev *npu_dev; + struct device *dev = &pci_dev->dev; + void __iomem *reg_addr; + int ret, irq, bar; + + dev_info(dev, "probed a NPU device, pci_dev: %x:%x\n", pci_dev->vendor, pci_dev->device); + + /* Enable the device */ + if (pci_enable_device(pci_dev)) + goto out_disable; + + pci_set_master(pci_dev); + + if (dev->dma_mask) { + dev_info(dev, "%s dev->dma_mask : %p\n", __func__, dev->dma_mask); + } else { + dev_info(dev, "%s mask unset, setting coherent\n", __func__); + dev->dma_mask = &dev->coherent_dma_mask; + } + dev_info(dev, "%s dma_set_mask %#llx,new 40bit\n", __func__, dma_get_mask(dev)); + ret = dma_set_mask(dev, DMA_BIT_MASK(40)); + if (ret) { + dev_err(dev, "%s failed to set dma mask\n", __func__); + goto out_disable; + } + + /* Reserve PCI I/O and memory resources */ + if (pci_request_regions(pci_dev, "npupci")) + goto out_add_dev; + + for (bar = 0; bar < NUM_PCI_BARS; bar++) { + if (bar == PCI_BAR_RAM) { + /* 
Don't ioremap pci memory - it is mapped on demand */ + continue; + } + if (bar == PCI_BAR_DEV) { + reg_addr = devm_ioremap(dev, + pci_resource_start(pci_dev, bar), + pci_resource_len(pci_dev, bar)); + dev_info(dev, "[bar %u] addr: %#llx size: %#llx km: 0x%p\n", + bar, pci_resource_start(pci_dev, bar), + pci_resource_len(pci_dev, bar), + reg_addr); + break; + } + } + if (!reg_addr) + goto out_release; + + ret = pci_enable_msi(pci_dev); + if (ret) + dev_err(dev, "Failed to enable MSI IRQ."); + + /* Get the IRQ...*/ + irq = pci_dev->irq; + + ret = phytium_npu_register_dev(dev, NULL, reg_addr); + if (ret) { + dev_err(&pci_dev->dev, "failed to initialize driver core!\n"); + goto out_add_dev; + } + + npu_dev = dev_get_drvdata(dev); + npu_dev->irq = irq; + npu_dev->clock_freq = 800000000; + npu_dev->is_platform_dev = FALSE; + npu_dev->power_reg_base = NULL; + ret = devm_request_threaded_irq(&pci_dev->dev, irq, phytium_npu_handle_isr, + phytium_npu_thread_irq, IRQF_SHARED, + "phytium-npu", pci_dev); + + if (ret) { + dev_err(&pci_dev->dev, "failed to request irq\n"); + goto out_irq; + } + pr_info("%s: registered irq: %d\n", __func__, irq); + + return ret; + +out_irq: + phytium_npu_unregister_dev(&pci_dev->dev); +out_add_dev: + devm_iounmap(&pci_dev->dev, reg_addr); +out_release: + pci_release_regions(pci_dev); +out_disable: + pci_disable_device(pci_dev); + return ret; +} + +static void phytium_npu_remove(struct pci_dev *pci_dev) +{ + struct phytium_npu_dev *npu_dev = dev_get_drvdata(&pci_dev->dev); + + if (npu_dev->irq) + devm_free_irq(&pci_dev->dev, npu_dev->irq, (void *)pci_dev); + pci_disable_msi(pci_dev); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + phytium_npu_unregister_dev(&pci_dev->dev); + pr_info("unregister dev"); +} + +#ifdef CONFIG_PM +static int phytium_npu_suspend(struct device *dev) +{ + return phytium_npu_common_suspend(dev); +} + +static int phytium_npu_resume(struct device *dev) +{ + return phytium_npu_common_resume(dev); +} + +static 
int phytium_npu_runtime_suspend(struct device *dev) +{ + return 0; +} + +static int phytium_npu_runtime_resume(struct device *dev) +{ + return 0; +} + +#endif + +static const struct dev_pm_ops phytium_npu_pm_ops = { + SET_RUNTIME_PM_OPS(phytium_npu_runtime_suspend, + phytium_npu_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(phytium_npu_suspend, phytium_npu_resume) +}; + +static ssize_t info_show(struct device_driver *drv, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "NPU PCI driver version: %s\n", NPU_FTN300_DRIVER_VERSION); +} + +static DRIVER_ATTR_RO(info); +static struct attribute *npu_pci_attrs[] = { + &driver_attr_info.attr, + NULL +}; +ATTRIBUTE_GROUPS(npu_pci); + +static struct pci_driver phytium_npu_pci_drv = { + .name = NPU_FTN300_PCI_DRV_NAME, + .id_table = pci_pci_ids, + .probe = phytium_npu_pci_probe, + .remove = phytium_npu_remove, + .driver = { + .groups = npu_pci_groups, + .pm = &phytium_npu_pm_ops, + } +}; +module_pci_driver(phytium_npu_pci_drv); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium NPU driver for PCI device controllers"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/phytium-npu/phytium_npu_platform.c b/drivers/staging/phytium-npu/phytium_npu_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..fbb4a8b85863468a30b3ffc2daa59aa2be0d6d51 --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_platform.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_npu.h" +#include "phytium_npu_leopard_reg.h" + +static irqreturn_t phytium_npu_thread_irq(int irq, void *dev_id) +{ + struct platform_device *pdev = (struct platform_device *)dev_id; + + return phytium_npu_handle_thread_irq(&pdev->dev); +} + +static irqreturn_t phytium_npu_handle_isr(int irq, void *dev_id) +{ + struct platform_device *pdev = (struct platform_device *)dev_id; + + if (!pdev) + return IRQ_NONE; + + return phytium_npu_handle_irq(&pdev->dev); +} + +static int phytium_npu_plat_probe(struct platform_device *pdev) +{ + struct phytium_npu_dev *npu_dev; + struct resource *res; + struct device *dev = &pdev->dev; + struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); + void __iomem *reg_addr, *preg_addr = NULL; + u64 dma_mask; + int ret, irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -EINVAL; + } + pr_info("%s: registers %#llx-%#llx\n", __func__, + (unsigned long long)res->start, (unsigned long long)res->end); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "could not get IRQ\n"); + return -ENXIO; + } + + reg_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + + if (!reg_addr) { + dev_err(&pdev->dev, "failed to map npu registers\n"); + return -ENXIO; + } + + if (pdev->dev.of_node) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + pr_info("%s: registers power manage %#llx-%#llx\n", __func__, + (unsigned long long)res->start, (unsigned long long)res->end); + preg_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + + if (!preg_addr) { + dev_err(&pdev->dev, "failed to map npu power registers\n"); + return -ENXIO; + } + } + } + + if (!fwnode_property_read_u64(fwnode, "dma-mask", &dma_mask)) + dev_info(dev, "%s get mask from DT : %#llx\n", __func__, dma_mask); 
+ else + dma_mask = DMA_BIT_MASK(40); + + ret = dma_set_mask(dev, dma_mask); + if (ret) { + dev_err(dev, "%s failed to set dma mask\n", __func__); + return ret; + } + + ret = phytium_npu_register_dev(&pdev->dev, preg_addr, reg_addr); + if (ret) { + dev_err(&pdev->dev, "failed to initialize driver core!\n"); + goto out_add_dev; + } + + npu_dev = dev_get_drvdata(dev); + npu_dev->irq = irq; + npu_dev->is_platform_dev = TRUE; + npu_dev->clock_freq = 800000000; + + ret = devm_request_threaded_irq(&pdev->dev, irq, phytium_npu_handle_isr, + phytium_npu_thread_irq, IRQF_SHARED, + "phytium-npu", pdev); + + if (ret) { + dev_err(&pdev->dev, "failed to request irq\n"); + goto out_irq; + } + pr_info("%s: registered irq: %d\n", __func__, irq); + + return ret; + +out_irq: + phytium_npu_unregister_dev(&pdev->dev); +out_add_dev: + devm_iounmap(&pdev->dev, reg_addr); + return ret; +} + +static int phytium_npu_remove(struct platform_device *pdev) +{ + phytium_npu_unregister_dev(&pdev->dev); + return 0; +} + +#ifdef CONFIG_PM +static int phytium_npu_suspend(struct device *dev) +{ + return phytium_npu_common_suspend(dev); +} + +static int phytium_npu_platform_resume(struct device *dev) +{ + return phytium_npu_common_resume(dev); +} + +static int phytium_npu_runtime_idle(struct device *dev) +{ + /* Eg. 
turn off external clocks */ + return 0; +} + +static int phytium_npu_runtime_suspend(struct device *dev) +{ + return phytium_npu_common_suspend(dev); +} + +static int phytium_npu_platform_runtime_resume(struct device *dev) +{ + return phytium_npu_common_resume(dev); +} + +#endif + +static const struct dev_pm_ops phytium_npu_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_npu_suspend, phytium_npu_platform_resume) + SET_RUNTIME_PM_OPS(phytium_npu_runtime_suspend, + phytium_npu_platform_runtime_resume, + phytium_npu_runtime_idle) +}; + +static ssize_t info_show(struct device_driver *drv, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "NPU platform driver version: %s\n", NPU_FTN300_DRIVER_VERSION); +} + +static DRIVER_ATTR_RO(info); +static struct attribute *drv_attrs[] = { + &driver_attr_info.attr, + NULL +}; + +ATTRIBUTE_GROUPS(drv); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytium_npu_acpi_ids[] = { + { "PHYT0050", 0 }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_npu_acpi_ids); +#endif +#ifdef CONFIG_OF +static const struct of_device_id phytium_npu_of_ids[] = { + { .compatible = "phytium,npu"}, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, phytium_npu_of_ids); +#endif + +static struct platform_driver phytium_npu_plat_driver = { + .probe = phytium_npu_plat_probe, + .remove = phytium_npu_remove, + .driver = { + .name = NPU_FTN300_PLAT_DRV_NAME, + .groups = drv_groups, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(phytium_npu_of_ids), + .acpi_match_table = ACPI_PTR(phytium_npu_acpi_ids), + .pm = &phytium_npu_pm_ops, + }, +}; + +module_platform_driver(phytium_npu_plat_driver); + +MODULE_AUTHOR("Cheng Quan "); +MODULE_DESCRIPTION("Phytium NPU driver for IO Mapped controllers"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(NPU_FTN300_DRIVER_VERSION); diff --git a/drivers/staging/phytium-npu/phytium_npu_session.c b/drivers/staging/phytium-npu/phytium_npu_session.c new file mode 100644 index 
0000000000000000000000000000000000000000..8528ff6ee22c971f8e8e521ae9726989fb14d10d --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_session.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_npu.h" +#include "phytium_npu_mmu.h" + +int global_session_id; + +struct phytium_npu_session *phytium_npu_session_create(struct device *dev) +{ + return devm_kzalloc(dev, sizeof(struct phytium_npu_session), GFP_KERNEL); +} + +int phytium_npu_session_init(struct phytium_npu_dev *npu, struct phytium_npu_session *session) +{ + size_t i; + struct phytium_npu_mmu_context *pnmctx; + + session->npu_dev = npu; + + if (global_session_id >= NPU_SUPPORT_SESSION_NUM) + global_session_id = 0; + + session->id = global_session_id++; + session->dbgfs.sess_dbgfs_dir = NULL; + + init_waitqueue_head(&session->response_wq); + INIT_LIST_HEAD(&session->sess_list_entry); + INIT_LIST_HEAD(&session->sched_list_entry); + INIT_LIST_HEAD(&session->stream_list); + INIT_LIST_HEAD(&session->response_list); + list_add_tail(&session->sess_list_entry, &npu->sessions_list); + list_add(&session->sched_list_entry, &npu->sched_sess_list); + + for (i = 0; i < ARRAY_SIZE(session->mmu_ctx); i++) { + pnmctx = &session->mmu_ctx[i]; + pnmctx->mctx = NULL; + pnmctx->dev = NULL; + } + npu->sessions_count++; + pr_debug("current npu session count %d, session id:%d", npu->sessions_count, session->id); + return 0; +} + +int phytium_npu_session_release(struct phytium_npu_dev *npu, struct phytium_npu_session *session) +{ + struct phytium_npu_session *curr, *next; + + if (!npu || !session) + return -EINVAL; + phytium_npu_debugfs_session_remove(session); + phytium_npu_release_mmu_context(npu, session); + list_for_each_entry_safe(curr, next, 
&npu->sessions_list, sess_list_entry) { + if (curr == session) { + list_del(&curr->sess_list_entry); + list_del(&curr->sched_list_entry); + npu->sessions_count--; + pr_debug("release session :%p", session); + devm_kfree(npu->dev, curr); + break; + } + } + + return 0; +} + +int phytium_npu_session_release_all(struct phytium_npu_dev *npudev) +{ + struct phytium_npu_session *curr, *next; + + list_for_each_entry_safe(curr, next, &npudev->sessions_list, sess_list_entry) { + phytium_npu_release_mmu_context(npudev, curr); + list_del(&curr->sess_list_entry); + list_del(&curr->sched_list_entry); + npudev->sessions_count--; + devm_kfree(npudev->dev, curr); + } + return 0; +} + +void phytium_npu_change_schedule_session(struct phytium_npu_session *sess, + struct phytium_npu_dev *npu, int is_change) +{ + if (sess != list_entry(&npu->sched_sess_list, struct phytium_npu_session, sched_list_entry)) + while (list_first_entry(&npu->sched_sess_list, + struct phytium_npu_session, sched_list_entry) != sess) + list_rotate_left(&npu->sched_sess_list); + + if (is_change) + list_rotate_left(&npu->sched_sess_list); +} diff --git a/drivers/staging/phytium-npu/phytium_npu_uapi.c b/drivers/staging/phytium-npu/phytium_npu_uapi.c new file mode 100644 index 0000000000000000000000000000000000000000..0fc968a38d63aa05d9649fbba1dda5ddb9333e1d --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_uapi.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "include/phytium_npu.h" +#include "include/phytium_npu_uapi.h" + +static int phytium_npu_open(struct inode *inode, struct file *file) +{ + struct miscdevice *miscdev = (struct miscdevice *)file->private_data; + struct phytium_npu_dev *npudev = container_of(miscdev, struct phytium_npu_dev, miscdev); + + pr_debug("open device here!, npudev addr:%p, miscdev:%p", npudev, miscdev); + if (!npudev->dev) + return -1; + + struct phytium_npu_session *sess = phytium_npu_session_create(npudev->dev); + + if (!sess) { + dev_err(npudev->dev, "No memory for creating session."); + return -ENOMEM; + } + + phytium_npu_session_init(npudev, sess); + + phytium_npu_create_new_mmu_context(npudev, sess); + mutex_lock(&npudev->mutex_lock); + phytium_npu_try_resume_work(npudev); + mutex_unlock(&npudev->mutex_lock); + file->private_data = sess; + return 0; +} + +static int phytium_npu_release(struct inode *inode, struct file *file) +{ + struct phytium_npu_session *sess = file->private_data; + struct phytium_npu_dev *npu; + int ret; + + if (!sess) + return -EINVAL; + npu = sess->npu_dev; + pr_debug("npu close here!, npudev addr:%p, sess:%p", npu, sess); + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + + phytium_npu_session_release(npu, sess); + file->private_data = NULL; + mutex_unlock(&npu->mutex_lock); + return 0; +} + +static ssize_t phytium_npu_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) +{ + struct phytium_npu_session *sess = file->private_data; + struct phytium_npu_dev *npu = sess->npu_dev; + int ret, ret_len; + + pr_debug("%s:user reads the response", __func__); + if (!sess) + return -EINVAL; + pr_debug("%s: with sess :%p, id:%d", __func__, sess, sess->id); + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + while (list_empty(&sess->response_list)) { + if (file->f_flags & O_NONBLOCK) { + 
dev_dbg(npu->dev, "%s: no block!", __func__); + mutex_unlock(&npu->mutex_lock); + return -EAGAIN; + } + dev_dbg(npu->dev, "%s: going to sleep\n", __func__); + if (wait_event_interruptible(sess->response_wq, + !list_empty(&sess->response_list))) { + dev_dbg(npu->dev, "%s: signal\n", __func__); + mutex_unlock(&npu->mutex_lock); + return -ERESTARTSYS; + } + + dev_dbg(npu->dev, "%s: woken up\n", __func__); + } + + if (list_empty(&sess->response_list)) { + ret = 0; + goto out; + } + + struct npu_user_stream_rsp *rsp; + + rsp = list_first_entry(&sess->response_list, + struct npu_user_stream_rsp, + stream_rsp_list_entry); + if (!rsp) { + pr_debug("%s:get response NULL", __func__); + ret = 0; + goto out; + } + if (len >= rsp->rsp_size) + ret_len = rsp->rsp_size; + else + ret_len = len; + ret = copy_to_user(buf, &rsp->ursp, ret_len); + if (ret) { + ret = -EFAULT; + goto out; + } + + list_del(&rsp->stream_rsp_list_entry); + ret = ret_len; + + if (!rsp->session) + kfree(rsp); +out: + mutex_unlock(&npu->mutex_lock); + return ret; +} + +static ssize_t phytium_npu_write(struct file *file, const char __user *buf, + size_t count, loff_t *pposn) +{ + struct phytium_npu_session *sess = file->private_data; + struct phytium_npu_dev *npu = sess->npu_dev; + + if (!sess) + return -EINVAL; + phytium_npu_write_stream(npu, sess, buf, count); + return count; +} + +static unsigned int phytium_npu_poll(struct file *file, poll_table *wait) +{ + struct phytium_npu_session *sess = file->private_data; + unsigned long event = poll_requested_events(wait); + unsigned int mask = 0, ret; + + if (!sess) + return -EINVAL; + pr_debug("%s: PID: %d, sess id: %d, link: %p\n", __func__, + task_pid_nr(current), sess->id, sess); + ret = mutex_lock_interruptible(&sess->npu_dev->mutex_lock); + if (ret) + return POLLERR; + if (event & (POLLIN | POLLRDNORM)) { + /* Register for event */ + poll_wait(file, &sess->response_wq, wait); + + if (!list_empty(&sess->response_list)) + mask = POLLIN | POLLRDNORM; + } + 
mutex_unlock(&sess->npu_dev->mutex_lock); + pr_debug("return mask %x, POLLIN:%x, POLLRDNORM:%x", mask, POLLIN, POLLRDNORM); + return mask; +} + +static int phytium_npu_mm_debug_perf(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + void *arg) +{ + struct npu_debug_perf *dbg_cfg = (struct npu_debug_perf *)arg; + struct phytium_npu_debugfs *dbgfs = &sess->dbgfs; + int ret; + + if (copy_from_user(&sess->dbgfs, dbg_cfg, sizeof(*dbg_cfg))) + return -EFAULT; + pr_debug("[%s]:config debug info:%d,type:%#x", __func__, + dbgfs->debug_mode, dbgfs->debug_type); + + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + + ret = phytiun_npu_check_debug_fs_cfg(sess); + mutex_unlock(&npu->mutex_lock); + return ret; +} + +static int phytium_npu_mm_repeat_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + void *arg) +{ + struct npu_repeat_stream stream; + struct npu_repeat_stream *rstream = &stream; + int ret; + + if (copy_from_user(rstream, arg, sizeof(*rstream))) + return -EFAULT; + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + + phytium_npu_repeat_stream(sess, rstream->stream_id & rstream->stream_id_mask, + rstream->is_repeat); + mutex_unlock(&npu->mutex_lock); + return 0; +} + +static int phytium_npu_mm_delete_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + void *arg) +{ + struct npu_delete_stream stream; + struct npu_delete_stream *dstream = &stream; + int ret; + + if (copy_from_user(dstream, arg, sizeof(*dstream))) + return -EFAULT; + pr_info("%s: dstream id :%x, mask:%x, response:%d", __func__, dstream->stream_id, + dstream->stream_id_mask, dstream->is_res); + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + + phytium_npu_delete_stream(npu, sess, dstream); + mutex_unlock(&npu->mutex_lock); + return 0; +} + +static int phytium_npu_mm_sync_buf(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, void *arg) +{//TODO. 
+ return 0; +} + +static int phytium_npu_mm_set_buf(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + void *arg) +{ + struct npu_set_buffer buf; + struct npu_set_buffer *sbuf = &buf; + int ret; + + if (copy_from_user(sbuf, arg, sizeof(*sbuf))) + return -EFAULT; + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + phytium_npu_set_stream_buf_status_with_fd(sess, sbuf->fd, sbuf->set_state); + mutex_unlock(&npu->mutex_lock); + return 0; +} + +static int phytium_npu_mm_unmap(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + void *arg) +{ + int ret; + struct npu_memory_unmap usr_unmap; + + if (copy_from_user(&usr_unmap, arg, sizeof(usr_unmap))) + return -EFAULT; + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + + phytium_npu_mmu_unmap(npu, sess, &usr_unmap); + pr_debug("------unmap done"); + mutex_unlock(&npu->mutex_lock); + return 0; +} + +static int phytium_npu_mm_import(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + void *arg) +{ + struct npu_memory_map tmp; + struct npu_memory_map *map = &tmp; + int ret; + + if (copy_from_user(map, arg, sizeof(*map))) + return -EFAULT; + pr_debug("map type:%x, fd:%d, vaddr:%llx", map->map_type, map->fd, map->vaddr); + ret = mutex_lock_interruptible(&npu->mutex_lock); + if (ret) + return ret; + + ret = phytium_npu_mmu_map_init(npu, sess, map); + if (ret) + return ret; + phytium_npu_import_dmabuf(npu, sess, map); + mutex_unlock(&npu->mutex_lock); + return 0; +} + +static int phytium_npu_mm_map2cache(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, void *arg) +{ + return 0; +} + +#define PRODUCT_ID 0x0102030405060708 +#define KVERSION 0x0001 +static int +phytium_npu_get_info(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, void *arg) +{ + struct npu_info sinfo; + struct npu_info *info = &sinfo; + + info->pid = PRODUCT_ID; + info->version = 0x0001; + info->is_have_mmu = 1; + info->mmu_page_size = 
npu->nmmu_config.page_size; + info->mefficiency = 1; + info->use_debug = 0; + info->core_num = 1; + info->l1_size = 0; + info->l3_size = 0; + info->l3_percore_size = 0; + info->clock_freq = npu->clock_freq; + if (copy_to_user(arg, info, sizeof(*info))) + return -EFAULT; + return 0; +} + +static long phytium_npu_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct phytium_npu_session *sess = file->private_data; + struct phytium_npu_dev *npu = sess->npu_dev; + int retval; + + if (!sess) + return -EINVAL; + dev_dbg(npu->dev, "%s: cmd: 0x%x\n", __func__, cmd); + + switch (cmd) { + case NPU_INFO: + retval = phytium_npu_get_info(npu, sess, (void __user *)arg); + break; + + case NPU_MAP2CACHE: + retval = phytium_npu_mm_map2cache(npu, sess, (void __user *)arg); + break; + + case NPU_MEMORY_IMPORT: + retval = phytium_npu_mm_import(npu, sess, (void __user *)arg); + break; + + case NPU_MEMORY_UNMAP: + retval = phytium_npu_mm_unmap(npu, sess, (void __user *)arg); + break; + + case NPU_SET_BUF: + retval = phytium_npu_mm_set_buf(npu, sess, (void __user *)arg); + break; + + case NPU_SYNC_BUF: + retval = phytium_npu_mm_sync_buf(npu, sess, (void __user *)arg); + break; + case NPU_DELET_STREAM: + retval = phytium_npu_mm_delete_stream(npu, sess, (void __user *)arg); + break; + case NPU_REPEAT_STREAM: + retval = phytium_npu_mm_repeat_stream(npu, sess, (void __user *)arg); + break; + case NPU_DEBUG_PERF: + retval = phytium_npu_mm_debug_perf(npu, sess, (void __user *)arg); + break; + default: + dev_err(npu->dev, "No this cmd to execute."); + break; + } + + return retval; +} + +static const struct file_operations phytium_npu_fops = { + .owner = THIS_MODULE, + .read = phytium_npu_read, + .poll = phytium_npu_poll, + .write = phytium_npu_write, + .open = phytium_npu_open, + .unlocked_ioctl = phytium_npu_ioctl, + .compat_ioctl = phytium_npu_ioctl, + .release = phytium_npu_release, +}; + +int phytium_npu_register_misc(struct phytium_npu_dev *npudev) +{ + int ret; + char 
*npu_dev_name = NULL; + + if (!npudev || !npudev->dev) { + pr_err("%s: invalid params!\n", __func__); + return -EINVAL; + } + + npu_dev_name = devm_kzalloc(npudev->dev, 8, GFP_KERNEL); + if (!npu_dev_name) + return -ENOMEM; + + snprintf(npu_dev_name, 8, "npu%d", 0); + + dev_dbg(npudev->dev, "%s: trying to register NPU misc /dev/%s ...\n", + __func__, npu_dev_name); + + npudev->miscdev.minor = MISC_DYNAMIC_MINOR; + npudev->miscdev.fops = &phytium_npu_fops; + npudev->miscdev.name = npu_dev_name; + + ret = misc_register(&npudev->miscdev); + if (ret) { + dev_err(npudev->dev, "%s: Unable to register NPU misc device\n", __func__); + goto err; + } + + dev_dbg(npudev->dev, "%s: NPU misc device registered successfully\n", __func__); + pr_info("%s, misc:%p, npu:%p\n", __func__, &npudev->miscdev, npudev); + + return 0; + +err: + devm_kfree(npudev->dev, npu_dev_name); + return ret; +} + +int phytium_npu_unregister_misc(struct phytium_npu_dev *npudev) +{ + if (!npudev || !npudev->dev) { + pr_err("%s: invalid params!\n", __func__); + return -EINVAL; + } + + misc_deregister(&npudev->miscdev); + + dev_dbg(npudev->dev, "%s: NPU misc device unregistered successfully\n", __func__); + + return 0; +} diff --git a/drivers/staging/phytium-npu/phytium_npu_workqueue.c b/drivers/staging/phytium-npu/phytium_npu_workqueue.c new file mode 100644 index 0000000000000000000000000000000000000000..1d7301949b54998bb175b581e4bf93a725be8566 --- /dev/null +++ b/drivers/staging/phytium-npu/phytium_npu_workqueue.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Platform NPU driver for Phytium NPU controller + * + * Copyright (C) 2023 Phytium Technology Co., Ltd. 
+ */ +#include +#include +#include +#include +#include "phytium_npu.h" +#include "phytium_npu_uapi.h" +#ifdef PHYTIUM_NPU_PLATFORM +#include "phytium_npu_leopard_reg.h" +#else +#include "phytium_npu_reg.h" +#endif + +static void wake(struct phytium_npu_dev *npu) +{ + queue_work(npu->stream_wq, &npu->stream_work); +} + +static int phytium_npu_check_stream_buf(struct phytium_npu_session *sess, + struct npu_user_submit_stream *nustream) +{ + int i; + struct npu_excute_stream *nes = &nustream->estream; + + for (i = 0; i < nes->all; i++) { + if (!phytium_npu_find_mmu_ctx_map(sess, nes->fd[i])) { + pr_err("Not find input dma buf fd :%d from usr strem!", nes->fd[i]); + return -EFAULT; + } + pr_debug("find buffer fd %d", nes->fd[i]); + } + + if (!phytium_npu_find_mmu_ctx_map(sess, nustream->stream_fd)) { + pr_err("Not find stream dma buf fd :%d from usr strem!", nustream->stream_fd); + return -EFAULT; + } + + pr_debug("find stream fd %#x ,stream id:%#x", nustream->stream_fd, nes->sid); + return 0; +} + +static struct phytium_npu_stream * +phytium_npu_find_session_stream(struct phytium_npu_session *sess, int stream_id) +{ + struct phytium_npu_stream *nesstream, *tmp = NULL; + + WARN_ON(!sess); + + list_for_each_entry_safe(nesstream, tmp, &sess->stream_list, stream_list_entry) { + if (nesstream->nustream.estream.sid == stream_id) + break; + } + + return nesstream; +} + +static struct npu_user_stream_rsp * +phytium_npu_find_session_stream_rsp(struct phytium_npu_session *sess, int stream_id) +{ + struct npu_user_stream_rsp *nustream_rsp, *tmp = NULL, *rsp = NULL; + + WARN_ON(!sess); + + list_for_each_entry_safe(nustream_rsp, tmp, &sess->response_list, stream_rsp_list_entry) { + if (nustream_rsp->ursp.sid == stream_id) { + rsp = nustream_rsp; + break; + } + } + + return rsp; +} + +int phytium_npu_rollback_stream(struct phytium_npu_dev *npu) +{ + int processing = 0; + struct phytium_npu_stream *stream; + + if (npu->activated_stream) { + stream = npu->activated_stream; + 
stream->is_rollback = TRUE; + stream->stream_status = NPU_STREAM_NONE; + stream->infer_status = NPU_STREAM_INFER_DONE; + processing = TRUE; + npu->activated_stream = NULL; + } + + if (npu->queued_stream) { + stream = npu->queued_stream; + stream->is_rollback = TRUE; + stream->stream_status = NPU_STREAM_NONE; + stream->infer_status = NPU_STREAM_INFER_DONE; + npu->queued_stream = NULL; + } + + pr_debug("%s: processing %d", __func__, processing); + return processing; +} + +int phytium_npu_update_activated_stream_4_err_mmu(struct phytium_npu_dev *npu) +{ + int processing = 0; + + if (npu->activated_stream) { + processing = TRUE; + npu->activated_stream = NULL; + } + return processing; +} + +int phytium_npu_try_activate_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct phytium_npu_stream *stream) +{ + if (!npu->is_cache_stream_on) { + if (npu->activated_stream) + return NEED_SCHEDULE; + goto activate_stream; + } + if (npu->activated_stream && npu->queued_stream) { + pr_info("activated stream and queued stream is full"); + return NEED_SCHEDULE; + } else if (!npu->activated_stream && npu->queued_stream) { + pr_info("impossible here ,queued stream not NULL"); + } else if (npu->activated_stream && !npu->queued_stream) { + return NEED_QUEUE_STREAM; + } + //else activate and queued stream is both NULL; +activate_stream: + npu->activated_stream = stream; + stream->stream_status = NPU_STREAM_IN_HW; + stream->infer_status = NPU_STREAM_INFER_WORK; + + return phytium_npu_submit_stream(npu, sess, stream); +} + +static int phytium_npu_try_queued_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct phytium_npu_stream *stream) +{ + if (!npu->is_cache_stream_on) + return 1; + + if (npu->activated_stream && !npu->queued_stream) { + stream->stream_status = NPU_STREAM_NONE; + stream->infer_status = NPU_STREAM_INFER_WAIT; + npu->queued_stream = stream; + pr_debug("stream had queued"); + return 
phytium_npu_prepare_hw_4_queued_stream(npu, sess, stream); + } + return 1; +} + +int phytium_npu_try_excute_queued_stream(struct phytium_npu_dev *npu) +{ + struct phytium_npu_stream *stream = npu->queued_stream; + + if (!npu->is_cache_stream_on) { + npu->activated_stream = NULL; + pr_debug("%s, activate stream NULL", __func__); + return 1; + } + + if (npu->activated_stream) { + npu->activated_stream->infer_status = NPU_STREAM_INFER_DONE; + npu->activated_stream = NULL; + } + + if (stream) { + npu->activated_stream = stream; + stream->stream_status = NPU_STREAM_BUFF_IN_HW; + stream->infer_status = NPU_STREAM_INFER_WORK; + npu->queued_stream = NULL; + pr_debug("%s NPU will excut queued stream", __func__); + phytium_npu_debug_set_hw_register(npu, npu->activated_stream->session); + phytium_npu_config_event(npu, NPU_ALL_EVENT, TRUE); /* enable all event */ + phytium_npu_config_start_inference(npu, npu->activated_stream->session, + npu->activated_stream); + return 0; + } + + return 1; +} + +int phytium_npu_delete_stream(struct phytium_npu_dev *npu, struct phytium_npu_session *sess, + struct npu_delete_stream *stream) +{ + struct phytium_npu_stream *nesstream; + struct npu_user_stream_rsp *nustream_rsp, *new_rsp; + int stream_id = stream->stream_id & stream->stream_id_mask; + + nesstream = phytium_npu_find_session_stream(sess, stream_id); + nustream_rsp = phytium_npu_find_session_stream_rsp(sess, stream_id); + + if (stream->is_res) { + new_rsp = kzalloc(sizeof(*new_rsp), GFP_KERNEL); + if (!new_rsp) + return -ENOMEM; + + INIT_LIST_HEAD(&new_rsp->stream_rsp_list_entry); + new_rsp->ursp.sid = stream_id; + new_rsp->ursp.err_no = NPU_RSP_OK; + new_rsp->ursp.rsp_err_flags = 0; + new_rsp->ursp.session_id = sess->id; + new_rsp->rsp_size = MAX_NPU_USER_RSP_SIZE; + new_rsp->session = NULL; + list_add_tail(&new_rsp->stream_rsp_list_entry, &sess->response_list); + wake_up(&sess->response_wq); + pr_debug("delete stream will response a new msg,%s", __func__); + } + + if (nesstream) { 
+ list_del(&nesstream->stream_list_entry); + if (nesstream->rsp) { + if (nustream_rsp) + list_del(&nustream_rsp->stream_rsp_list_entry); + kfree(nesstream->rsp); + } + kfree(nesstream); + nesstream = NULL; + pr_debug("delete the stream :%d,%s", stream_id, __func__); + } + + return 0; +} + +static int phytium_npu_add_stream(struct phytium_npu_dev *npu, + struct phytium_npu_session *sess, + struct phytium_npu_stream *nstream) +{ + int ret; + struct npu_excute_stream *nesstream; + + nesstream = &nstream->nustream.estream; + INIT_LIST_HEAD(&nstream->stream_list_entry); + + if (nesstream->stype != NPU_STREAM_SUBMIT) { + pr_err("Stream type %d is not support !", nesstream->stype); + return -EFAULT; + } + + ret = phytium_npu_check_stream_buf(sess, &nstream->nustream); + if (ret) { + pr_err("Not find stream dma buf fd :%d from usr strem!", + nstream->nustream.stream_fd); + return -EFAULT; + } + + list_add_tail(&nstream->stream_list_entry, &sess->stream_list); + pr_debug("add new stream to session stream_list"); + ret = phytium_npu_try_activate_stream(npu, sess, nstream); + //TODO start a new thread to process the stream when activate the stream failed + if (ret == NEED_SCHEDULE) { + phytium_npu_schedule_stream_queues(npu, TRUE); + } else if (ret == NEED_QUEUE_STREAM) { + ret = phytium_npu_try_queued_stream(npu, sess, nstream); + if (!ret) + pr_debug("%s Success for queued a new stream", __func__); + else + pr_debug("Not Success for activating a new stream\n"); + } + + return 0; +} + +static void debug_stream_info(struct phytium_npu_session *sess, + struct npu_user_submit_stream *stream, + size_t size) +{ + int i = 0; + + pr_debug("stream fd:%d, size:%d, offset:%x", stream->stream_fd, + stream->stream_size, stream->stream_off); + for (i = 0; i < 16; i++) { + pr_debug("i:%d, fd:%d s:%d off:%x", i, stream->estream.fd[i], + stream->bufsizes[i], stream->bufoffsets[i]); + } + + pr_debug("sess:%p SESSIONID:%d ,stream id:%x num seg:%d, stype:%d", + sess, sess->id, 
stream->estream.sid, stream->num, stream->estream.stype);
+}
+
+/* Copy a user-submitted stream descriptor in and queue it on the session. */
+int phytium_npu_write_stream(struct phytium_npu_dev *npu,
+			     struct phytium_npu_session *sess,
+			     const char *stream_buf, size_t size)
+{
+	struct phytium_npu_stream *nstream;
+	int ret;
+
+	nstream = kzalloc(sizeof(*nstream), GFP_KERNEL);
+	if (!nstream)
+		return -ENOMEM;	/* allocation failure is -ENOMEM, not -EINVAL */
+
+	nstream->session = sess;
+	nstream->stream_status = NPU_STREAM_NONE;
+	nstream->infer_status = NPU_STREAM_INFER_NONE;
+	ret = copy_from_user(&nstream->nustream, stream_buf, size);
+	if (ret) {
+		dev_err(npu->dev, "%s: user stream copy failed!\n", __func__);
+		ret = -EFAULT;
+		goto err_copy;
+	}
+	ret = mutex_lock_interruptible(&npu->mutex_lock);
+	if (ret)
+		goto err_copy;	/* interrupted: lock NOT held, just free */
+	debug_stream_info(sess, &nstream->nustream, size);
+
+	phytium_npu_add_stream(npu, sess, nstream);	/* NOTE(review): return ignored — confirm add failure can't leak nstream */
+	mutex_unlock(&npu->mutex_lock);
+	return 0;
+
+err_copy:
+	kfree(nstream);
+
+	return ret;
+}
+
+static struct phytium_npu_stream *phytium_npu_is_working_full(struct phytium_npu_dev *npu)
+{
+	if (npu->is_cache_stream_on) {
+		if (npu->activated_stream && npu->queued_stream)
+			return NULL;
+
+		return npu->activated_stream ? 
npu->queued_stream : npu->activated_stream; + } + + return npu->activated_stream; +} + +static int phytium_npu_stream_is_ready_for_inference(struct phytium_npu_stream *nstream, + struct npu_user_submit_stream *nustream) +{ + if (nstream->stream_status > NPU_STREAM_NONE) + return NPU_STREAM_BUFF_IN_HW; + if (!phytium_npu_check_stream_buf_is_ready(nstream)) + return NPU_STREAM_BUFF_NO_READY; + return 0; +} + +void do_work(struct work_struct *work) +{ + struct phytium_npu_dev *npu = container_of(work, struct phytium_npu_dev, stream_work); + struct phytium_npu_session *curr_sess, *next_sess; + struct phytium_npu_stream *curr_stream, *next_stream; + struct npu_user_submit_stream *nustream; + int ret; + + mutex_lock(&npu->mutex_lock); + if (phytium_npu_is_working_full(npu)) { + pr_debug("NPU is working for inference"); + mutex_unlock(&npu->mutex_lock); + return; + } + + do { + list_for_each_entry_safe(curr_sess, next_sess, &npu->sched_sess_list, + sched_list_entry) { + list_for_each_entry_safe(curr_stream, next_stream, + &curr_sess->stream_list, + stream_list_entry) { + if (curr_stream->stream_status >= NPU_STREAM_IN_HW || + curr_stream->infer_status) + continue; + + nustream = &curr_stream->nustream; + pr_debug("estream type:%#x", nustream->estream.stype); + if (nustream->estream.stype == NPU_STREAM_SUBMIT) { + ret = phytium_npu_stream_is_ready_for_inference(curr_stream, + nustream); + pr_debug("strem is ready:%d", ret); + if (ret)/* current stream is not ready */ + continue; + ret = phytium_npu_try_activate_stream(npu, + curr_sess, + curr_stream); + if (!ret) { + phytium_npu_change_schedule_session(curr_sess, + npu, TRUE); + pr_debug("Change schedule session locate"); + goto out_schedule; + } else if (ret == NEED_QUEUE_STREAM) { + ret = phytium_npu_try_queued_stream(npu, + curr_sess, + curr_stream); + } + + if (!ret) + pr_debug("%s Success for queued a new stream", + __func__); + } + } + } +out_schedule:; + } while (0); + mutex_unlock(&npu->mutex_lock); +} + +void 
phytium_npu_schedule_stream_queues(struct phytium_npu_dev *npu, bool asynchronous) +{ + if (asynchronous) { + pr_debug("asynchronous schedule do work"); + wake(npu); + } else { + pr_debug("synchronous schedule do work"); + mutex_unlock(&npu->mutex_lock); + do_work(&npu->stream_work); + mutex_lock(&npu->mutex_lock); + } +} diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 9989809613b750efe929ba4d3148963b49ccebc2..2e1b3d1058f059ee9da02e74cf388b75b1e579bb 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -84,6 +84,26 @@ config SERIAL_PHYTIUM_PCI If unsure, say N. +config SERIAL_PHYTIUM_UART_V2 + tristate "Phytium V2 serial port support" + depends on ARCH_PHYTIUM + select SERIAL_CORE + help + This driver supports the Phytium UART V2 controller on platform adapters. + If you want to compile this driver into the kernel, say Y here. To + compile this driver as a module, choose M here. + + If unsure, say N. + +config SERIAL_PHYTIUM_V2_DEBUG + bool "Phytium UART V2 debug/heartbeat function support" + depends on SERIAL_PHYTIUM_UART_V2 + help + This driver supports the Phytium UART V2 controller Debug on platform adapters. + If you want to enable debug, say Y here. + + If unsure, say N. 
+ config SERIAL_EARLYCON_SEMIHOST bool "Early console using Arm compatible semihosting" depends on ARM64 || ARM || RISCV diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index cd1d9c759cac022917b58b8d8296999bc1be730a..170a4176ab09723013c8e15d0fb7a4e33f9525cd 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -89,7 +89,7 @@ obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o obj-$(CONFIG_SERIAL_PHYTIUM_PCI) += phytium-uart.o - +obj-$(CONFIG_SERIAL_PHYTIUM_UART_V2) += phytium-uart-v2.o # GPIOLIB helpers for modem control lines obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o diff --git a/drivers/tty/serial/phytium-uart-v2.c b/drivers/tty/serial/phytium-uart-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..1fc1eb7043d6d9bd9b8051895cbc7c5b2e767715 --- /dev/null +++ b/drivers/tty/serial/phytium-uart-v2.c @@ -0,0 +1,1379 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Driver for phytium uart v2 + * Copyright (c) 2020-2025, Phytium Technology, Co., Ltd. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phytium-uart-v2.h" +#define cmd_id_type uint8_t +#define cmd_subid_type uint8_t +#define DEFAULT_CLK 10000000 +#define PHYT_UART_DRV_VER "1.1.1" +/* + * We wrap our port structure around the generic uart_port. 
+ */ +struct phytium_uart_port { + struct uart_port port; + unsigned int old_cr; /* state during shutdown */ + unsigned int old_status; + char type[12]; + struct device *dev; + struct clk *clk; + void __iomem *shmem_base; + bool m_buf_empty; + bool heartbeat_enable; + bool debug_enable; + struct timer_list alive_timer; +}; + +/* define msg format */ +struct msg { + u16 module_id; + u8 cmd_id; + u8 cmd_subid; + u16 length; + u16 complete; + u8 data[120]; +}; + +static unsigned int phytium_uart_read(const struct phytium_uart_port *pup, + unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + return readl_relaxed(addr); +} + +static void phytium_uart_write(unsigned int val, + const struct phytium_uart_port *pup, unsigned int reg) +{ + void __iomem *addr = pup->port.membase + reg; + + writel_relaxed(val, addr); +} + +/* func for filling with msg */ +static void msg_fill(struct msg *msg, u16 module_id, + u8 cmd_id, u8 cmd_subid, u16 complete) +{ + msg->module_id = module_id; + msg->cmd_id = cmd_id; + msg->cmd_subid = cmd_subid; + msg->complete = complete; + + memset(msg->data, 0, sizeof(msg->data)); +} + +static int tx_ring_buffer_is_full(struct phytium_uart_port *pup) +{ + u16 tx_tail, tx_head; + + tx_tail = phytium_uart_read(pup, REG_TX_TAIL) & BUFFER_POINTER_MASK; + tx_head = phytium_uart_read(pup, REG_TX_HEAD) & BUFFER_POINTER_MASK; + + if (((tx_tail + 1) % TX_BUFFER_SIZE) == tx_head) + return 1; + + return 0; +} + +static int tx_ring_buffer_is_empty(struct phytium_uart_port *pup) +{ + u16 tx_tail, tx_head; + + tx_tail = phytium_uart_read(pup, REG_TX_TAIL) & BUFFER_POINTER_MASK; + tx_head = phytium_uart_read(pup, REG_TX_HEAD) & BUFFER_POINTER_MASK; + + if (tx_tail == tx_head) + return 1; + + return 0; +} + +static void PHYT_MSG_INSERT(struct phytium_uart_port *pup, struct msg *msg) +{ + u16 tx_tail, tx_head; + + tx_tail = phytium_uart_read(pup, REG_TX_TAIL) & BUFFER_POINTER_MASK; + tx_head = phytium_uart_read(pup, REG_TX_HEAD) & 
BUFFER_POINTER_MASK; + + pr_debug("TX_TAIL:%x, TX_HEAD:%x", tx_tail, tx_head); + + while (tx_ring_buffer_is_full(pup)) + cpu_relax(); + + memcpy(pup->shmem_base + TX_MSG_SIZE * tx_tail, + msg, sizeof(struct msg)); + + /* updata tx tail pointer */ + tx_tail = (tx_tail + 1) % TX_BUFFER_SIZE; + phytium_uart_write(tx_tail, pup, REG_TX_TAIL); + + phytium_uart_write(PHYT_INT_TRIGGER_BIT, pup, REG_PHYT_INT_STATE); +} + + +static int rx_ring_buffer_is_empty(struct phytium_uart_port *pup) +{ + u16 rx_tail, rx_head; + + rx_tail = phytium_uart_read(pup, REG_RX_TAIL) & BUFFER_POINTER_MASK; + rx_head = phytium_uart_read(pup, REG_RX_HEAD) & BUFFER_POINTER_MASK; + if (rx_tail == rx_head) + return 1; + return 0; +} + + +static void phytium_fifo_to_tty(struct phytium_uart_port *pup) +{ + struct msg *handler_msg; + int sysrq; + unsigned int ch, flag, fifotaken; + u16 count = 0; + u16 rx_head, rx_tail; + + while (!rx_ring_buffer_is_empty(pup)) { + rx_head = (phytium_uart_read(pup, REG_RX_HEAD) + & BUFFER_POINTER_MASK); + rx_tail = (phytium_uart_read(pup, REG_RX_TAIL) + & BUFFER_POINTER_MASK); + + /* get operator pointer of rv data msg */ + handler_msg = (struct msg *)(pup->shmem_base + TX_MSG_SIZE + * TX_BUFFER_SIZE + RX_MSG_SIZE * rx_head); + pr_debug("handler_msg = %p, rx_head=%d, rx_tail=%d\n", + handler_msg, rx_head, rx_tail); + if (!handler_msg) { + pr_err("%s cannot get msg!\n", __func__); + return; + } + count = handler_msg->length / 2; + + rx_head = (rx_head + 1) % RX_BUFFER_SIZE; + phytium_uart_write(rx_head, pup, REG_RX_HEAD); + + for (fifotaken = 0; fifotaken != RX_DATA_MAXINUM; fifotaken++) { + if (count == 0) + break; + count--; + /* Take chars maxinum 60 * 2 bytes from the MSG */ + ch = (handler_msg->data[2 * fifotaken]); + ch |= (handler_msg->data[2 * fifotaken + 1] << 8); + ch |= DATA_DUMMY_RX; + flag = TTY_NORMAL; + pup->port.icount.rx++; + + if (unlikely(ch & DATA_ERROR)) { + if (ch & DATA_BE) { + ch &= ~(DATA_FE | DATA_PE); + pup->port.icount.brk++; + if 
(uart_handle_break(&pup->port)) + continue; + } else if (ch & DATA_PE) + pup->port.icount.parity++; + else if (ch & DATA_FE) + pup->port.icount.frame++; + if (ch & DATA_OE) + pup->port.icount.overrun++; + + ch &= pup->port.read_status_mask; + + if (ch & DATA_BE) + flag = TTY_BREAK; + else if (ch & DATA_PE) + flag = TTY_PARITY; + else if (ch & DATA_FE) + flag = TTY_FRAME; + } + + spin_unlock(&pup->port.lock); + sysrq = uart_handle_sysrq_char(&pup->port, + ch & CHAR_MASK); + spin_lock(&pup->port.lock); + + if (!sysrq) + uart_insert_char(&pup->port, ch, + DATA_OE, ch, flag); + } + } +} + +static void phytium_rx_chars(struct phytium_uart_port *pup) +__releases(&pup->port.lock) +__acquires(&pup->port.lock) +{ + phytium_fifo_to_tty(pup); + + spin_unlock(&pup->port.lock); + + tty_flip_buffer_push(&pup->port.state->port); + + spin_lock(&pup->port.lock); +} + +static void phytium_stop_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + unsigned int int_mask; + + if (!tx_ring_buffer_is_empty(pup)) + return; + /* mask tx msg tail pointer int*/ + int_mask = phytium_uart_read(pup, REG_RP_INT_MASK); + phytium_uart_write(int_mask | PHYT_MSG_DATA_COMPLETED, + pup, REG_RP_INT_MASK); + +} + +static bool phytium_tx_xchar(struct phytium_uart_port *pup, unsigned char c, + bool from_irq) +{ + struct msg msg; + struct msg *handler_msg; + u16 count = 1; + + msg_fill(&msg, UART_MODULE_ID, MSG_DATA, MSG_TX_DATA, 0); + + handler_msg = (struct msg *)(pup->shmem_base); + + if (unlikely(!from_irq) && !(handler_msg->complete & MSG_COMPLETE)) + return false; + msg.data[0] = c; + msg.length = count; + + /* set 1 to wait rv clear out */ + phytium_uart_write(1, pup, REG_CHECK_TX); + + PHYT_MSG_INSERT(pup, &msg); + + pup->port.icount.tx++; + + return true; + +} + +static bool phytium_tx_chars(struct phytium_uart_port *pup, bool from_irq) +{ + struct circ_buf *xmit = &pup->port.state->xmit; + struct msg msg; + int i = 0; + int count 
= TX_DATA_MAXINUM; + u16 datasize = 0; + + msg_fill(&msg, UART_MODULE_ID, MSG_DATA, MSG_TX_DATA, 0); + + if (pup->port.x_char) { + if (!phytium_tx_xchar(pup, pup->port.x_char, from_irq)) + return true; + pup->port.x_char = 0; + --count; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(&pup->port)) { + phytium_stop_tx(&pup->port); + return false; + } + + if (tx_ring_buffer_is_full(pup)) + return false; + do { + if (count-- == 0) + break; + datasize++; + + msg.length = datasize; + msg.data[datasize - 1] = xmit->buf[xmit->tail]; + + pup->port.icount.tx++; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + } while (!uart_circ_empty(xmit)); + + PHYT_MSG_INSERT(pup, &msg); + + /* if xmit is empty, break */ + if (uart_circ_empty(xmit)) + pr_debug("xmit is empty at i=%d\n", i); + /* If tx_chars is called by start_tx or xmit + * data is full enough to fill msg, don't wakeup + * write again. + */ + if (uart_circ_chars_pending(xmit) <= TX_DATA_MAXINUM) + uart_write_wakeup(&pup->port); + if (uart_circ_empty(xmit)) { + phytium_stop_tx(&pup->port); + return false; + } + return true; +} + +static void phytium_modem_status(struct phytium_uart_port *pup) +{ + struct msg msg; + struct msg *handler_msg; + unsigned int status, delta; + u16 old_tx_tail; + + msg_fill(&msg, UART_MODULE_ID, MSG_GET, MSG_GET_MODEM, 0); + + old_tx_tail = phytium_uart_read(pup, REG_TX_TAIL) & BUFFER_POINTER_MASK; + + PHYT_MSG_INSERT(pup, &msg); + + handler_msg = + (struct msg *)(pup->shmem_base + TX_MSG_SIZE * old_tx_tail); + + while (!tx_ring_buffer_is_empty(pup)) + cpu_relax(); + + status = handler_msg->data[0]; + + delta = status ^ pup->old_status; + pup->old_status = status; + + if (!delta) + return; + + if (delta & MODEM_DCD) + uart_handle_dcd_change(&pup->port, status & MODEM_DCD); + + if (delta & MODEM_DSR) + pup->port.icount.dsr++; + + if (delta & MODEM_CTS) + uart_handle_cts_change(&pup->port, status & MODEM_CTS); + + wake_up_interruptible(&pup->port.state->port.delta_msr_wait); +} + 
+/* Shared IRQ handler: ack pending events, then service RX, modem and TX. */
+static irqreturn_t phytium_uart_interrupt(int irq, void *uart_port)
+{
+	struct phytium_uart_port *pup = uart_port;
+	unsigned long flags;
+	unsigned int status;
+	int handled = 0;
+
+	spin_lock_irqsave(&pup->port.lock, flags);
+	status = phytium_uart_read(pup, REG_RP_INT_STATE);
+	if (status) {
+		/* clear rx_tail,tx_data_complete interrupts */
+		phytium_uart_write(status & ~(TX_HEAD_INT | RX_TAIL_INT |
+				   PHYT_MSG_DATA_COMPLETED | MODEM_INT),
+				   pup, REG_RP_INT_STATE);
+
+		phytium_uart_write(TX_HEAD_INT | RX_TAIL_INT |
+				   PHYT_MSG_DATA_COMPLETED | MODEM_INT,
+				   pup, REG_RP_INT_STATE_CLR);
+
+		if (status & RX_TAIL_INT)
+			phytium_rx_chars(pup);
+
+		if (status & MODEM_INT)
+			phytium_modem_status(pup);
+
+		if (status & PHYT_MSG_DATA_COMPLETED)
+			phytium_tx_chars(pup, true);
+
+		handled = 1;
+	}
+	spin_unlock_irqrestore(&pup->port.lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+/* Report TIOCSER_TEMT only once every queued TX msg has been consumed. */
+static unsigned int phytium_tx_empty(struct uart_port *port)
+{
+	struct phytium_uart_port *pup =
+		container_of(port, struct phytium_uart_port, port);
+
+	unsigned int status;
+
+	/* was tx_ring_buffer_is_full(): that reported "empty" while data
+	 * was still queued (ring neither full nor empty), so the serial
+	 * core could drop pending output on close/flush.
+	 */
+	status = !tx_ring_buffer_is_empty(pup);
+	return status ? 
0 : TIOCSER_TEMT; +} + + +static void phytium_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + struct msg msg; + unsigned int status; + cmd_subid_type cmd_subid; + enum phytuart_set_subid cmd = PHYTUART_MSG_CMD_SET_MCTRL; + + cmd_subid = getHexValue(cmd); + + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid, 0); + + msg.data[3] = (mctrl >> 24) & MSG_DATA_MASK; + msg.data[2] = (mctrl >> 16) & MSG_DATA_MASK; + msg.data[1] = (mctrl >> 8) & MSG_DATA_MASK; + msg.data[0] = (mctrl) & MSG_DATA_MASK; + + if (port->status & UPSTAT_AUTORTS) { + status = 1; + msg.data[7] = (status >> 24) & MSG_DATA_MASK; + msg.data[6] = (status >> 16) & MSG_DATA_MASK; + msg.data[5] = (status >> 8) & MSG_DATA_MASK; + msg.data[4] = (status) & MSG_DATA_MASK; + } + + PHYT_MSG_INSERT(pup, &msg); + +} + +static unsigned int phytium_get_mctrl(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + unsigned int cr = 0; + unsigned int status; + struct msg msg; + struct msg *handler_msg; + u16 old_tx_tail; + + old_tx_tail = phytium_uart_read(pup, REG_TX_TAIL) & BUFFER_POINTER_MASK; + + msg_fill(&msg, UART_MODULE_ID, MSG_GET, MSG_GET_MODEM, 0); + + PHYT_MSG_INSERT(pup, &msg); + handler_msg = + (struct msg *)(pup->shmem_base + TX_MSG_SIZE * old_tx_tail); + while (!tx_ring_buffer_is_empty(pup)) + cpu_relax(); + status = handler_msg->data[0]; + + if (status & MODEM_CTS) + cr |= TIOCM_CTS; + if (status & MODEM_DSR) + cr |= TIOCM_DSR; + if (status & MODEM_CAR) + cr |= TIOCM_CAR; + if (status & MODEM_RNG) + cr |= TIOCM_RNG; + if (status & MODEM_RTS) + cr |= TIOCM_RTS; + if (status & MODEM_DTR) + cr |= TIOCM_DTR; + if (status & MODEM_OUT1) + cr |= TIOCM_OUT1; + if (status & MODEM_OUT2) + cr |= TIOCM_OUT2; + + return cr; +} + +static void phytium_start_tx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct 
phytium_uart_port, port); + unsigned int int_mask = 0; + + /* unmask tx_ring_buffer interrupt */ + if (phytium_tx_chars(pup, false)) { + int_mask = phytium_uart_read(pup, REG_RP_INT_MASK); + phytium_uart_write(int_mask & ~PHYT_MSG_DATA_COMPLETED, + pup, REG_RP_INT_MASK); + } +} + +static void phytium_stop_rx(struct uart_port *port) +{ + + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + struct msg msg; + cmd_subid_type cmd_subid; + unsigned int int_mask; + enum phytuart_set_subid cmd = PHYTUART_MSG_CMD_SET_ERROR_IM; + + cmd_subid = getHexValue(cmd); + + /* mask rx tail int */ + int_mask = phytium_uart_read(pup, REG_RP_INT_MASK); + phytium_uart_write(int_mask | RX_TAIL_INT, pup, REG_RP_INT_MASK); + + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid, 0); + + PHYT_MSG_INSERT(pup, &msg); + +} + +static void phytium_throttle_rx(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + phytium_stop_rx(port); + + spin_unlock_irqrestore(&port->lock, flags); +} + + +static void phytium_enable_ms(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + struct msg msg; + cmd_subid_type cmd_subid; + + enum phytuart_set_subid cmd = PHYTUART_MSG_CMD_SET_MODEM_IM; + + cmd_subid = getHexValue(cmd); + + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid, 0); + + msg.data[0] = 0xf; + + PHYT_MSG_INSERT(pup, &msg); + +} + +static void phytium_break_ctl(struct uart_port *port, int break_state) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + struct msg msg; + unsigned long flags; + unsigned int ctrl = 1; + cmd_subid_type cmd_subid; + enum phytuart_set_subid cmd = PHYTUART_MSG_CMD_SET_BREAK_EN; + + cmd_subid = getHexValue(cmd); + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid, 0); + spin_lock_irqsave(&pup->port.lock, flags); + if (break_state == -1) { + + msg.data[3] = (ctrl >> 24) & MSG_DATA_MASK; + 
msg.data[2] = (ctrl >> 16) & MSG_DATA_MASK;
+		msg.data[1] = (ctrl >> 8) & MSG_DATA_MASK;
+		msg.data[0] = (ctrl) & MSG_DATA_MASK;
+	}
+
+	PHYT_MSG_INSERT(pup, &msg);
+
+	spin_unlock_irqrestore(&pup->port.lock, flags);
+
+}
+
+/* Enable clocks/pins, resolve uartclk (ACPI or clk fw), unmask RX int. */
+static int phytium_hwinit(struct uart_port *port)
+{
+	struct phytium_uart_port *pup =
+		container_of(port, struct phytium_uart_port, port);
+	struct msg msg;
+	int retval;
+	u64 uart_clk = 0;	/* stays 0 if the ACPI property read fails */
+	unsigned int int_mask;
+	unsigned int status;
+	cmd_subid_type cmd_subid;
+	enum phytuart_set_subid cmd = PHYTUART_MSG_CMD_SET_HWINIT;
+
+	cmd_subid = getHexValue(cmd);
+
+	msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid, 0);
+
+	pinctrl_pm_select_default_state(port->dev);
+
+	retval = clk_prepare_enable(pup->clk);
+	if (retval)
+		return retval;
+	if (has_acpi_companion(pup->port.dev)) {
+		device_property_read_u64(pup->port.dev, "clock-frequency",
+					 &uart_clk);
+		if (uart_clk) {
+			if (uart_clk > 100000000) {	/* was also "'n' == uart_clk": char literal vs frequency, meaningless */
+				dev_err(pup->port.dev, "uartclk get from acpi is error or NULL!\n");
+				pup->port.uartclk = DEFAULT_CLK;
+			} else
+				pup->port.uartclk = (uint32_t)uart_clk;
+		} else {
+			dev_err(pup->port.dev, "have no acpi clk, use default clk!\n");
+			pup->port.uartclk = DEFAULT_CLK;
+		}
+	} else
+		pup->port.uartclk = clk_get_rate(pup->clk);
+
+	PHYT_MSG_INSERT(pup, &msg);
+
+	status = phytium_uart_read(pup, REG_RP_INT_STATE);
+	phytium_uart_write(status & ~RX_TAIL_INT, pup, REG_RP_INT_STATE);
+	phytium_uart_write(RX_TAIL_INT, pup, REG_RP_INT_STATE_CLR);
+	/* unmask rx tail int */
+	int_mask = phytium_uart_read(pup, REG_RP_INT_MASK);
+	phytium_uart_write(int_mask & ~RX_TAIL_INT, pup, REG_RP_INT_MASK);
+
+	return 0;
+}
+
+static int phytium_uart_allocate_irq(struct phytium_uart_port *pup)
+{
+	if (!pup->port.irq)
+		return -EINVAL;
+
+	return request_irq(pup->port.irq, phytium_uart_interrupt,
+			   IRQF_SHARED, DRV_NAME, pup);
+}
+
+static void phytium_enable_interrupts(struct phytium_uart_port *pup)
+{
+	unsigned long flags;
+	unsigned int i;
+	u16 
rx_head, rx_tail; + unsigned int status; + unsigned int int_mask; + + unsigned int buffer_size = RX_BUFFER_SIZE; + + spin_lock_irqsave(&pup->port.lock, flags); + + /* clear out rx-int-status */ + status = phytium_uart_read(pup, REG_RP_INT_STATE); + phytium_uart_write(status & ~RX_TAIL_INT, pup, REG_RP_INT_STATE); + /* when enable int, first empty rx ring buffer */ + for (i = 0; i < buffer_size * 2; i++) { + if (rx_ring_buffer_is_empty(pup)) + break; + rx_head = phytium_uart_read(pup, REG_RX_HEAD) + & BUFFER_POINTER_MASK; + rx_head = (rx_head + 1) % buffer_size; + phytium_uart_write(rx_head, pup, REG_RX_HEAD); + } + + /* enable rx_tail int */ + rx_tail = phytium_uart_read(pup, REG_RX_TAIL); + phytium_uart_write(rx_tail | RX_TAIL_INT_ENABLE, pup, REG_RX_TAIL); + + /* unmask rx tail int */ + int_mask = phytium_uart_read(pup, REG_RP_INT_MASK); + phytium_uart_write(int_mask & ~RX_TAIL_INT, pup, REG_RP_INT_MASK); + spin_unlock_irqrestore(&pup->port.lock, flags); +} + +static void phytium_unthrottle_rx(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + phytium_enable_interrupts(pup); +} + + +static int phytium_startup(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + struct msg msg; + struct msg *handler_msg; + cmd_subid_type cmd_subid; + int ret = 0; + u16 old_tx_tail; + + enum phytuart_set_subid cmd = PHYTUART_MSG_CMD_SET_STARTUP; + + cmd_subid = getHexValue(cmd); + + ret = phytium_hwinit(port); + if (ret) + goto out; + + ret = phytium_uart_allocate_irq(pup); + if (ret) + goto out; + + spin_lock_irq(&pup->port.lock); + + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid, 0); + + PHYT_MSG_INSERT(pup, &msg); + + spin_unlock_irq(&pup->port.lock); + + msg_fill(&msg, UART_MODULE_ID, MSG_GET, MSG_GET_MODEM, 0); + + old_tx_tail = phytium_uart_read(pup, REG_TX_TAIL) & BUFFER_POINTER_MASK; + + PHYT_MSG_INSERT(pup, &msg); + + handler_msg = (struct 
msg *) + (pup->shmem_base + TX_MSG_SIZE * old_tx_tail); + + while (!tx_ring_buffer_is_empty(pup)) + cpu_relax(); + + pup->old_status = handler_msg->data[0]; + + phytium_enable_interrupts(pup); + + return 0; +out: + return ret; +} + +static void phytium_disable_uart(struct phytium_uart_port *pup) +{ + + struct msg msg; + cmd_subid_type cmd_subid; + + enum phytuart_set_subid cmd = PHYTUART_MSG_CMD_SET_DISABLE_UART; + + cmd_subid = getHexValue(cmd); + + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid, 0); + + /* set none flow control status */ + pup->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + spin_lock_irq(&pup->port.lock); + + PHYT_MSG_INSERT(pup, &msg); + + spin_unlock_irq(&pup->port.lock); + +} + +static void phytium_disable_interrupts(struct phytium_uart_port *pup) +{ + struct msg msg; + + /* reset uart */ + msg_fill(&msg, UART_MODULE_ID, MSG_DEFAULT, MSG_DEFAULT_SUBID, 0); + + spin_lock_irq(&pup->port.lock); + + PHYT_MSG_INSERT(pup, &msg); + + /* clear all RP INT STATUS */ + phytium_uart_write(0, pup, REG_RP_INT_STATE); + phytium_uart_write(0xffff, pup, REG_RP_INT_STATE_CLR); + spin_unlock_irq(&pup->port.lock); +} + +static void phytium_shutdown(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + phytium_disable_interrupts(pup); + + free_irq(pup->port.irq, pup); + + phytium_disable_uart(pup); + + /* Shut down the clock producer */ + clk_disable_unprepare(pup->clk); + + /* Optionally let pins go into sleep states */ + pinctrl_pm_select_sleep_state(port->dev); + +} + +static void +phytium_setup_status_masks(struct uart_port *port, struct ktermios *termios) +{ + port->read_status_mask = DATA_OE | CHAR_MASK; + + if (termios->c_iflag & INPCK) + port->read_status_mask |= DATA_FE | DATA_PE; + + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= DATA_BE; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + 
port->ignore_status_mask |= DATA_FE | DATA_PE; + + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= DATA_BE; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= DATA_OE; + } + + /* + * Ignore all characters if CREAD is not set. + */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= DATA_DUMMY_RX; +} + +static void +phytium_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + struct msg msg; + u64 uart_clk; + unsigned long flags; + unsigned int baud, quot; + u8 databits = 0; + u8 stopbits = 0; + u8 parity_en = 0; + u8 parodd = 1; + u8 cmspar = 0; + u8 crtscts = 0; + cmd_subid_type cmd_subid1, cmd_subid2; + + enum phytuart_set_subid cmd1 = PHYTUART_MSG_CMD_SET_BAUD; + enum phytuart_set_subid cmd2 = PHYTUART_MSG_CMD_SET_TERMIOS; + + cmd_subid1 = getHexValue(cmd1); + cmd_subid2 = getHexValue(cmd2); + + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid1, 0); + + spin_lock_irqsave(&port->lock, flags); + /* Ask the core to calculate the divisor for us. 
*/ + if (has_acpi_companion(pup->port.dev)) { + device_property_read_u64(pup->port.dev, "clock-frequency", + &uart_clk); + if (uart_clk) { + if (uart_clk > 100000000 || ('n' == uart_clk)) { + dev_err(pup->port.dev, "uartclk get from acpi is error or NULL!\n"); + port->uartclk = DEFAULT_CLK; + } else + port->uartclk = (uint32_t)uart_clk; + } else { + dev_err(pup->port.dev, "have no acpi clk, use default clk!\n"); + port->uartclk = DEFAULT_CLK; + } + } + baud = uart_get_baud_rate(port, termios, old, 0, + port->uartclk/16); + dev_info(pup->port.dev, "get baud to set:%d\n", baud); + if (baud > port->uartclk/16) + quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); + else + quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); + + /* set baud */ + msg.data[7] = (port->uartclk >> 24) & MSG_DATA_MASK; + msg.data[6] = (port->uartclk >> 16) & MSG_DATA_MASK; + msg.data[5] = (port->uartclk >> 8) & MSG_DATA_MASK; + msg.data[4] = (port->uartclk) & MSG_DATA_MASK; + msg.data[3] = (baud >> 24) & MSG_DATA_MASK; + msg.data[2] = (baud >> 16) & MSG_DATA_MASK; + msg.data[1] = (baud >> 8) & MSG_DATA_MASK; + msg.data[0] = (baud) & MSG_DATA_MASK; + + PHYT_MSG_INSERT(pup, &msg); + + switch (termios->c_cflag & CSIZE) { + case CS5: + databits = 0; + break; + case CS6: + databits = 1; + break; + case CS7: + databits = 2; + break; + default: /* CS8 */ + databits = 3; + break; + } + + if (termios->c_cflag & CSTOPB) + stopbits = 1; + if (termios->c_cflag & PARENB) { + parity_en = 1; + if (!(termios->c_cflag & PARODD)) + parodd = 0; + if (termios->c_cflag & CMSPAR) + cmspar = 1; + + + } + + /* + * Update the per-port timeout. 
+ */ + + uart_update_timeout(port, termios->c_cflag, baud); + + phytium_setup_status_masks(port, termios); + + if (UART_ENABLE_MS(port, termios->c_cflag)) + phytium_enable_ms(port); + + if (termios->c_cflag & CRTSCTS) { + crtscts = 1; + port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + } else { + crtscts = 0; + port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + } + + + msg_fill(&msg, UART_MODULE_ID, MSG_SET, cmd_subid2, 0); + + msg.data[0] = databits & MSG_DATA_MASK; + msg.data[1] = stopbits & MSG_DATA_MASK; + msg.data[2] = parity_en & MSG_DATA_MASK; + msg.data[3] = parodd & MSG_DATA_MASK; + msg.data[4] = cmspar & MSG_DATA_MASK; + msg.data[5] = crtscts & MSG_DATA_MASK; + + PHYT_MSG_INSERT(pup, &msg); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *phytium_type(struct uart_port *port) +{ + struct phytium_uart_port *pup = + container_of(port, struct phytium_uart_port, port); + + return pup->port.type == PORT_PHYTIUM ? pup->type : NULL; +} + +static void phytium_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_PHYTIUM; +} + +static int phytium_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_PHYTIUM) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= nr_irqs) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + if (port->mapbase != (unsigned long) ser->iomem_base) + ret = -EINVAL; + return ret; +} + +static const struct uart_ops phytium_uart_ops = { + .tx_empty = phytium_tx_empty, + .set_mctrl = phytium_set_mctrl, + .get_mctrl = phytium_get_mctrl, + .stop_tx = phytium_stop_tx, + .start_tx = phytium_start_tx, + .stop_rx = phytium_stop_rx, + .throttle = phytium_throttle_rx, + .unthrottle = phytium_unthrottle_rx, + .enable_ms = phytium_enable_ms, + .break_ctl = phytium_break_ctl, + .startup = phytium_startup, + .shutdown = phytium_shutdown, + .set_termios = phytium_set_termios, + .type = 
phytium_type, + .config_port = phytium_config_port, + .verify_port = phytium_verify_port, +}; + +static struct phytium_uart_port *uart_ports[UART_NR]; + +static struct uart_driver phytium_uart = { + .owner = THIS_MODULE, + .driver_name = DRV_NAME, + .dev_name = "ttyVS", + .nr = UART_NR, +}; + +static void phytium_unregister_port(struct phytium_uart_port *pup) +{ + int i; + bool busy = false; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) { + if (uart_ports[i] == pup) + uart_ports[i] = NULL; + else if (uart_ports[i]) + busy = true; + } + + if (!busy) + uart_unregister_driver(&phytium_uart); +} + +static int phytium_find_free_port(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) + if (uart_ports[i] == NULL) + return i; + + return -EBUSY; +} + +static int phytium_register_port(struct phytium_uart_port *pup) +{ + struct msg msg; + int rc, i; + + /* mask TX_HEAD_INT at the beginning, because now we are + * unable to deal with it yet + */ + phytium_uart_write(TX_HEAD_INT, pup, REG_RP_INT_MASK); + + msg_fill(&msg, UART_MODULE_ID, MSG_DEFAULT, MSG_DEFAULT, 0); + PHYT_MSG_INSERT(pup, &msg); + + /* clear all redundant RP INT STATUS at the beginning */ + phytium_uart_write(0, pup, REG_RP_INT_STATE); + phytium_uart_write(0xffff, pup, REG_RP_INT_STATE_CLR); + if (!phytium_uart.state) { + rc = uart_register_driver(&phytium_uart); + if (rc < 0) { + dev_err(pup->port.dev, + "Failed to register Phytium platform UART driver\n"); + for (i = 0; i < ARRAY_SIZE(uart_ports); i++) + if (uart_ports[i] == pup) + uart_ports[i] = NULL; + return rc; + } + } + + rc = uart_add_one_port(&phytium_uart, &pup->port); + if (rc) + phytium_unregister_port(pup); + + return rc; +} + +#if defined(CONFIG_SERIAL_PHYTIUM_V2_DEBUG) +static int phytium_uart_enable_debug(struct phytium_uart_port *pup, + bool enable) +{ + u32 dbg_regval; + + if (pup->debug_enable == enable) { + pr_warn("PHYUART:set enable debug with repeative operation.\n"); + return -EINVAL; + } + pup->debug_enable = enable; 
+ dbg_regval = phytium_uart_read(pup, PHYUART_DBG_REG); + if (!enable && (dbg_regval & PHYUART_DBG_ENABLE_MASK)) + dbg_regval &= ~PHYUART_DBG_ENABLE_MASK; + else if (enable && !(dbg_regval & PHYUART_DBG_ENABLE_MASK)) + dbg_regval |= PHYUART_DBG_ENABLE_MASK; + + phytium_uart_write(dbg_regval, pup, PHYUART_DBG_REG); + return 0; +} + +static int phytium_uart_enable_heartbeat(struct phytium_uart_port *pup, + bool enable) +{ + u32 dbg_regval; + + if (pup->heartbeat_enable == enable) { + pr_warn("PHYUART:set heartbeat with repeative operation.\n"); + return -EINVAL; + } + pup->heartbeat_enable = enable; + dbg_regval = phytium_uart_read(pup, PHYUART_DBG_REG); + if (!enable && (dbg_regval & PHYUART_DBG_HEARTBEAT_MASK)) { + dbg_regval &= ~PHYUART_DBG_HEARTBEAT_MASK; + phytium_uart_write(dbg_regval, pup, PHYUART_DBG_REG); + del_timer(&pup->alive_timer); + } else if (enable && !(dbg_regval & PHYUART_DBG_HEARTBEAT_MASK)) { + dbg_regval |= PHYUART_DBG_HEARTBEAT_MASK + | PHYUART_DBG_HEARTBEAT_ENABLE_MASK; + phytium_uart_write(dbg_regval, pup, PHYUART_DBG_REG); + add_timer(&pup->alive_timer); + } + + return 0; +} + +static void alive_timer_routine(struct timer_list *tlist) +{ + struct phytium_uart_port *pup; + u32 dbg_regval; + + pup = from_timer(pup, tlist, alive_timer); + if (!pup) { + pr_err("PHYUART: get uart port error.\n"); + return; + } + + dbg_regval = phytium_uart_read(pup, PHYUART_DBG_REG); + phytium_uart_write(dbg_regval | PHYUART_DBG_HEARTBEAT_MASK, + pup, PHYUART_DBG_REG); + mod_timer(&pup->alive_timer, jiffies + msecs_to_jiffies(5000)); +} + +static ssize_t debug_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct phytium_uart_port *pup = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", pup->debug_enable); +} + +static ssize_t debug_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct phytium_uart_port *pup = dev_get_drvdata(dev); + int enable; + int ret; + + ret = 
sscanf(buf, "%d\n", &enable); + if (ret == 0) { + ret = -EINVAL; + return ret; + } + phytium_uart_enable_debug(pup, enable); + return count; +} + +static ssize_t heartbeat_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct phytium_uart_port *pup = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", pup->heartbeat_enable); +} + +static ssize_t heartbeat_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct phytium_uart_port *pup = dev_get_drvdata(dev); + int heartbeat_enable; + int ret; + + ret = sscanf(buf, "%d\n", &heartbeat_enable); + if (ret == 0) { + ret = -EINVAL; + return ret; + } + phytium_uart_enable_heartbeat(pup, heartbeat_enable); + return count; +} +static DEVICE_ATTR_RW(debug_enable); +static DEVICE_ATTR_RW(heartbeat_enable); +#endif +static int phytium_uart_probe(struct platform_device *pdev) +{ + struct phytium_uart_port *pup; + struct resource *res_rf, *res_sm; + int portnr, ret; + u64 uart_clk; + + portnr = phytium_find_free_port(); + if (portnr < 0) + return portnr; + + pup = devm_kzalloc(&pdev->dev, sizeof(struct phytium_uart_port), + GFP_KERNEL); + if (!pup) + return -ENOMEM; + + if (pdev->dev.of_node) { + pup->clk = devm_clk_get(&pdev->dev, "uartclk"); + if (IS_ERR(pup->clk)) { + dev_err(&pdev->dev, "Device clock not found.\n"); + ret = PTR_ERR(pup->clk); + goto free; + } + } + + res_rf = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_rf) { + dev_err(&pdev->dev, "UNABLE TO GET REGFILE RESOURCE!\n"); + goto free; + } + + res_sm = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res_sm) { + dev_err(&pdev->dev, "UNABLE TO GET SHMEM RESOURCE!\n"); + goto free; + } + + pup->shmem_base = devm_ioremap_resource(&pdev->dev, res_sm); + + pup->dev = &pdev->dev; + pup->port.irq = platform_get_irq(pdev, 0); + + ret = clk_prepare_enable(pup->clk); + if (ret) { + dev_err(&pdev->dev, "PHYUART:clk_prepare_enable error.\n"); + goto free; + } + + 
pup->port.dev = &pdev->dev; + if (has_acpi_companion(pup->port.dev)) { + device_property_read_u64(pup->port.dev, "clock-frequency", + &uart_clk); + if (uart_clk) { + if (uart_clk > 100000000 || ('n' == uart_clk)) { + dev_err(pup->port.dev, "uartclk get from acpi is error or NULL!\n"); + pup->port.uartclk = DEFAULT_CLK; + } else + pup->port.uartclk = (uint32_t)uart_clk; + } else { + dev_err(pup->port.dev, "have no acpi clk, use default clk!\n"); + pup->port.uartclk = DEFAULT_CLK; + } + } else + pup->port.uartclk = clk_get_rate(pup->clk); + pup->port.mapbase = res_rf->start; + pup->port.membase = devm_ioremap_resource(&pdev->dev, res_rf); + pup->port.iotype = UPIO_MEM32; + pup->port.ops = &phytium_uart_ops; + pup->port.dev = &pdev->dev; + pup->port.fifosize = UART_FIFOSIZE; + pup->port.flags = UPF_BOOT_AUTOCONF; + pup->port.line = portnr; + uart_ports[portnr] = pup; + pup->old_cr = 0; + pup->m_buf_empty = true; + snprintf(pup->type, sizeof(pup->type), "phytium,uart-v2"); +#if defined(CONFIG_SERIAL_PHYTIUM_V2_DEBUG) + pup->debug_enable = false; + pup->heartbeat_enable = false; + + phytium_uart_enable_heartbeat(pup, true); + phytium_uart_enable_debug(pup, true); + + pup->alive_timer.expires = jiffies + msecs_to_jiffies(5000); + timer_setup(&pup->alive_timer, alive_timer_routine, 0); + add_timer(&pup->alive_timer); + ret = device_create_file(&pdev->dev, + &dev_attr_debug_enable); + if (ret < 0) { + dev_err(&pdev->dev, "PHYUART: device_create_debug file error.\n"); + goto debug_enable_free; + } + ret = device_create_file(&pdev->dev, + &dev_attr_heartbeat_enable); + if (ret < 0) { + dev_err(&pdev->dev, "PHYUART: device_create_heartbeat file error.\n"); + goto heartbeat_enable_free; + } +#endif + platform_set_drvdata(pdev, pup); + return phytium_register_port(pup); + +#if defined(CONFIG_SERIAL_PHYTIUM_V2_DEBUG) +heartbeat_enable_free: + device_remove_file(pup->dev, &dev_attr_heartbeat_enable); +debug_enable_free: + device_remove_file(pup->dev, &dev_attr_debug_enable); 
+#endif +free: + kfree(pup); + return -1; +} + +static int phytium_uart_remove(struct platform_device *pdev) +{ + struct phytium_uart_port *pup = platform_get_drvdata(pdev); + + uart_remove_one_port(&phytium_uart, &pup->port); + + phytium_unregister_port(pup); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_uart_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_uart_port *pup = platform_get_drvdata(pdev); + + if (pup) + uart_suspend_port(&phytium_uart, &pup->port); + + return 0; +} + +static int phytium_uart_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct phytium_uart_port *pup = platform_get_drvdata(pdev); + + if (pup) + uart_resume_port(&phytium_uart, &pup->port); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(phytium_dev_pm_ops, + phytium_uart_suspend, phytium_uart_resume); + +/* Match table for OF platform binding */ +static const struct of_device_id phytium_uart_of_ids[] = { + { .compatible = "phytium,uart-2.0", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, phytium_uart_of_ids); + +static const struct acpi_device_id __maybe_unused phytium_uart_acpi_match[] = { + { "PHYT0055", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, phytium_uart_acpi_match); + +static struct platform_driver phytium_uart_driver = { + .probe = phytium_uart_probe, + .remove = phytium_uart_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = phytium_uart_of_ids, + .acpi_match_table = ACPI_PTR(phytium_uart_acpi_match), + .pm = &phytium_dev_pm_ops, + }, +}; + +static int __init phytium_uart_init(void) +{ + pr_info("Serial: Phytium uart v2 driver\n"); + return platform_driver_register(&phytium_uart_driver); +} + +static void __exit phytium_uart_exit(void) +{ + platform_driver_unregister(&phytium_uart_driver); +} +arch_initcall(phytium_uart_init); +module_exit(phytium_uart_exit); + +MODULE_AUTHOR("Lan Hengyu "); +MODULE_DESCRIPTION("Phytium serial port 
driver for uart v2"); +MODULE_VERSION(PHYT_UART_DRV_VER); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/phytium-uart-v2.h b/drivers/tty/serial/phytium-uart-v2.h new file mode 100644 index 0000000000000000000000000000000000000000..cb2459fca9464ba592224fde41d9ec2a3e567c01 --- /dev/null +++ b/drivers/tty/serial/phytium-uart-v2.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2020-2025, Phytium Technology, Co., Ltd. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "ttyVS" +#define MSG_COMPLETE 0x1 +#define UART_NR 14 + +#define TX_DATA_MAXINUM 120 +#define RX_DATA_MAXINUM ((TX_DATA_MAXINUM / 2) + 1) + +#define DATA_OE (1 << 11) +#define DATA_BE (1 << 10) +#define DATA_PE (1 << 9) +#define DATA_FE (1 << 8) +#define DATA_ERROR (DATA_OE|DATA_BE|DATA_PE|DATA_FE) +#define DATA_DUMMY_RX (1 << 16) + +#define REG_TX_HEAD 0x0 /* tx_ring_buffer head pointer reg */ +#define REG_TX_TAIL 0x4 /* tx_ring_buffer tail pointer reg */ +#define REG_RX_HEAD 0x8 /* rx_ring_buffer tail pointer reg */ +#define REG_RX_TAIL 0xc /* rx_ring_buffer tail pointer reg */ +#define REG_PHYT_INT_STATE 0x24 +#define REG_RP_INT_STATE 0x2c +#define REG_RP_INT_STATE_CLR 0x74 +#define REG_PHYT_INT_MASK 0x20 +#define REG_RP_INT_MASK 0x28 +#define REG_FAKE_DR 0x30 +#define REG_FAKE_FR 0x34 +#define REG_CHECK_TX 0x4c + +#define MODEM_CTS 0x1 +#define MODEM_DSR 0x2 +#define MODEM_DCD 0x004 +#define MODEM_CAR MODEM_DCD +#define MODEM_RNG 0x8 +#define MODEM_RTS 0x10 +#define MODEM_DTR 0x20 +#define MODEM_OUT1 0x40 +#define MODEM_OUT2 0x80 + +#define TX_HEAD_INT 0x1 +#define RX_TAIL_INT 0x2 +#define PHYT_INT_TRIGGER_BIT 0x10 +#define PHYT_MSG_DATA_COMPLETED 0x10 +#define MODEM_INT 0x20 + +#define UART_MODULE_ID 0x1 +#define 
MSG_DEFAULT 0x0 +#define MSG_DEFAULT_SUBID 0x10 +#define MSG_SET 0x1 +#define MSG_GET 0x2 +#define MSG_DATA 0x3 +#define MSG_GET_MODEM 0x3 +#define MSG_GET_RX_EMPTY 0x4 +#define MSG_TX_DATA 0x0 +#define RX_TAIL_INT_ENABLE 0x10000 +#define MSG_DATA_MASK 0xff +#define BUFFER_POINTER_MASK 0xffff +#define CHAR_MASK 255 + +#define TX_BUFFER_SIZE 8 +#define RX_BUFFER_SIZE 8 +#define BUFFER_SIZE 20 +#define UART_FIFOSIZE 64 +#define TX_MSG_SIZE 0x80 +#define RX_MSG_SIZE 0x80 +#define RX_CHARS_MAX 28 + +/* uart debug mechanism */ +#define PHYUART_DBG_REG 0x58 +/* uart debug register mask bit */ +#define PHYUART_DBG_ENABLE_MASK 0x1 +#define PHYUART_DBG_HEARTBEAT_ENABLE_MASK (0x1 << 1) +#define PHYUART_DBG_HEARTBEAT_MASK (0x1 << 2) +#define PHYUART_DBG_LOG_EXIST_MASK (0x1 << 3) +#define PHYUART_DBG_SIZE_MASK (0xf << 4) +#define PHYUART_DBG_ADDR_MASK (0x3fff << 8) + +/* enum all type-set subid */ +enum phytuart_set_subid { + /* enable/disable */ + PHYTUART_MSG_CMD_SET_DEVICE_EN = 0x0, + /* set baud */ + PHYTUART_MSG_CMD_SET_BAUD, + /* set trans bit width 0: 5,1: 6,2: 7,3:8 */ + PHYTUART_MSG_CMD_SET_DATABIT, + /* set stop bit,0:1 bit, 1: 2bit */ + PHYTUART_MSG_CMD_SET_STOPBIT, + /* set parity enable, 0:DISABLE, 1:ENABLE */ + PHYTUART_MSG_CMD_SET_PARITY_EN, + /* set parity bit, 0:ODD, 1: EVEN */ + PHYTUART_MSG_CMD_SET_PARITY_EVEN_SET, + /* set 0/1 parity enable */ + PHYTUART_MSG_CMD_SET_PARITY_STICK_SET, + /* set break signal,0:no break 1:send break */ + PHYTUART_MSG_CMD_SET_BREAK_EN, + /* set RX en, 0:disable, single byte, 1:enable,*/ + PHYTUART_MSG_CMD_SET_RX_BUFFER_EN, + /* set TX en, 0:disable,single byte, 1:enable */ + PHYTUART_MSG_CMD_SET_TX_BUFFER_EN, + /* set TX enable, 0:disable, 1:enable */ + PHYTUART_MSG_CMD_SET_TX_EN, + /* set RX enable, 0:disable, 1:enable */ + PHYTUART_MSG_CMD_SET_RX_EN, + /* set loop enable, 0:disable, 1:enable */ + PHYTUART_MSG_CMD_SET_LOOP_EN, + PHYTUART_MSG_CMD_SET_RTS_EN, + PHYTUART_MSG_CMD_SET_CTS_EN, + PHYTUART_MSG_CMD_SET_DTR_SET, + 
PHYTUART_MSG_CMD_SET_RTS_SET, + PHYTUART_MSG_CMD_SET_DTE_DCD_SET, + PHYTUART_MSG_CMD_SET_DTE_RI_SET, + PHYTUART_MSG_CMD_SET_RX_IM, + PHYTUART_MSG_CMD_SET_TX_IM, + PHYTUART_MSG_CMD_SET_ERROR_IM, + + PHYTUART_MSG_CMD_SET_MODEM_IM, + PHYTUART_MSG_CMD_SET_STARTUP, + PHYTUART_MSG_CMD_SET_HWINIT, + PHYTUART_MSG_CMD_SET_MCTRL, + PHYTUART_MSG_CMD_SET_TERMIOS, + PHYTUART_MSG_CMD_SET_DISABLE_UART, +}; + +/* for trans subid to hex */ +uint8_t getHexValue(enum phytuart_set_subid cmd) +{ + return (uint8_t)cmd; +} + diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 7eaccbc6a5b13e5d6bcbe90ba102c170b0d9c459..bb019c8b2304268d82fe3b9e7fa9bdc6fea64e99 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -131,6 +131,8 @@ source "drivers/usb/isp1760/Kconfig" source "drivers/usb/phytium/Kconfig" +source "drivers/usb/phytium_usb_v2/Kconfig" + comment "USB port drivers" if USB diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index d28331dde09a83ed9add05fe8145f8fb7b05b0ba..1a879f1701eaa9325c4dc05151e78a1fc3345dfc 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_USB_CDNS3) += cdns3/ obj-$(CONFIG_USB_CDNSP_PCI) += cdns3/ obj-$(CONFIG_USB_PHYTIUM) += phytium/ +obj-$(CONFIG_USB_PHYTIUM_V2) += phytium_usb_v2/ obj-$(CONFIG_USB_FOTG210) += fotg210/ diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index db2bddcfda719916fae39dafe111b2e424e67180..d7f8986bfd2cce60370b8ab95a90beb90e5ff909 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2368,7 +2368,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * and our use of dma addresses in the trb_address_map radix tree needs * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. 
*/ - if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH) + if (xhci->quirks & XHCI_TRB_OVERFETCH) + /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2); else diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 7920b1d50c4772beb8857c440351630900f51a72..62853912bf52c718bf865acd57c606b4781c7d5d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -38,6 +38,8 @@ #define PCI_DEVICE_ID_EJ168 0x7023 #define PCI_DEVICE_ID_EJ188 0x7052 +#define PCI_DEVICE_ID_VIA_VL805 0x3483 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 @@ -498,8 +500,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) { xhci->quirks |= XHCI_LPM_SUPPORT; + xhci->quirks |= XHCI_TRB_OVERFETCH; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { @@ -548,11 +552,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->device == 0x9202) { xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->device == 0x9203) - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->vendor == PCI_DEVICE_ID_CADENCE && diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 7aa8dc4ca3a101fe69e6cc4ee276a7f31b0d9436..20254651523b0914317a8133d022a88260028a7b 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -100,6 +100,10 @@ static int 
xhci_plat_start(struct usb_hcd *hcd) return xhci_run(hcd); } +static const struct xhci_plat_priv xhci_plat_phytium_pe220x = { + .quirks = XHCI_RESET_ON_RESUME | XHCI_S1_SUSPEND_WAKEUP, +}; + #ifdef CONFIG_OF static const struct xhci_plat_priv xhci_plat_marvell_armada = { .init_quirk = xhci_mvebu_mbus_init_quirk, @@ -113,10 +117,6 @@ static const struct xhci_plat_priv xhci_plat_brcm = { .quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS, }; -static const struct xhci_plat_priv xhci_plat_phytium_pe220x = { - .quirks = XHCI_RESET_ON_RESUME | XHCI_S1_SUSPEND_WAKEUP, -}; - static const struct of_device_id usb_xhci_of_match[] = { { .compatible = "generic-xhci", diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index e80d040cd677051dde65a955e3531c87266cfa5b..3376f1f38dcc914358393b447c47b695787e264c 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1658,7 +1658,7 @@ struct xhci_hcd { #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) -#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) +#define XHCI_TRB_OVERFETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) #define XHCI_WRITE_64_HI_LO BIT_ULL(47) #define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) diff --git a/drivers/usb/phytium/dma.c b/drivers/usb/phytium/dma.c index 3048a89de81e796021135f4e77bb5036795e6899..ae734df3e488833dfc72ee66ba88590f9f11dee6 100644 --- a/drivers/usb/phytium/dma.c +++ b/drivers/usb/phytium/dma.c @@ -65,6 +65,8 @@ static int32_t phytium_dma_probe(struct DMA_CFG *config, struct DMA_SYSREQ *sysR static int32_t phytium_dma_init(struct DMA_CONTROLLER *priv, const struct DMA_CFG *config, struct DMA_CALLBACKS *callbacks) { + int i = 0; + if (!priv || !config || !callbacks) return -EINVAL; @@ -74,6 +76,11 @@ static int32_t phytium_dma_init(struct DMA_CONTROLLER *priv, memset((void *)priv, 0, sizeof(struct DMA_CONTROLLER)); memset((void *)config->trbAddr, 0, TRB_POOL_SIZE); + for (i = 0; i < 
MAX_DMA_CHANNELS; i++) { + INIT_LIST_HEAD(&priv->rx[i].trbChainDescList); + INIT_LIST_HEAD(&priv->tx[i].trbChainDescList); + } + priv->trbDMAPoolAddr = config->trbDmaAddr; priv->trbPoolAddr = config->trbAddr; priv->regs = (struct DMARegs *)config->regBase; @@ -252,6 +259,10 @@ static void phytium_dma_isr(struct DMA_CONTROLLER *priv) & DMARF_EP_IOC) || (channel->dmultGuard & DMARF_EP_ISP)) { retransmit: phytium_write32(&priv->regs->ep_sts, DMARF_EP_IOC | DMARF_EP_ISP); + + if (list_empty(&channel->trbChainDescList)) + break; + trbChainDesc = GetTrbChainDescEntry(channel->trbChainDescList.next); if (!(ep_sts & DMARF_EP_TRBERR) && channel->dmultEnabled && !trbChainDesc->lastTrbIsLink) { @@ -352,7 +363,6 @@ static void *phytium_dma_channelAlloc(struct DMA_CONTROLLER *priv, if (isIsoc) channel->isIsoc = 1; - INIT_LIST_HEAD(&channel->trbChainDescList); channel->numOfTrbChain = 0; channel->controller = priv; channel->dmultGuard = 0; diff --git a/drivers/usb/phytium_usb_v2/Kconfig b/drivers/usb/phytium_usb_v2/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..0bc39dc37219ce8d650e5d9e888f297482ecd799 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 + +config USB_PHYTIUM_PCI_V2 + tristate "PHYTIUM USB V2 Support for PCI" + depends on USB && USB_GADGET && USB_ROLE_SWITCH && ARCH_PHYTIUM + help + Say Y or M here if your system has a OTG USB Controller based on PHYTIUM SOC. + + If you choose to build this driver is a dynamically linked modules, the module will + be called phytium-usb-v2-pci.ko + +config USB_PHYTIUM_V2 + tristate "PHYTIUM USB V2 Support for Platform" + depends on USB && USB_GADGET && USB_ROLE_SWITCH && ARCH_PHYTIUM + help + Say Y or M here if your system has a OTG USB Controller based on PHYTIUM SOC. 
+ + If you choose to build this driver is a dynamically linked modules, the module will + be called phytium-usb-v2-platform.ko diff --git a/drivers/usb/phytium_usb_v2/Makefile b/drivers/usb/phytium_usb_v2/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0c65b85df595fcb624c84ddc3d46bb5b06b3facc --- /dev/null +++ b/drivers/usb/phytium_usb_v2/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_USB_PHYTIUM_PCI_V2) += phytium-usb-pci-v2.o + +obj-$(CONFIG_USB_PHYTIUM_V2) += phytium-usb-platform-v2.o + +phytium-usb-pci-v2-y := core.o pci.o host.o gadget.o otg.o mem.o ring.o + +phytium-usb-platform-v2-y := core.o platform.o host.o gadget.o otg.o mem.o ring.o diff --git a/drivers/usb/phytium_usb_v2/core.c b/drivers/usb/phytium_usb_v2/core.c new file mode 100644 index 0000000000000000000000000000000000000000..00f6b9f159d3cb98f0594f0af14c9317af9a153e --- /dev/null +++ b/drivers/usb/phytium_usb_v2/core.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. + * + * Copyright (C) 2023 - 2024 Phytium. 
+ */ + +#include +#include +#include +#include + +#include "core.h" +#include "otg.h" +#include "host.h" +#include "gadget.h" + +static enum usb_role hw_role_state_machine(struct phytium_usb *phytium_usb) +{ + enum usb_role role = USB_ROLE_NONE; + int id, vbus; + + if (phytium_usb->dr_mode != USB_DR_MODE_OTG) { + if (phytium_usb_otg_is_host(phytium_usb)) + role = USB_ROLE_HOST; + + if (phytium_usb_otg_is_device(phytium_usb)) + role = USB_ROLE_DEVICE; + + return role; + } + + id = phytium_usb_otg_get_id(phytium_usb); + vbus = phytium_usb_otg_get_vbus(phytium_usb); + + role = phytium_usb->role; + + switch (role) { + case USB_ROLE_NONE: + if (!id) + role = USB_ROLE_HOST; + else if (vbus) + role = USB_ROLE_DEVICE; + break; + case USB_ROLE_HOST: + if (id) + role = USB_ROLE_NONE; + break; + case USB_ROLE_DEVICE: + if (!vbus) + role = USB_ROLE_NONE; + break; + } + + dev_info(phytium_usb->dev, "role %d -> %d\n", phytium_usb->role, role); + + return role; +} + +int phytium_usb_role_start(struct phytium_usb *phytium_usb, enum usb_role role) +{ + int ret; + + mutex_lock(&phytium_usb->mutex); + phytium_usb->role = role; + mutex_unlock(&phytium_usb->mutex); + + if (!phytium_usb->roles[role]) + return -ENXIO; + + if (phytium_usb->roles[role]->state == ROLE_STATE_ACTIVE) + return 0; + + mutex_lock(&phytium_usb->mutex); + ret = phytium_usb->roles[role]->start((void *)phytium_usb); + if (!ret) + phytium_usb->roles[role]->state = ROLE_STATE_ACTIVE; + mutex_unlock(&phytium_usb->mutex); + + return ret; +} + +void phytium_usb_role_stop(struct phytium_usb *phytium_usb) +{ + enum usb_role role; + + if (phytium_usb) { + role = phytium_usb->role; + + if (phytium_usb->roles[role]->state == ROLE_STATE_INACTIVE) + return; + + mutex_lock(&phytium_usb->mutex); + phytium_usb->roles[role]->stop((void *)phytium_usb); + phytium_usb->roles[role]->state = ROLE_STATE_INACTIVE; + mutex_unlock(&phytium_usb->mutex); + } +} + +static enum usb_role role_get(struct usb_role_switch *sw) +{ + struct 
phytium_usb *phytium_usb = usb_role_switch_get_drvdata(sw); + + return phytium_usb->role; +} + +static int role_set(struct usb_role_switch *sw, enum usb_role role) +{ + struct phytium_usb *phytium_usb = usb_role_switch_get_drvdata(sw); + int ret = 0; + + if (phytium_usb->role == role) + return ret; + + if (phytium_usb->dr_mode == USB_DR_MODE_HOST) { + switch (role) { + case USB_ROLE_NONE: + case USB_ROLE_HOST: + break; + default: + return ret; + } + } + + if (phytium_usb->dr_mode == USB_DR_MODE_PERIPHERAL) { + switch (role) { + case USB_ROLE_NONE: + case USB_ROLE_DEVICE: + break; + default: + return ret; + } + } + + phytium_usb_role_stop(phytium_usb); + ret = phytium_usb_role_start(phytium_usb, role); + if (ret) + dev_err(phytium_usb->dev, "set role %d has failed\n", role); + + return ret; +} + +int phytium_usb_hw_role_switch(struct phytium_usb *phytium_usb) +{ + enum usb_role real_role, current_role; + int ret = 0; + + if (phytium_usb) { + if (phytium_usb->role_sw) + return ret; + + current_role = phytium_usb->role; + real_role = hw_role_state_machine(phytium_usb); + + if (current_role == real_role) + goto exit; + + if (current_role != 0 && real_role == 0) + goto exit; + + phytium_usb_role_stop(phytium_usb); + + dev_info(phytium_usb->dev, "Switching role %d -> %d\n", current_role, real_role); + + ret = phytium_usb_role_start(phytium_usb, real_role); + if (ret) { + dev_err(phytium_usb->dev, "set %d has failed, back to %d\n", + real_role, current_role); + ret = phytium_usb_role_start(phytium_usb, current_role); + if (ret) + dev_err(phytium_usb->dev, "Back to %d failed too\n", current_role); + } + } + +exit: + return ret; +} + + +static int phytium_usb_set_mode(struct phytium_usb *phytium_usb) +{ + if (phytium_usb) { + if (phytium_usb->dr_mode == USB_DR_MODE_OTG) { + phytium_usb_otg_disable_irq((void *)phytium_usb); + phytium_usb_otg_clear_irq((void *)phytium_usb); + } + } + + return 0; +} + +static int phytium_usb_start_mode(struct phytium_usb *phytium_usb) +{ + int 
ret = 0; + + if (phytium_usb) { + switch (phytium_usb->dr_mode) { + case USB_DR_MODE_HOST: + ret = phytium_usb_role_start(phytium_usb, USB_ROLE_HOST); + break; + case USB_DR_MODE_PERIPHERAL: + ret = phytium_usb_role_start(phytium_usb, USB_ROLE_DEVICE); + break; + case USB_DR_MODE_OTG: + ret = phytium_usb_hw_role_switch(phytium_usb); + break; + default: + dev_warn(phytium_usb->dev, "unknown dr_mode:%d\n", phytium_usb->dr_mode); + ret = -EINVAL; + goto err; + } + } + + return ret; +err: + phytium_usb_role_stop(phytium_usb); + phytium_usb_otg_disable_irq((void *)phytium_usb); + + return ret; +} + +static int idle_role_start(void *data) +{ + return 0; +} + +static void idle_role_stop(void *data) +{ +} + +static int phytium_usb_idle_init(struct phytium_usb *phytium_usb) +{ + struct phytium_usb_role_driver *rdrv; + + rdrv = devm_kzalloc(phytium_usb->dev, sizeof(*rdrv), GFP_KERNEL); + if (!rdrv) + return -ENOMEM; + + rdrv->start = idle_role_start; + rdrv->stop = idle_role_stop; + rdrv->state = ROLE_STATE_INACTIVE; + rdrv->suspend = NULL; + rdrv->resume = NULL; + rdrv->name = "idle"; + + phytium_usb->roles[USB_ROLE_NONE] = rdrv; + + return 0; +} + +static int phytium_usb_init_role(struct phytium_usb *phytium_usb) +{ + int ret = 0; + enum usb_dr_mode dr_mode; + + phytium_usb->role = USB_ROLE_NONE; + + ret = phytium_usb_idle_init(phytium_usb); + if (ret) + return ret; + + dr_mode = usb_get_dr_mode(phytium_usb->dev); + if (dr_mode == USB_DR_MODE_UNKNOWN) + dr_mode = USB_DR_MODE_OTG; + + phytium_usb_host_init((void *)phytium_usb); + + phytium_usb_gadget_init((void *)phytium_usb); + + + phytium_usb_set_mode(phytium_usb); + + phytium_usb->dr_mode = dr_mode; + + ret = phytium_usb_start_mode(phytium_usb); + + return ret; +} + +int phytium_usb_init(struct phytium_usb *phytium_usb) +{ + int ret; + + if (!phytium_usb) + return 0; + + mutex_init(&phytium_usb->mutex); + + ret = dma_set_mask_and_coherent(phytium_usb->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_warn(phytium_usb->dev, "set 
dma mask failed with:%d\n", ret); + return ret; + } + + if (device_property_read_bool(phytium_usb->dev, "usb-role-switch")) { + struct usb_role_switch_desc sw_desc = { }; + + sw_desc.get = role_get; + sw_desc.set = role_set; + sw_desc.allow_userspace_control = true; + sw_desc.driver_data = phytium_usb; + sw_desc.fwnode = phytium_usb->dev->fwnode; + + phytium_usb->role_sw = usb_role_switch_register(phytium_usb->dev, &sw_desc); + if (IS_ERR(phytium_usb->role_sw)) { + dev_warn(phytium_usb->dev, "Unalbe to register Role Switch\n"); + return PTR_ERR(phytium_usb->role_sw); + } + } + + ret = phytium_usb_otg_init((void *)phytium_usb); + if (ret) + goto init_failed; + + ret = phytium_usb_init_role(phytium_usb); + if (ret) + goto init_failed; + + spin_lock_init(&phytium_usb->lock); + + return 0; + +init_failed: + phytium_usb_otg_disable_irq((void *)phytium_usb); + + if (phytium_usb->role_sw) + usb_role_switch_unregister(phytium_usb->role_sw); + + return ret; +} + +int phytium_usb_uninit(struct phytium_usb *phytium_usb) +{ + if (phytium_usb->role_sw) + usb_role_switch_unregister(phytium_usb->role_sw); + + phytium_usb_role_stop(phytium_usb); + phytium_usb_otg_uninit((void *)phytium_usb); + + kfree(phytium_usb); + + return 0; +} + +int phytium_usb_resume(struct phytium_usb *phytium_usb, u8 set_active) +{ + enum usb_role real_role; + bool role_changed = false; + int ret = 0; + + if (phytium_usb_otg_power_is_lost((void *)phytium_usb)) { + if (!phytium_usb->role_sw) { + real_role = hw_role_state_machine(phytium_usb); + if (real_role != phytium_usb->role) { + ret = phytium_usb_hw_role_switch(phytium_usb); + if (ret) + return ret; + + role_changed = true; + } + } + + if (!role_changed) { + if (phytium_usb->role == USB_ROLE_HOST) + ret = phytium_usb_otg_host_on((void *)phytium_usb); + else if (phytium_usb->role == USB_ROLE_DEVICE) + ret = phytium_usb_otg_gadget_on((void *)phytium_usb); + + if (ret) + return ret; + } + } + + if (phytium_usb->roles[phytium_usb->role]->resume) + 
phytium_usb->roles[phytium_usb->role]->resume(phytium_usb, + phytium_usb_otg_power_is_lost(phytium_usb)); + + return 0; +} + +int phytium_usb_suspend(struct phytium_usb *phytium_usb) +{ + unsigned long flags; + + if (phytium_usb->roles[phytium_usb->role]->suspend) { + spin_lock_irqsave(&phytium_usb->lock, flags); + phytium_usb->roles[phytium_usb->role]->suspend(phytium_usb, false); + spin_unlock_irqrestore(&phytium_usb->lock, flags); + } + + return 0; +} + +MODULE_AUTHOR("Zhenhua Chen "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium USB2 DRD Controller Driver"); diff --git a/drivers/usb/phytium_usb_v2/core.h b/drivers/usb/phytium_usb_v2/core.h new file mode 100644 index 0000000000000000000000000000000000000000..759a867fbdd20a0936e1cd218ad96514ee00dc06 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/core.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PHYTIUM_USB_CORE_H__ +#define __LINUX_PHYTIUM_USB_CORE_H__ + +#include +#include +#include + +#include "otg.h" + +#define ROLE_STATE_INACTIVE 0 +#define ROLE_STATE_ACTIVE 1 +#define PHYTIUM_USB_DRIVER_V2_VERSION "1.0.1" + +struct phytium_usb_platform_data { + int (*platform_suspend)(struct device *dev, bool suspend, bool wakeup); + unsigned long quirks; +}; + +struct phytium_usb_role_driver { + int (*start)(void *data); + void (*stop)(void *data); + int (*suspend)(void *data, bool do_wakeup); + int (*resume)(void *data, bool hibernated); + const char *name; + int state; +}; + +struct phytium_usb { + struct device *dev; + void __iomem *host_regs; + struct resource host_res[2]; + struct phytium_usb_platform_data *platdata; + int device_irq; + void __iomem *device_regs; + int otg_irq; + struct resource otg_res; + struct phytium_usb_otg_regs __iomem *otg_regs; + enum usb_dr_mode dr_mode; + enum usb_role role; + struct phytium_usb_role_driver *roles[USB_ROLE_DEVICE + 1]; + struct platform_device *host_dev; + struct xhci_plat_priv *host_plat_data; + spinlock_t lock; + void *gadget_dev; + 
struct mutex mutex; + bool in_lpm; + struct usb_role_switch *role_sw; +}; + +int phytium_usb_init(struct phytium_usb *phytium_usb); +int phytium_usb_uninit(struct phytium_usb *phytium_usb); +int phytium_usb_hw_role_switch(struct phytium_usb *phytium_usb); +int phytium_usb_resume(struct phytium_usb *phytium_usb, u8 set_active); +int phytium_usb_suspend(struct phytium_usb *phytium_usb); +int phytium_usb_role_start(struct phytium_usb *phytium_usb, enum usb_role role); +void phytium_usb_role_stop(struct phytium_usb *phytium_usb); + +#endif diff --git a/drivers/usb/phytium_usb_v2/gadget.c b/drivers/usb/phytium_usb_v2/gadget.c new file mode 100644 index 0000000000000000000000000000000000000000..473c12c4b43825acdfd617e303f8a7a5934c6237 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/gadget.c @@ -0,0 +1,1668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. + * + * Copyright (C) 2023 - 2024 Phytium. + */ + +#include +#include +#include +#include +#include + +#include "gadget.h" +#include "otg.h" +#include "core.h" +#include "mem.h" +#include "ring.h" + +#define GADGET_IF_EP_EXIST(pdev, ep_num, dir) \ + (readl(&(pdev)->rev_cap->ep_supported) & \ + (BIT(ep_num) << ((dir) ? 
0 : 16))) + +#define STREAM_LOG_STREAMS 4 +#define STREAM_NUM_STREAMS BIT(STREAM_LOG_STREAMS) + +#define EP0_SETUP_SIZE 512 +#define RTL_REV_CAP 0xc4 + +#define CFG_3XPORT_U1_PIPE_CLK_GATE_EN BIT(0) + +#define HCC_PARAMS_OFFSET 0x10 +#define HCC_EXT_CAPS(p) (((p) & GENMASK(31, 16)) >> 16) +#define EXT_CAPS_ID(p) (((p) >> 0) & GENMASK(7, 0)) +#define EXT_CAPS_NEXT(p) (((p) >> 8) & GENMASK(7, 0)) + +#define MAX_HALT_USEC (16 * 1000) //16ms + +#define D_XEC_PER_REGS_CAP 0xC8 +#define REG_CHICKEN_BITS_2_OFFSET 0x48 +#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28) + +#define XBUF_CAP_ID 0xCB +#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C +#define XBUF_RX_TAG_MASK_1_OFFSET 0x24 +#define XBUF_TX_CMD_OFFSET 0x2c + +#define HCS_ENDPOINTS_MASK GENMASK(7, 0) +#define HCS_ENDPOINTS(p) (((p) & HCS_ENDPOINTS_MASK) >> 0) + +#define to_gadget_ep(ep) (container_of(ep, struct gadget_ep, endpoint)) +#define to_gadget_request(r) (container_of(r, struct gadget_request, request)) +#define gadget_to_device(g) (container_of(g, struct phytium_device, gadget)) +#define request_to_gadget_request(r) (container_of(r, struct gadget_request, request)) + +#define IMOD_INTERVAL_MASK GENMASK(15, 0) +#define IMOD_COUNTER_MASK GENMASK(31, 16) +#define IMOD_DEFAULT_INTERVAL 0 + +#define CFG_3XPORT_SSP_SUPPORT BIT(31) +#define PORT_REG6_FORCE_FS BIT(0) +#define PORT_REG6_L1_L0_HW_EN BIT(1) + +#define GADGET_MAX_HALT_USEC (16 * 1000) +#define GADGET_DEFAULT_BESL 0 + +unsigned int gadget_port_speed(unsigned int port_status) +{ + if (DEV_SUPERSPEEDPLUS(port_status)) + return USB_SPEED_SUPER_PLUS; + else if (DEV_SUPERSPEED(port_status)) + return USB_SPEED_SUPER; + else if (DEV_HIGHSPEED(port_status)) + return USB_SPEED_HIGH; + else if (DEV_FULLSPEED(port_status)) + return USB_SPEED_FULL; + + return USB_SPEED_UNKNOWN; +} + + +u32 gadget_port_state_to_neutral(u32 state) +{ + return (state & GADGET_PORT_RO) | (state & GADGET_PORT_RWS); +} + +int gadget_alloc_streams(struct phytium_device *pdev, struct gadget_ep *pep) +{ 
+ return 0; +} + +static unsigned int + gadget_get_endpoint_index(const struct usb_endpoint_descriptor *desc) +{ + unsigned int index = (unsigned int)usb_endpoint_num(desc); + + if (usb_endpoint_xfer_control(desc)) + return index * 2; + + return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; +} + +static unsigned int + gadget_get_endpoint_flag(const struct usb_endpoint_descriptor *desc) +{ + return 1 << (gadget_get_endpoint_index(desc) + 1); +} + +static void gadget_set_chicken_bits_2(struct phytium_device *pdev, u32 bit) +{ + __le32 __iomem *reg; + void __iomem *base; + u32 offset = 0; + + base = &pdev->cap_regs->hc_capbase; + offset = phytium_gadget_find_next_ext_cap(base, offset, + D_XEC_PER_REGS_CAP); + reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; + + bit = readl(reg) | bit; + writel(bit, reg); +} + +bool gadget_last_trb_on_seg(struct gadget_segment *seg, union gadget_trb *trb) +{ + return trb == &seg->trbs[TRBS_PER_SEGMENT - 1]; +} + +bool gadget_last_trb_on_ring(struct gadget_ring *ring, struct gadget_segment *seg, + union gadget_trb *trb) +{ + return gadget_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg); +} + +int gadget_wait_for_cmd_compl(struct phytium_device *pdev) +{ + struct gadget_segment *event_deq_seg; + union gadget_trb *cmd_trb; + dma_addr_t cmd_deq_dma; + union gadget_trb *event; + u32 cycle_state; + int ret, val; + u64 cmd_dma; + u32 flags; + + cmd_trb = pdev->cmd.command_trb; + pdev->cmd.status = 0; + + ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val, + !CMD_RING_BUSY(val), 1, GADGET_CMD_TIMEOUT); + if (ret) { + dev_err(pdev->dev, "ERR: Timeout while waiting for command\n"); + pdev->gadget_state = GADGET_STATE_DYING; + return -ETIMEDOUT; + } + + event = pdev->event_ring->dequeue; + event_deq_seg = pdev->event_ring->deq_seg; + cycle_state = pdev->event_ring->cycle_state; + + cmd_deq_dma = gadget_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb); + if (!cmd_deq_dma) + return -EINVAL; + + while (1) { + flags = 
le32_to_cpu(event->event_cmd.flags); + + if ((flags & TRB_CYCLE) != cycle_state) + return -EINVAL; + + cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb); + + if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION || + cmd_dma != (u64)cmd_deq_dma) { + if (!gadget_last_trb_on_seg(event_deq_seg, event)) { + event++; + continue; + } + + if (gadget_last_trb_on_ring(pdev->event_ring, event_deq_seg, event)) + cycle_state ^= 1; + + event_deq_seg = event_deq_seg->next; + event = event_deq_seg->trbs; + continue; + } + + pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status)); + if (pdev->cmd.status == COMP_SUCCESS) + return 0; + + return pdev->cmd.status; + } +} + +static int gadget_configure_endpoint(struct phytium_device *pdev) +{ + int ret; + + gadget_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma); + gadget_ring_cmd_db(pdev); + + ret = gadget_wait_for_cmd_compl(pdev); + if (ret) { + dev_err(pdev->dev, "ERR: unexpected command completion code\n"); + return -EINVAL; + } + + return ret; +} + +static void gadget_zero_in_ctx(struct phytium_device *pdev) +{ + struct gadget_input_control_ctx *ctrl_ctx; + struct gadget_slot_ctx *slot_ctx; + struct gadget_ep_ctx *ep_ctx; + int i; + + ctrl_ctx = gadget_get_input_control_ctx(&pdev->in_ctx); + + ctrl_ctx->drop_flags = 0; + ctrl_ctx->add_flags = 0; + slot_ctx = gadget_get_slot_ctx(&pdev->in_ctx); + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); + + for (i = 1; i < GADGET_ENDPOINTS_NUM; ++i) { + ep_ctx = gadget_get_ep_ctx(&pdev->in_ctx, i); + ep_ctx->ep_info = 0; + ep_ctx->ep_info2 = 0; + ep_ctx->deq = 0; + ep_ctx->tx_info = 0; + } +} + +static int gadget_update_eps_configuration(struct phytium_device *pdev, + struct gadget_ep *pep) +{ + struct gadget_input_control_ctx *ctrl_ctx; + struct gadget_slot_ctx *slot_ctx; + int ret = 0; + int i; + u32 ep_sts; + + ctrl_ctx = gadget_get_input_control_ctx(&pdev->in_ctx); + + if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 
0) + return 0; + + ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); + ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); + ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); + + slot_ctx = gadget_get_slot_ctx(&pdev->in_ctx); + for (i = GADGET_ENDPOINTS_NUM; i >= 1; i--) { + __le32 le32 = cpu_to_le32(BIT(i)); + + if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) || + (ctrl_ctx->add_flags & le32) || i == 1) { + slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); + break; + } + } + + ep_sts = GET_EP_CTX_STATE(pep->out_ctx); + + if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) && + ep_sts == EP_STATE_DISABLED) || + (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags)) + ret = gadget_configure_endpoint(pdev); + + gadget_zero_in_ctx(pdev); + + return ret; +} + +static int gadget_ep_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + struct gadget_input_control_ctx *ctrl_ctx; + struct phytium_device *pdev; + struct gadget_ep *pep; + unsigned long flags; + u32 added_ctxs; + int ret; + + if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT || + !desc->wMaxPacketSize) + return -EINVAL; + + pep = to_gadget_ep(ep); + pdev = pep->pdev; + pep->ep_state &= ~EP_UNCONFIGURED; + + if (pep->ep_state & EP_ENABLED) { + dev_warn(pdev->dev, "%s is already enalbed\n", pep->name); + return 0; + } + + spin_lock_irqsave(&pdev->lock, flags); + + added_ctxs = gadget_get_endpoint_flag(desc); + if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { + dev_err(pdev->dev, "Bad endpoint number\n"); + ret = -EINVAL; + goto unlock; + } + + pep->interval = desc->bInterval ? 
BIT(desc->bInterval - 1) : 0; + + if (pdev->gadget.speed == USB_SPEED_FULL) { + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) + pep->interval = desc->bInterval << 3; + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) + pep->interval = BIT(desc->bInterval - 1) << 3; + } + + if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) { + if (pep->interval > BIT(12)) { + dev_err(pdev->dev, "bInterval %d not supported\n", + desc->bInterval); + ret = -EINVAL; + goto unlock; + } + gadget_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); + } + + ret = gadget_endpoint_init(pdev, pep, GFP_ATOMIC); + if (ret) + goto unlock; + + ctrl_ctx = gadget_get_input_control_ctx(&pdev->in_ctx); + ctrl_ctx->add_flags = cpu_to_le32(added_ctxs); + ctrl_ctx->drop_flags = 0; + + ret = gadget_update_eps_configuration(pdev, pep); + if (ret) { + gadget_free_endpoint_rings(pdev, pep); + goto unlock; + } + + pep->ep_state |= EP_ENABLED; + pep->ep_state &= ~EP_STOPPED; + +unlock: + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static struct usb_request *gadget_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) +{ + struct gadget_ep *pep = to_gadget_ep(ep); + struct gadget_request *preq; + + preq = kzalloc(sizeof(*preq), gfp_flags); + if (!preq) + return NULL; + + preq->epnum = pep->number; + preq->pep = pep; + + return &preq->request; +} + +static void gadget_ep_free_request(struct usb_ep *ep, struct usb_request *request) +{ + struct gadget_request *preq = to_gadget_request(request); + + kfree(preq); +} + + +static void gadget_invalidate_ep_events(struct phytium_device *pdev, struct gadget_ep *pep) +{ + struct gadget_segment *segment; + union gadget_trb *event; + u32 cycle_state; + u32 data; + + event = pdev->event_ring->dequeue; + segment = pdev->event_ring->deq_seg; + cycle_state = pdev->event_ring->cycle_state; + + while (1) { + data = le32_to_cpu(event->trans_event.flags); + if ((data & TRB_CYCLE) != cycle_state) + break; + + if (TRB_FIELD_TO_TYPE(data) == 
TRB_TRANSFER && + TRB_TO_EP_ID(data) == (pep->idx + 1)) { + data |= TRB_EVENT_INVALIDATE; + event->trans_event.flags = cpu_to_le32(data); + } + + if (gadget_last_trb_on_seg(segment, event)) { + cycle_state ^= 1; + segment = pdev->event_ring->deq_seg->next; + event = segment->trbs; + } else { + event++; + } + } +} + +int ep_dequeue(struct gadget_ep *pep, struct gadget_request *preq) +{ + struct phytium_device *pdev = pep->pdev; + int ret_stop = 0; + int ret_rem; + + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) + ret_stop = gadget_cmd_stop_ep(pdev, pep); + + ret_rem = gadget_remove_request(pdev, preq, pep); + + return ret_rem ? ret_rem : ret_stop; +} + +int gadget_ep_enqueue(struct gadget_ep *pep, struct gadget_request *preq) +{ + struct phytium_device *pdev = pep->pdev; + struct usb_request *request; + int ret; + + if (preq->epnum == 0 && !list_empty(&pep->pending_list)) + return -EBUSY; + + request = &preq->request; + request->actual = 0; + request->status = -EINPROGRESS; + preq->direction = pep->direction; + preq->epnum = pep->number; + preq->td.drbl = 0; + + ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction); + if (ret) + return ret; + + list_add_tail(&preq->list, &pep->pending_list); + + switch (usb_endpoint_type(pep->endpoint.desc)) { + case USB_ENDPOINT_XFER_CONTROL: + ret = gadget_queue_ctrl_tx(pdev, preq); + break; + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + ret = gadget_queue_bulk_tx(pdev, preq); + break; + case USB_ENDPOINT_XFER_ISOC: + ret = gadget_queue_isoc_tx_prepare(pdev, preq); + break; + } + + if (ret) + goto unmap; + + return 0; + +unmap: + usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request, pep->direction); + + list_del(&preq->list); + + return ret; +} + +static int gadget_ep_disable(struct usb_ep *ep) +{ + struct gadget_input_control_ctx *ctrl_ctx; + struct gadget_request *preq; + struct phytium_device *pdev; + struct gadget_ep *pep; + unsigned long flags; + u32 drop_flag; + int ret = 0; + 
+ if (!ep) + return -EINVAL; + + pep = to_gadget_ep(ep); + pdev = pep->pdev; + + spin_lock_irqsave(&pdev->lock, flags); + if (!(pep->ep_state & EP_ENABLED)) { + dev_err(pdev->dev, "%s is already disabled\n", pep->name); + ret = -EINVAL; + goto finish; + } + + pep->ep_state |= EP_DIS_IN_PROGRESS; + + if (!(pep->ep_state & EP_UNCONFIGURED)) { + gadget_cmd_stop_ep(pdev, pep); + gadget_cmd_flush_ep(pdev, pep); + } + + while (!list_empty(&pep->pending_list)) { + preq = next_request(&pep->pending_list); + ep_dequeue(pep, preq); + } + + gadget_invalidate_ep_events(pdev, pep); + + pep->ep_state &= ~EP_DIS_IN_PROGRESS; + drop_flag = gadget_get_endpoint_flag(pep->endpoint.desc); + ctrl_ctx = gadget_get_input_control_ctx(&pdev->in_ctx); + ctrl_ctx->drop_flags = cpu_to_le32(drop_flag); + ctrl_ctx->add_flags = 0; + + gadget_endpoint_zero(pdev, pep); + + if (!(pep->ep_state & EP_UNCONFIGURED)) + ret = gadget_update_eps_configuration(pdev, pep); + + gadget_free_endpoint_rings(pdev, pep); + + pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED); + pep->ep_state |= EP_STOPPED; + +finish: + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags) +{ + struct gadget_request *preq; + struct phytium_device *pdev; + struct gadget_ep *pep; + unsigned long flags; + int ret; + + if (!request || !ep) + return -EINVAL; + + pep = to_gadget_ep(ep); + pdev = pep->pdev; + + if (!(pep->ep_state & EP_ENABLED)) { + dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n", pep->name); + return -EINVAL; + } + + preq = to_gadget_request(request); + spin_lock_irqsave(&pdev->lock, flags); + ret = gadget_ep_enqueue(pep, preq); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int gadget_ep_dequeue(struct usb_ep *ep, struct usb_request *request) +{ + struct phytium_device *pdev; + struct gadget_ep *pep; + unsigned long flags; + int ret; + + if (!request || !ep) + return -EINVAL; + + 
pep = to_gadget_ep(ep); + pdev = pep->pdev; + if (!pep->endpoint.desc) { + dev_err(pdev->dev, "%s: can't dequeue to disabled endpoint\n", pep->name); + return -ESHUTDOWN; + } + + if (!(pep->ep_state & EP_ENABLED)) + return 0; + + spin_lock_irqsave(&pdev->lock, flags); + ret = ep_dequeue(pep, to_gadget_request(request)); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int gadget_ep_set_halt(struct usb_ep *ep, int value) +{ + struct gadget_ep *pep = to_gadget_ep(ep); + struct phytium_device *pdev = pep->pdev; + struct gadget_request *preq; + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + + preq = next_request(&pep->pending_list); + if (value) { + if (preq) { + ret = -EAGAIN; + goto done; + } + } + + ret = gadget_halt_endpoint(pdev, pep, value); + +done: + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static int gadget_ep_set_wedge(struct usb_ep *ep) +{ + struct gadget_ep *pep = to_gadget_ep(ep); + struct phytium_device *pdev = pep->pdev; + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + pep->ep_state |= EP_WEDGE; + ret = gadget_halt_endpoint(pdev, pep, 1); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +static const struct usb_ep_ops gadget_ep0_ops = { + .enable = gadget_ep_enable, + .disable = gadget_ep_disable, + .alloc_request = gadget_ep_alloc_request, + .free_request = gadget_ep_free_request, + .queue = gadget_ep_queue, + .dequeue = gadget_ep_dequeue, + .set_halt = gadget_ep_set_halt, + .set_wedge = gadget_ep_set_wedge, +}; + +static const struct usb_ep_ops gadget_ep_ops = { + .enable = gadget_ep_enable, + .disable = gadget_ep_disable, + .alloc_request = gadget_ep_alloc_request, + .free_request = gadget_ep_free_request, + .queue = gadget_ep_queue, + .dequeue = gadget_ep_dequeue, + .set_halt = gadget_ep_set_halt, + .set_wedge = gadget_ep_set_wedge, +}; + +static struct usb_endpoint_descriptor gadget_ep0_desc = { + .bLength = 
USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_CONTROL, +}; + +void resume_gadget(struct phytium_device *pdev) +{ + if (pdev->gadget_driver && pdev->gadget_driver->resume) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->resume(&pdev->gadget); + spin_lock(&pdev->lock); + } +} + +void suspend_gadget(struct phytium_device *pdev) +{ + if (pdev->gadget_driver && pdev->gadget_driver->suspend) { + spin_unlock(&pdev->lock); + pdev->gadget_driver->suspend(&pdev->gadget); + spin_lock(&pdev->lock); + } +} + +static void gadget_clear_chicken_bit_2(struct phytium_device *pdev, u32 bit) +{ + __le32 __iomem *reg; + void __iomem *base; + u32 offset = 0; + + base = &pdev->cap_regs->hc_capbase; + offset = phytium_gadget_find_next_ext_cap(base, offset, D_XEC_PER_REGS_CAP); + reg = base + offset + REG_CHICKEN_BITS_2_OFFSET; + + bit = readl(reg) & ~bit; + writel(bit, reg); +} + +int gadget_setup_device(struct phytium_device *pdev, enum gadget_setup_dev setup) +{ + struct gadget_input_control_ctx *ctrl_ctx; + struct gadget_slot_ctx *slot_ctx; + int dev_state = 0; + int ret; + + if (!pdev->slot_id) + return -EINVAL; + + if (!pdev->active_port->port_num) + return -EINVAL; + + slot_ctx = gadget_get_slot_ctx(&pdev->out_ctx); + dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + + if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) + return 0; + + slot_ctx = gadget_get_slot_ctx(&pdev->in_ctx); + ctrl_ctx = gadget_get_input_control_ctx(&pdev->in_ctx); + + if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) { + ret = gadget_setup_addressable_priv_dev((void *)pdev); + if (ret) + return ret; + } + + gadget_copy_ep0_dequeue_into_input_ctx(pdev); + + ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); + ctrl_ctx->drop_flags = 0; + + gadget_queue_address_device(pdev, pdev->in_ctx.dma, setup); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + + ctrl_ctx->add_flags = 0; + 
ctrl_ctx->drop_flags = 0; + + return ret; +} + +static int gadget_get_frame(struct usb_gadget *g) +{ + struct phytium_device *pdev = gadget_to_device(g); + + return readl(&pdev->run_regs->microframe_index) >> 3; +} + +static void __gadget_wakeup(struct phytium_device *pdev) +{ + struct gadget_port_regs __iomem *port_regs; + u32 portpm, portsc; + + port_regs = pdev->active_port->regs; + portsc = readl(&port_regs->portsc) & PORT_PLS_MASK; + + if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) { + portpm = readl(&port_regs->portpmsc); + if (!(portpm & PORT_RWE)) + return; + } + + if (portsc == XDEV_U3 && !pdev->may_wakeup) + return; + + gadget_set_link_state(pdev, &port_regs->portsc, XDEV_U0); + + pdev->gadget_state |= GADGET_WAKEUP_PENDING; +} + +static int gadget_wakeup(struct usb_gadget *g) +{ + struct phytium_device *pdev = gadget_to_device(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + __gadget_wakeup(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int gadget_set_selfpowered(struct usb_gadget *g, + int is_selfpowered) +{ + struct phytium_device *pdev = gadget_to_device(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + g->is_selfpowered = !!is_selfpowered; + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +int gadget_halt_endpoint(struct phytium_device *pdev, struct gadget_ep *pep, int value) +{ + int ret; + + ret = gadget_cmd_stop_ep(pdev, pep); + if (ret) + return ret; + + if (value) { + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) { + gadget_queue_halt_endpoint(pdev, pep->idx); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + } + pep->ep_state |= EP_HALTED; + } else { + gadget_queue_reset_ep(pdev, pep->idx); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + + if (ret) + return ret; + + pep->ep_state &= ~EP_HALTED; + + if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE)) + 
gadget_ring_doorbell_for_active_rings(pdev, pep); + + pep->ep_state &= ~EP_WEDGE; + } + + return 0; +} + +int gadget_reset_device(struct phytium_device *pdev) +{ + struct gadget_slot_ctx *slot_ctx; + int slot_state; + int ret, i; + + slot_ctx = gadget_get_slot_ctx(&pdev->in_ctx); + slot_ctx->dev_info = 0; + pdev->device_address = 0; + + slot_ctx = gadget_get_slot_ctx(&pdev->out_ctx); + slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + + if (slot_state <= SLOT_STATE_DEFAULT && pdev->eps[0].ep_state & EP_HALTED) + gadget_halt_endpoint(pdev, &pdev->eps[0], 0); + + pdev->eps[0].ep_state &= ~(EP_STATE_STOPPED | EP_HALTED); + pdev->eps[0].ep_state |= EP_ENABLED; + + if (slot_state <= SLOT_STATE_DEFAULT) + return 0; + + gadget_queue_reset_device(pdev); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + + for (i = 1; i < GADGET_ENDPOINTS_NUM; i++) + pdev->eps[i].ep_state |= EP_STATE_STOPPED | EP_UNCONFIGURED; + + if (ret) + dev_err(pdev->dev, "Reset device failed with %d\n", ret); + + return ret; +} + +void gadget_irq_reset(struct phytium_device *pdev) +{ + struct gadget_port_regs __iomem *port_regs; + + gadget_reset_device(pdev); + + port_regs = pdev->active_port->regs; + pdev->gadget.speed = gadget_port_speed(readl(port_regs)); + + spin_unlock(&pdev->lock); + usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver); + spin_lock(&pdev->lock); + + switch (pdev->gadget.speed) { + case USB_SPEED_SUPER_PLUS: + case USB_SPEED_SUPER: + gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + pdev->gadget.ep0->maxpacket = 512; + break; + case USB_SPEED_HIGH: + case USB_SPEED_FULL: + gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); + pdev->gadget.ep0->maxpacket = 64; + break; + default: + dev_err(pdev->dev, "Unknown device speed\n"); + break; + } + + gadget_clear_chicken_bit_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS); + gadget_setup_device(pdev, SETUP_CONTEXT_ONLY); + usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT); +} + +static int 
gadget_pullup(struct usb_gadget *g, int is_on) +{ + struct phytium_device *pdev = gadget_to_device(g); + struct phytium_usb *phytium_usb = dev_get_drvdata(pdev->dev); + unsigned long flags; + + disable_irq(phytium_usb->device_irq); + spin_lock_irqsave(&pdev->lock, flags); + + if (!is_on) + gadget_reset_device(pdev); + + spin_unlock_irqrestore(&pdev->lock, flags); + enable_irq(phytium_usb->device_irq); + + return 0; +} + +static void gadget_disable_port(struct phytium_device *pdev, __le32 __iomem *port_regs) +{ + u32 temp = gadget_port_state_to_neutral(readl(port_regs)); + + writel(temp | PORT_PED, port_regs); +} + +static int start(struct phytium_device *pdev) +{ + u32 temp; + int ret; + + temp = readl(&pdev->op_regs->command); + temp |= (CMD_R_S | CMD_DEVEN); + writel(temp, &pdev->op_regs->command); + + pdev->gadget_state = 0; + + ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, + !(temp & STS_HALT), 1, GADGET_MAX_HALT_USEC); + if (ret) { + pdev->gadget_state = GADGET_STATE_DYING; + dev_err(pdev->dev, "Error: Controller run failed\n"); + } + + return ret; +} + +static int gadget_run(struct phytium_device *pdev, enum usb_device_speed speed) +{ + u32 fs_speed = 0; + u32 temp; + int ret; + + temp = readl(&pdev->ir_set->irq_control); + temp &= ~IMOD_INTERVAL_MASK; + temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK); + writel(temp, &pdev->ir_set->irq_control); + + temp = readl(&pdev->port3x_regs->mode_addr); + + switch (speed) { + case USB_SPEED_SUPER_PLUS: + temp |= CFG_3XPORT_SSP_SUPPORT; + break; + case USB_SPEED_SUPER: + temp &= ~CFG_3XPORT_SSP_SUPPORT; + break; + case USB_SPEED_HIGH: + break; + case USB_SPEED_FULL: + fs_speed = PORT_REG6_FORCE_FS; + break; + default: + dev_err(pdev->dev, "Invalid max_speed parameter %d\n", speed); + fallthrough; + case USB_SPEED_UNKNOWN: + speed = USB_SPEED_SUPER; + break; + } + + if (speed >= USB_SPEED_SUPER) { + writel(temp, &pdev->port3x_regs->mode_addr); + gadget_set_link_state(pdev, 
&pdev->usb3_port.regs->portsc, XDEV_RXDETECT); + } else { + gadget_disable_port(pdev, &pdev->usb3_port.regs->portsc); + } + + gadget_set_link_state(pdev, &pdev->usb2_port.regs->portsc, XDEV_RXDETECT); + + gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + + writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_regs6); + + ret = start(pdev); + if (ret) { + ret = -ENODEV; + goto err; + } + + temp = readl(&pdev->op_regs->command); + temp |= CMD_INTE; + writel(temp, &pdev->op_regs->command); + + temp = readl(&pdev->ir_set->irq_pending); + writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending); + + return 0; + +err: + gadget_halt(pdev); + + return ret; +} + +static int gadget_udc_start(struct usb_gadget *g, + struct usb_gadget_driver *driver) +{ + enum usb_device_speed max_speed = driver->max_speed; + struct phytium_device *pdev = gadget_to_device(g); + unsigned long flags; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + pdev->gadget_driver = driver; + + max_speed = min(driver->max_speed, g->max_speed); + ret = gadget_run(pdev, max_speed); + + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +void gadget_set_usb2_hardware_lpm(struct phytium_device *pdev, + struct usb_request *req, int enable) +{ + if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable) + return; + + if (enable) + writel(PORT_BESL(GADGET_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE, + &pdev->active_port->regs->portpmsc); + else + writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc); +} + +int gadget_disable_slot(struct phytium_device *pdev) +{ + int ret; + + gadget_queue_slot_control(pdev, TRB_DISABLE_SLOT); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + + pdev->slot_id = 0; + pdev->active_port = NULL; + + memset(pdev->in_ctx.bytes, 0, GADGET_CTX_SIZE); + memset(pdev->out_ctx.bytes, 0, GADGET_CTX_SIZE); + + return ret; +} + +int gadget_enable_slot(struct phytium_device *pdev) +{ + struct gadget_slot_ctx *slot_ctx; + int 
slot_state, ret; + + slot_ctx = gadget_get_slot_ctx(&pdev->out_ctx); + slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + + if (slot_state != SLOT_STATE_DISABLED) + return 0; + + gadget_queue_slot_control(pdev, TRB_ENABLE_SLOT); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + if (ret) + return ret; + + pdev->slot_id = 1; + + return ret; +} + +static void gadget_clear_port_change_bit(struct phytium_device *pdev, __le32 __iomem *port_regs) +{ + u32 portsc = readl(port_regs); + + writel(gadget_port_state_to_neutral(portsc) | (portsc & PORT_CHANGE_BITS), port_regs); +} + +void gadget_update_erst_dequeue(struct phytium_device *pdev, union gadget_trb *event_ring_deq, + u8 clear_ehb) +{ + u64 temp_64; + dma_addr_t deq; + + temp_64 = lo_hi_readq(&pdev->ir_set->erst_dequeue); + + if (event_ring_deq != pdev->event_ring->dequeue) { + deq = gadget_trb_virt_to_dma(pdev->event_ring->deq_seg, pdev->event_ring->dequeue); + temp_64 &= ERST_PTR_MASK; + temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK); + } + + if (clear_ehb) + temp_64 |= ERST_EHB; + else + temp_64 &= ~ERST_EHB; + + lo_hi_writeq(temp_64, &pdev->ir_set->erst_dequeue); +} + +static void gadget_consume_all_events(struct phytium_device *pdev) +{ + struct gadget_segment *event_deq_seg; + union gadget_trb *event_ring_deq; + union gadget_trb *event; + u32 cycle_bit; + + event_ring_deq = pdev->event_ring->dequeue; + event_deq_seg = pdev->event_ring->deq_seg; + event = pdev->event_ring->dequeue; + + while (1) { + cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE); + + if (cycle_bit != pdev->event_ring->cycle_state) + break; + + gadget_inc_deq(pdev, pdev->event_ring); + + if (!gadget_last_trb_on_seg(event_deq_seg, event)) { + event++; + continue; + } + + if (gadget_last_trb_on_ring(pdev->event_ring, event_deq_seg, event)) + cycle_bit ^= 1; + + event_deq_seg = event_deq_seg->next; + event = event_deq_seg->trbs; + } + + gadget_update_erst_dequeue(pdev, event_ring_deq, 1); +} + +static 
void gadget_clear_cmd_ring(struct phytium_device *pdev) +{ + struct gadget_segment *seg; + u64 val_64; + int i; + + gadget_initialize_ring_info(pdev->cmd_ring); + + seg = pdev->cmd_ring->first_seg; + for (i = 0; i < pdev->cmd_ring->num_segs; i++) { + memset(seg->trbs, 0, sizeof(union gadget_trb) * (TRBS_PER_SEGMENT - 1)); + seg = seg->next; + } + + val_64 = lo_hi_readq(&pdev->op_regs->cmd_ring); + val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) | + (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) + | pdev->cmd_ring->cycle_state; + lo_hi_writeq(val_64, &pdev->op_regs->cmd_ring); +} + +static void stop(struct phytium_device *pdev) +{ + u32 temp; + + gadget_cmd_flush_ep(pdev, &pdev->eps[0]); + + if (!list_empty(&pdev->eps[0].pending_list)) { + struct gadget_request *req; + + req = next_request(&pdev->eps[0].pending_list); + if (req == &pdev->ep0_preq) + ep_dequeue(&pdev->eps[0], req); + } + + gadget_disable_port(pdev, &pdev->usb2_port.regs->portsc); + gadget_disable_port(pdev, &pdev->usb3_port.regs->portsc); + gadget_disable_slot(pdev); + gadget_halt(pdev); + + temp = readl(&pdev->op_regs->status); + writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status); + + temp = readl(&pdev->ir_set->irq_pending); + writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending); + + gadget_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc); + gadget_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc); + + temp = readl(&pdev->ir_set->irq_pending); + temp |= IMAN_IP; + writel(temp, &pdev->ir_set->irq_pending); + + gadget_consume_all_events(pdev); + gadget_clear_cmd_ring(pdev); +} + +static int gadget_udc_stop(struct usb_gadget *g) +{ + struct phytium_device *pdev = gadget_to_device(g); + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + stop(pdev); + pdev->gadget_driver = NULL; + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static const struct usb_gadget_ops phytium_usb_gadget_ops = { + .get_frame = gadget_get_frame, + .wakeup = 
gadget_wakeup, + .set_selfpowered = gadget_set_selfpowered, + .pullup = gadget_pullup, + .udc_start = gadget_udc_start, + .udc_stop = gadget_udc_stop, +}; + +void gadget_died(struct phytium_device *pdev) +{ + dev_err(pdev->dev, "Error: controller not responding\n"); + pdev->gadget_state |= GADGET_STATE_DYING; + gadget_halt(pdev); +} + +static void gadget_quiesce(struct phytium_device *pdev) +{ + u32 halted, mask, cmd; + + mask = ~(u32)(GADGET_IRQS); + + halted = readl(&pdev->op_regs->status) & STS_HALT; + if (!halted) + mask &= ~(CMD_R_S | CMD_DEVEN); + + cmd = readl(&pdev->op_regs->command); + cmd &= mask; + writel(cmd, &pdev->op_regs->command); +} + +int gadget_halt(struct phytium_device *pdev) +{ + int ret; + u32 val; + + gadget_quiesce(pdev); + + ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val, + val & STS_HALT, 1, MAX_HALT_USEC); + if (ret) { + dev_err(pdev->dev, "gadget halt failed\n"); + return ret; + } + + pdev->gadget_state |= GADGET_STATE_HALTED; + + return 0; +} + +int gadget_reset(struct phytium_device *pdev) +{ + u32 cmd, temp; + int ret; + + temp = readl(&pdev->op_regs->status); + if (temp == ~(u32)0) { + dev_err(pdev->dev, "Controller not accessible\n"); + return -ENODEV; + } + + if ((temp & STS_HALT) == 0) { + dev_err(pdev->dev, "controller not halted, abort reset\n"); + return -EINVAL; + } + + cmd = readl(&pdev->op_regs->command); + cmd |= CMD_RESET; + writel(cmd, &pdev->op_regs->command); + + ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp, + !(temp & CMD_RESET), 1, 10 * 1000); + if (ret) { + dev_err(pdev->dev, "controller reset failed\n"); + return ret; + } + + ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp, + !(temp & STS_CNR), 1, 10 * 1000); + if (ret) { + dev_err(pdev->dev, "controller not ready to work\n"); + return ret; + } + + dev_info(pdev->dev, "controller ready to work\n"); + + return ret; +} + +int phytium_gadget_find_next_ext_cap(void __iomem *base, u32 start, int id) +{ + u32 offset = start; + 
u32 next; + u32 val; + + if (!start || start == HCC_PARAMS_OFFSET) { + val = readl(base + HCC_PARAMS_OFFSET); + if (val == ~0) + return 0; + + offset = HCC_EXT_CAPS(val) << 2; + if (!offset) + return 0; + } + + do { + val = readl(base + offset); + if (val == ~0) + return 0; + + if (EXT_CAPS_ID(val) == id && offset != start) + return offset; + + next = EXT_CAPS_NEXT(val); + offset += next << 2; + } while (next); + + return 0; +} + +static void phytium_gadget_get_rev_cap(struct phytium_device *pdev) +{ + void __iomem *reg; + + if (!pdev) + return; + + reg = &pdev->cap_regs->hc_capbase; + reg += phytium_gadget_find_next_ext_cap(reg, 0, RTL_REV_CAP); + pdev->rev_cap = reg; + + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", + readl(&pdev->rev_cap->ctrl_revision), + readl(&pdev->rev_cap->rtl_revision), + readl(&pdev->rev_cap->ep_supported), + readl(&pdev->rev_cap->rx_buff_size), + readl(&pdev->rev_cap->tx_buff_size)); +} + +static int gadget_gen_setup(struct phytium_device *pdev) +{ + int ret; + u32 reg; + + if (!pdev) + return 0; + + pdev->cap_regs = pdev->regs; + pdev->op_regs = pdev->regs + + HC_LENGTH(readl(&pdev->cap_regs->hc_capbase)); + pdev->run_regs = pdev->regs + + (readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK); + pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1); + pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase); + pdev->hci_version = HC_VERSION(pdev->hcc_params); + pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + + phytium_gadget_get_rev_cap(pdev); + + ret = gadget_halt(pdev); + if (ret) + return ret; + + ret = gadget_reset(pdev); + if (ret) + return ret; + + if (HCC_64BIT_ADDR(pdev->hcc_params) && + !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) { + dev_info(pdev->dev, "Enableing 64 bit DMA addr\n"); + dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64)); + } else { + ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + dev_info(pdev->dev, "Enableing 32 bit DMA addr\n"); + 
dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32)); + } + + spin_lock_init(&pdev->lock); + + ret = gadget_mem_init((void *)pdev); + if (ret) + return ret; + + reg = readl(&pdev->port3x_regs->mode_2); + reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN; + writel(reg, &pdev->port3x_regs->mode_2); + + return 0; +} + +static void gadget_get_ep_buffering(struct phytium_device *pdev, struct gadget_ep *pep) +{ + void __iomem *reg = &pdev->cap_regs->hc_capbase; + int endpoints; + + reg += phytium_gadget_find_next_ext_cap(reg, 0, XBUF_CAP_ID); + + if (!pep->direction) { + pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET); + pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET); + pep->buffering = (pep->buffering + 1) / 2; + pep->buffering_period = (pep->buffering_period + 1) / 2; + + return; + } + + endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2; + + reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32); + reg += pep->number * sizeof(u32) * 2; + pep->buffering = (readl(reg) + 1) / 2; + pep->buffering_period = pep->buffering; +} + +static int gadget_init_endpoints(struct phytium_device *pdev) +{ + int max_streams = HCC_MAX_PSA(pdev->hcc_params); + struct gadget_ep *pep; + int i; + + INIT_LIST_HEAD(&pdev->gadget.ep_list); + + if (max_streams < STREAM_LOG_STREAMS) { + dev_err(pdev->dev, "Stream size %d not supported\n", + max_streams); + return -EINVAL; + } + + max_streams = STREAM_LOG_STREAMS; + + for (i = 0; i < GADGET_ENDPOINTS_NUM; i++) { + bool direction = !(i & 1); + u8 epnum = ((i + 1) >> 1); + + if (!GADGET_IF_EP_EXIST(pdev, epnum, direction)) + continue; + + pep = &pdev->eps[i]; + pep->pdev = pdev; + pep->number = epnum; + pep->direction = direction; + + if (epnum == 0) { + snprintf(pep->name, sizeof(pep->name), "ep%d%s", + epnum, "BiDir"); + + pep->idx = 0; + usb_ep_set_maxpacket_limit(&pep->endpoint, 512); + pep->endpoint.maxburst = 1; + pep->endpoint.ops = &gadget_ep0_ops; + pep->endpoint.desc = &gadget_ep0_desc; + pep->endpoint.comp_desc = NULL; + 
pep->endpoint.caps.type_control = true; + pep->endpoint.caps.dir_in = true; + pep->endpoint.caps.dir_out = true; + + pdev->ep0_preq.epnum = pep->number; + pdev->ep0_preq.pep = pep; + pdev->gadget.ep0 = &pep->endpoint; + } else { + snprintf(pep->name, sizeof(pep->name), "ep%d%s", epnum, + (pep->direction) ? "in" : "out"); + + pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1; + usb_ep_set_maxpacket_limit(&pep->endpoint, 1024); + + pep->endpoint.max_streams = max_streams; + pep->endpoint.ops = &gadget_ep_ops; + list_add_tail(&pep->endpoint.ep_list, &pdev->gadget.ep_list); + + pep->endpoint.caps.type_iso = true; + pep->endpoint.caps.type_bulk = true; + pep->endpoint.caps.type_int = true; + pep->endpoint.caps.dir_in = direction; + pep->endpoint.caps.dir_out = !direction; + } + + pep->endpoint.name = pep->name; + pep->in_ctx = gadget_get_ep_ctx(&pdev->in_ctx, pep->idx); + pep->out_ctx = gadget_get_ep_ctx(&pdev->out_ctx, pep->idx); + gadget_get_ep_buffering(pdev, pep); + + INIT_LIST_HEAD(&pep->pending_list); + } + + return 0; +} + +static void gadget_free_endpoints(struct phytium_device *pdev) +{ +} + +static int gadget_start(void *data) +{ + struct phytium_device *pdev; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + u32 max_speed; + int ret = 0; + + if (!phytium_usb) + return 0; + + phytium_usb_otg_gadget_on((void *)phytium_usb); + + pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); + if (!pdev) + return -ENOMEM; + + phytium_usb->gadget_dev = pdev; + pdev->dev = phytium_usb->dev; + pdev->regs = phytium_usb->device_regs; + max_speed = usb_get_maximum_speed(pdev->dev); + + switch (max_speed) { + case USB_SPEED_FULL: + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + break; + case USB_SPEED_UNKNOWN: + max_speed = USB_SPEED_SUPER_PLUS; + break; + default: + dev_err(pdev->dev, "invalid max speed %d\n", max_speed); + } + + pdev->gadget.ops = &phytium_usb_gadget_ops; + pdev->gadget.name = "phytium_usb_gadget"; + pdev->gadget.speed = 
USB_SPEED_UNKNOWN;
+	pdev->gadget.sg_supported = 0;
+	pdev->gadget.max_speed = max_speed;
+	pdev->gadget.lpm_capable = 0;
+	pdev->gadget.quirk_ep_out_aligned_size = true;
+
+	pdev->setup_buf = kzalloc(EP0_SETUP_SIZE, GFP_KERNEL);
+	if (!pdev->setup_buf)
+		{ ret = -ENOMEM; goto free_pdev; }
+
+	ret = gadget_gen_setup(pdev);
+	if (ret) {
+		dev_err(pdev->dev, "gadget_gen_setup failed\n");
+		goto free_setup;
+	}
+
+	ret = gadget_init_endpoints(pdev);
+	if (ret) {
+		dev_err(pdev->dev, "gadget_init_endpoints failed\n");
+		goto free_setup;
+	}
+
+	ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
+	if (ret) {
+		dev_err(pdev->dev, "usb_add_gadget_udc failed\n");
+		goto free_endpoints;
+	}
+
+	ret = devm_request_irq(pdev->dev, phytium_usb->device_irq, gadget_irq_handler,
+			IRQF_SHARED, "phytium_usb_gadget", pdev);
+	if (ret)
+		goto del_gadget;
+
+	return 0;
+
+del_gadget:
+	usb_del_gadget_udc(&pdev->gadget);
+free_endpoints:
+	gadget_free_endpoints(pdev);
+free_setup:
+	kfree(pdev->setup_buf);
+free_pdev:
+	kfree(pdev);
+
+	return ret;
+}
+
+static void gadget_stop(void *data)
+{
+	struct phytium_usb *phytium_usb = (struct phytium_usb *)data;
+	struct phytium_device *pdev = phytium_usb->gadget_dev;
+
+	devm_free_irq(pdev->dev, phytium_usb->device_irq, pdev);
+	usb_del_gadget_udc(&pdev->gadget);
+	gadget_free_endpoints(pdev);
+	gadget_mem_cleanup((void *)pdev);
+	kfree(pdev);
+	phytium_usb->gadget_dev = NULL;
+	phytium_usb_otg_gadget_off((void *)phytium_usb);
+}
+
+void disconnect_gadget(struct phytium_device *pdev)
+{
+	pdev->gadget_state |= GADGET_STATE_DISCONNECT_PENDING;
+
+	if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
+		spin_unlock(&pdev->lock);
+		pdev->gadget_driver->disconnect(&pdev->gadget);
+		spin_lock(&pdev->lock);
+	}
+
+	pdev->gadget.speed = USB_SPEED_UNKNOWN;
+	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
+
+	pdev->gadget_state &= ~GADGET_STATE_DISCONNECT_PENDING;
+}
+
+static int gadget_suspend(void *data, bool do_wakeup)
+{
+	struct phytium_usb 
*phytium_usb = (struct phytium_usb *)data; + struct phytium_device *pdev = phytium_usb->gadget_dev; + unsigned long flags; + + spin_lock_irqsave(&pdev->lock, flags); + disconnect_gadget(pdev); + stop(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + + return 0; +} + +static int gadget_resume(void *data, bool hibernated) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + struct phytium_device *pdev = phytium_usb->gadget_dev; + unsigned long flags; + enum usb_device_speed max_speed; + int ret; + + spin_lock_irqsave(&pdev->lock, flags); + if (!pdev->gadget_driver) { + spin_unlock_irqrestore(&pdev->lock, flags); + return 0; + } + + max_speed = pdev->gadget_driver->max_speed; + max_speed = min(max_speed, pdev->gadget.max_speed); + + ret = gadget_run(pdev, max_speed); + + if (pdev->link_state == XDEV_U3) + __gadget_wakeup(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + + return ret; +} + +int phytium_usb_gadget_init(void *data) +{ + struct phytium_usb_role_driver *role_driver; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + role_driver = devm_kzalloc(phytium_usb->dev, sizeof(*phytium_usb), GFP_KERNEL); + if (!role_driver) + return -ENOMEM; + + role_driver->start = gadget_start; + role_driver->stop = gadget_stop; + role_driver->suspend = gadget_suspend; + role_driver->resume = gadget_resume; + role_driver->state = ROLE_STATE_INACTIVE; + role_driver->name = "gadget"; + phytium_usb->roles[USB_ROLE_DEVICE] = role_driver; + } + + return 0; +} diff --git a/drivers/usb/phytium_usb_v2/gadget.h b/drivers/usb/phytium_usb_v2/gadget.h new file mode 100644 index 0000000000000000000000000000000000000000..89379625132d39840d71107bad5814be80802e74 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/gadget.h @@ -0,0 +1,712 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PHYTIUM_USB_GADGET_H__ +#define __LINUX_PHYTIUM_USB_GADGET_H__ + +#include + +#define SCT_SEC_TR 0 +#define SCT_PRI_TR 1 +#define 
SCT_FOR_TRB(p) (((p) << 1) & 0x7) +#define SCT_FOR_CTX(p) (((p) << 1) & GENMASK(3, 1)) + + +#define TRB_EVENT_INVALIDATE 8 + +#define COMP_CODE_MASK (0xff << 24) +#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24) +#define COMP_INVALID 0 +#define COMP_SUCCESS 1 +#define COMP_DATA_BUFFER_ERROR 2 +#define COMP_BABBLE_DETECTED_ERROR 3 +#define COMP_TRB_ERROR 5 +#define COMP_RESOURCE_ERROR 7 +#define COMP_NO_SLOTS_AVAILABLE_ERROR 9 +#define COMP_INVALID_STREAM_TYPE_ERROR 10 +#define COMP_SLOT_NOT_ENABLED_ERROR 11 +#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12 +#define COMP_SHORT_PACKET 13 +#define COMP_RING_UNDERRUN 14 +#define COMP_RING_OVERRUN 15 +#define COMP_VF_EVENT_RING_FULL_ERROR 16 +#define COMP_PARAMETER_ERROR 17 +#define COMP_CONTEXT_STATE_ERROR 19 +#define COMP_EVENT_RING_FULL_ERROR 21 +#define COMP_INCOMPATIBLE_DEVICE_ERROR 22 +#define COMP_MISSED_SERVICE_ERROR 23 +#define COMP_COMMAND_RING_STOPPED 24 +#define COMP_COMMAND_ABORTED 25 +#define COMP_STOPPED 26 +#define COMP_STOPPED_LENGTH_INVALID 27 +#define COMP_STOPPED_SHORT_PACKET 28 +#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29 +#define COMP_ISOCH_BUFFER_OVERRUN 31 +#define COMP_EVENT_LOST_ERROR 32 +#define COMP_UNDEFINED_ERROR 33 +#define COMP_INVALID_STREAM_ID_ERROR 34 + + +#define GADGET_DEV_MAX_SLOTS 1 +#define GADGET_CTX_SIZE 2112 + +#define GADGET_ENDPOINTS_NUM 31 + +#define RTSOFF_MASK GENMASK(31, 5) + +#define HC_LENGTH(p) (((p) >> 00) & GENMASK(7, 0)) +#define HC_VERSION(p) (((p) >> 16) & GENMASK(15, 1)) + +#define HCC_64BIT_ADDR(p) ((p) & BIT(0)) +#define HCC_MAX_PSA(p) ((((p) >> 12) & 0xf) + 1) + +#define GADGET_STATE_HALTED BIT(1) +#define GADGET_STATE_DYING BIT(2) +#define GADGET_STATE_DISCONNECT_PENDING BIT(3) +#define GADGET_WAKEUP_PENDING BIT(4) + +#define LINK_TOGGLE BIT(1) + +#define XDEV_U0 (0x0 << 5) +#define XDEV_U1 (0x1 << 5) +#define XDEV_U2 (0x2 << 5) +#define XDEV_U3 (0x3 << 5) +#define XDEV_DISABLED (0x4 << 5) +#define XDEV_RXDETECT (0x5 << 5) +#define XDEV_INACTIVE (0x6 << 
5) +#define XDEV_POLLING (0x7 << 5) +#define XDEV_RECOVERY (0x8 << 5) +#define XDEV_HOT_RESET (0x9 << 5) +#define XDEV_COMP_MODE (0xa << 5) +#define XDEV_TEST_MODE (0xb << 5) +#define XDEV_RESUME (0xf << 5) + +#define PORT_CONNECT BIT(0) +#define PORT_PED BIT(1) +#define PORT_RESET BIT(4) +#define PORT_LINK_STROBE BIT(16) +#define PORT_CSC BIT(17) +#define PORT_WRC BIT(19) +#define PORT_RC BIT(21) +#define PORT_PLC BIT(22) +#define PORT_CEC BIT(23) +#define PORT_WKCONN_E BIT(25) +#define PORT_WKDISC_E BIT(26) +#define PORT_WR BIT(31) +#define PORT_CHANGE_BITS (PORT_CSC | PORT_WRC | PORT_RC | PORT_PLC | PORT_CEC) +#define PORT_PLS_MASK GENMASK(8, 5) +#define DEV_SPEED_MASK GENMASK(13, 10) +#define XDEV_FS (0x1 << 10) +#define XDEV_HS (0x3 << 10) +#define XDEV_SS (0x4 << 10) +#define XDEV_SSP (0x5 << 10) +#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0 << 10)) +#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS) +#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS) +#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS) +#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP) +#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS) +#define DEV_PORT_SPEED(p) (((p) >> 10) & 0xf) +#define DEV_PORT(p) (((p) & 0xff) << 16) +#define DEV_ADDR_MASK GENMASK(7, 0) + +#define GADGET_PORT_RO (PORT_CONNECT | DEV_SPEED_MASK) +#define GADGET_PORT_RWS (PORT_PLS_MASK | PORT_WKCONN_E | PORT_WKDISC_E) +#define PORT_RWE BIT(3) +#define PORT_BESL(p) (((p) << 4) & GENMASK(7, 4)) +#define PORT_HLE BIT(16) +#define RRBESL(p) (((p) & GENMASK(20, 17)) >> 17) +#define PORT_L1S_MASK GENMASK(2, 0) +#define PORT_L1S(p) ((p) & PORT_L1S_MASK) +#define PORT_L1S_ACK PORT_L1S(1) +#define PORT_L1S_NYET PORT_L1S(2) +#define PORT_L1S_STALL PORT_L1S(3) +#define PORT_L1S_TIMEOUT PORT_L1S(4) +#define PORT_U1_TIMEOUT_MASK GENMASK(7, 0) +#define PORT_U1_TIMEOUT(p) ((p) & PORT_U1_TIMEOUT_MASK) +#define PORT_U2_TIMEOUT_MASK GENMASK(14, 8) +#define 
PORT_U2_TIMEOUT(p) ((p) & PORT_U2_TIMEOUT_MASK) + +#define PORT_TEST_MODE_MASK GENMASK(31, 28) +#define PORT_TEST_MODE(p) (((p) << 28) & PORT_TEST_MODE_MASK) + +#define IMAN_IP BIT(0) +#define IMAN_IE BIT(1) +#define IMAN_IE_SET(p) ((p) | IMAN_IE) +#define IMAN_IE_CLEAR(p) ((p) & ~IMAN_IE) + +#define CMD_R_S BIT(0) +#define CMD_RESET BIT(1) +#define CMD_INTE BIT(2) +#define CMD_DSEIE BIT(3) +#define CMD_EWE BIT(10) +#define CMD_DEVEN BIT(17) +#define GADGET_IRQS (CMD_INTE | CMD_DSEIE | CMD_EWE) + + +#define STS_HALT BIT(0) +#define STS_FATAL BIT(2) +#define STS_EINT BIT(3) +#define STS_PCD BIT(4) +#define STS_SSS BIT(8) +#define STS_RSS BIT(9) +#define STS_SRE BIT(10) +#define STS_CNR BIT(11) +#define STS_HCE BIT(12) + +#define CMD_RING_RSVD_BITS GENMASK(5, 0) +#define DBOFF_MASK GENMASK(31, 2) + + +#define TRB_TO_STREAM_ID(p) ((((p) & GENMASK(31, 16)) >> 16)) +#define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16)) +#define TRB_TO_SLOT_ID(p) (((p) & GENMASK(31, 24)) >> 24) +#define SLOT_ID_FOR_TRB(p) (((p) << 24) & GENMASK(31, 24)) +#define TRB_TO_EP_INDEX(p) (((p) >> 16) & 0x1f) +#define EP_ID_FOR_TRB(p) ((((p) + 1) << 16) & GENMASK(20, 16)) + +#define TRB_TYPE_BITMASK GENMASK(15, 10) +#define TRB_TYPE(p) ((p) << 10) +#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10) + +#define TRB_NORMAL 1 +#define TRB_SETUP 2 +#define TRB_DATA 3 +#define TRB_STATUS 4 +#define TRB_ISOC 5 +#define TRB_LINK 6 +#define TRB_EVENT_DATA 7 +#define TRB_TR_NOOP 8 +#define TRB_ENABLE_SLOT 9 +#define TRB_DISABLE_SLOT 10 +#define TRB_ADDR_DEV 11 +#define TRB_CONFIG_EP 12 +#define TRB_EVAL_CONTEXT 13 +#define TRB_RESET_EP 14 +#define TRB_STOP_RING 15 +#define TRB_SET_DEQ 16 +#define TRB_RESET_DEV 17 +#define TRB_FORCE_EVENT 18 +#define TRB_FORCE_HEADER 22 +#define TRB_CMD_NOOP 23 +#define TRB_TRANSFER 32 +#define TRB_COMPLETION 33 +#define TRB_PORT_STATUS 34 +#define TRB_HC_EVENT 37 +#define TRB_MFINDEX_WRAP 39 +#define TRB_ENDPOINT_NRDY 48 +#define TRB_HALT_ENDPOINT 54 
+#define TRB_HALT_OVERFLOW 57 +#define TRB_FLUSH_ENDPOINT 58 + +#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17) +#define GET_TD_SIZE(p) (((p) & GENMASK(21, 17)) >> 17) +#define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17) +#define TRB_INTR_TARGET(p) (((p) << 22) & GENMASK(31, 22)) +#define GET_INTR_TARGET(p) (((p) & GENMASK(31, 22)) >> 22) +#define TRB_TBC(p) (((p) & 0x3) << 7) +#define TRB_TLBPC(p) (((p) & 0xf) << 16) + +#define TRB_LEN(p) ((p) & GENMASK(16, 0)) +#define TRB_CYCLE BIT(0) +#define TRB_ENT BIT(1) +#define TRB_ISP BIT(2) +#define TRB_NO_SNOOP BIT(3) +#define TRB_CHAIN BIT(4) +#define TRB_IOC BIT(5) +#define TRB_IDT BIT(6) +#define TRB_STAT BIT(7) +#define TRB_BEI BIT(9) +#define TRB_DIR_IN BIT(16) + +#define TRB_CONFIG_EP 12 + +#define TRBS_PER_SEGMENT 256 +#define TRBS_PER_EV_DEQ_UPDATE 100 +#define TRBS_PER_EVENT_SEGMENT 256 +#define TRB_MAX_BUFF_SHIFT 16 +#define TRB_MAX_BUFF_SIZE BIT(TRB_MAX_BUFF_SHIFT) +#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \ + ((addr) & (TRB_MAX_BUFF_SIZE - 1))) + +#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) +#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_LINK))) +#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_TR_NOOP))) + +#define TRB_TO_EP_ID(p) (((p) & GENMASK(20, 16)) >> 16) + +#define ERST_EHB BIT(3) +#define ERST_PTR_MASK GENMASK(3, 0) + +#define LAST_CTX_MASK ((unsigned int)GENMASK(31, 27)) +#define LAST_CTX(p) ((p) << 27) +#define SLOT_FLAG BIT(0) +#define EP0_FLAG BIT(1) + +#define EP_TYPE(p) ((p) << 3) + +#define ISOC_OUT_EP 1 +#define BULK_OUT_EP 2 +#define INT_OUT_EP 3 +#define CTRL_EP 4 +#define ISOC_IN_EP 5 +#define BULK_IN_EP 6 +#define INT_IN_EP 7 + +#define EP_ENABLED BIT(0) +#define EP_DIS_IN_PROGRESS BIT(1) +#define EP_HALTED BIT(2) +#define EP_STOPPED BIT(3) +#define EP_WEDGE BIT(4) +#define EP_HALTED_STATUS BIT(5) +#define EP_HAS_STREAMS BIT(6) 
+#define EP_UNCONFIGURED BIT(7) + +#define EP_STATE_MASK GENMASK(3, 0) +#define EP_STATE_DISABLED 0 +#define EP_STATE_RUNNING 1 +#define EP_STATE_HALTED 2 +#define EP_STATE_STOPPED 3 +#define EP_STATE_ERROR 4 +#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK) + + +#define GADGET_CMD_TIMEOUT (15 * 1000) + +#define CMD_RING_BUSY(p) ((p) & BIT(4)) +#define CMD_RING_RUNNING BIT(3) +#define CMD_RING_RSVD_BITS GENMASK(5, 0) + +#define SLOT_STATE GENMASK(31, 27) +#define GET_SLOT_STATE(p) (((p) & SLOT_STATE) >> 27) +#define SLOT_STATE_DISABLED 0 +#define SLOT_STATE_ENABLED 0 //czh test +#define SLOT_STATE_DEFAULT 1 +#define SLOT_STATE_ADDRESSED 2 +#define SLOT_STATE_CONFIGURED 3 + +#define EP0_HALTED_STATUS BIT(5) + +enum gadget_setup_dev { + SETUP_CONTEXT_ONLY, + SETUP_CONTEXT_ADDRESS, +}; + +enum gadget_ep0_stage { + GADGET_SETUP_STAGE, + GADGET_DATA_STAGE, + GADGET_STATUS_STAGE, +}; + +struct gadget_slot_ctx { + __le32 dev_info; + __le32 dev_port; + __le32 int_target; + __le32 dev_state; + __le32 reserved[4]; +}; + +struct gadget_input_control_ctx { + __le32 drop_flags; + __le32 add_flags; + __le32 rsvd2[6]; +}; + +struct gadget_link_trb { + __le64 segment_ptr; + __le32 intr_target; + __le32 control; +}; + +struct gadget_event_cmd { + __le64 cmd_trb; + __le32 status; + __le32 flags; +}; + +struct gadget_transfer_event { + __le64 buffer; + __le32 transfer_len; + __le32 flags; +}; + +struct gadget_generic_trb { + __le32 field[4]; +}; + +enum gadget_ring_type { + TYPE_CTRL = 0, + TYPE_ISOC, + TYPE_BULK, + TYPE_INTR, + TYPE_STREAM, + TYPE_COMMAND, + TYPE_EVENT, +}; + +struct phytium_device_intr_reg { + __le32 irq_pending; + __le32 irq_control; + __le32 erst_size; + __le32 rsvd; + __le64 erst_base; + __le64 erst_dequeue; +}; + +struct phytium_device_cap_regs { + __le32 hc_capbase; + __le32 hcs_params1; + __le32 hcs_params2; + __le32 hcs_params3; + __le32 hcc_params; + __le32 db_off; + __le32 run_regs_off; + __le32 hcc_params2; +}; + +struct 
phytium_device_op_regs { + __le32 command; + __le32 status; + __le32 page_size; + __le32 reserved1; + __le32 reserved2; + __le32 dnctrl; + __le64 cmd_ring; + __le32 reserved3[4]; + __le64 dcbaa_ptr; + __le32 config_reg; + __le32 reserved4[241]; + __le32 port_reg_base; +}; + +struct phytium_device_run_regs { + __le32 microframe_index; + __le32 rsvd[7]; + struct phytium_device_intr_reg ir_set[128]; +}; + +struct phytium_device_rev_cap { + __le32 ext_cap; + __le32 rtl_revision; + __le32 rx_buff_size; + __le32 tx_buff_size; + __le32 ep_supported; + __le32 ctrl_revision; +}; + +struct gadget_context_array { + __le64 dev_context_ptrs[GADGET_DEV_MAX_SLOTS + 1]; + dma_addr_t dma; +}; + +union gadget_trb { + struct gadget_link_trb link; + struct gadget_transfer_event trans_event; + struct gadget_event_cmd event_cmd; + struct gadget_generic_trb generic; +}; + +struct gadget_segment { + union gadget_trb *trbs; + struct gadget_segment *next; + dma_addr_t dma; + dma_addr_t bounce_dma; + void *bounce_buf; + unsigned int bounce_offs; + unsigned int bounce_len; +}; + +struct gadget_ring { + struct gadget_segment *first_seg; + struct gadget_segment *last_seg; + union gadget_trb *enqueue; + struct gadget_segment *enq_seg; + union gadget_trb *dequeue; + struct gadget_segment *deq_seg; + struct list_head td_list; + u32 cycle_state; + unsigned int steam_id; + unsigned int stream_active; + unsigned int stream_rejected; + int num_tds; + unsigned int num_segs; + unsigned int num_trbs_free; + unsigned int bounce_buf_len; + enum gadget_ring_type type; + bool last_td_was_short; + struct radix_tree_root *trb_address_map; +}; + +struct phytium_device_doorbell_array { + __le32 cmd_db; + __le32 ep_db; +}; + +struct gadget_erst_entry { + __le64 seg_addr; + __le32 seg_size; + __le32 rsvd; +}; + +struct gadget_erst { + struct gadget_erst_entry *entries; + unsigned int num_entries; + dma_addr_t erst_dma_addr; +}; + +struct phytium_device_20port_cap { + __le32 ext_cap; + __le32 port_regs1; + __le32 
port_regs2; + __le32 port_regs3; + __le32 port_regs4; + __le32 port_regs5; + __le32 port_regs6; +}; + +struct phytium_device_3xport_cap { + __le32 ext_cap; + __le32 mode_addr; + __le32 reserved[52]; + __le32 mode_2; +}; + +struct gadget_port_regs { + __le32 portsc; + __le32 portpmsc; + __le32 portli; + __le32 reserved; +}; + +struct gadget_port { + struct gadget_port_regs __iomem *regs; + u8 port_num; + u8 exist; + u8 maj_rev; + u8 min_rev; +}; + +struct gadget_container_ctx { + unsigned int type; + int size; + int ctx_size; + dma_addr_t dma; + u8 *bytes; +}; + +struct gadget_ep_ctx { + __le32 ep_info; + __le32 ep_info2; + __le64 deq; + __le32 tx_info; + __le32 reserved[3]; +}; + +struct gadget_stream_ctx { + __le64 stream_ring; + __le32 reserved[2]; +}; + +struct gadget_stream_info { + struct gadget_ring **stream_rings; + unsigned int num_streams; + struct gadget_stream_ctx *stream_ctx_array; + unsigned int num_stream_ctxs; + dma_addr_t ctx_array_dma; + struct radix_tree_root trb_address_map; + int td_count; + u8 first_prime_det; + u8 drbls_count; +}; + +struct gadget_ep { + struct usb_ep endpoint; + struct list_head pending_list; + struct phytium_device *pdev; + u8 number; + u8 idx; + u32 interval; + char name[20]; + u8 direction; + u8 buffering; + u8 buffering_period; + struct gadget_ep_ctx *in_ctx; + struct gadget_ep_ctx *out_ctx; + struct gadget_ring *ring; + struct gadget_stream_info stream_info; + unsigned int ep_state; + bool skip; +}; + +struct gadget_command { + struct gadget_container_ctx *in_ctx; + u32 status; + union gadget_trb *command_trb; +}; + +struct gadget_td { + struct list_head td_list; + struct gadget_request *preq; + struct gadget_segment *start_seg; + union gadget_trb *first_trb; + union gadget_trb *last_trb; + struct gadget_segment *bounce_seg; + bool request_length_set; + bool drbl; +}; + +struct gadget_dequeue_state { + struct gadget_segment *new_deq_seg; + union gadget_trb *new_deq_ptr; + int new_cycle_state; + unsigned int stream_id; 
+}; + +struct gadget_request { + struct gadget_td td; + struct usb_request request; + struct list_head list; + struct gadget_ep *pep; + u8 epnum; + unsigned direction:1; +}; + +struct phytium_device { + struct device *dev; + struct usb_gadget gadget; + struct usb_gadget_driver *gadget_driver; + void __iomem *regs; + void *setup_buf; + u8 setup_id; + u8 setup_speed; + struct usb_ctrlrequest setup; + + struct phytium_device_cap_regs __iomem *cap_regs; + struct phytium_device_op_regs __iomem *op_regs; + struct phytium_device_run_regs __iomem *run_regs; + struct phytium_device_rev_cap __iomem *rev_cap; + struct phytium_device_doorbell_array __iomem *dba; + struct phytium_device_intr_reg __iomem *ir_set; + struct phytium_device_20port_cap __iomem *port20_regs; + struct phytium_device_3xport_cap __iomem *port3x_regs; + + __u32 hcs_params1; + __u32 hcs_params3; + __u32 hcc_params; + + u8 ep0_expect_in; + u8 device_address; + + int may_wakeup; + u16 hci_version; + unsigned int gadget_state; + unsigned int link_state; + + spinlock_t lock; + + struct gadget_context_array *dcbaa; + struct gadget_ring *cmd_ring; + struct gadget_command cmd; + struct gadget_ring *event_ring; + struct gadget_erst erst; + int slot_id; + + struct gadget_container_ctx out_ctx; + struct gadget_container_ctx in_ctx; + struct gadget_ep eps[GADGET_ENDPOINTS_NUM]; + + struct dma_pool *device_pool; + struct dma_pool *segment_pool; + + struct gadget_port usb2_port; + struct gadget_port usb3_port; + struct gadget_port *active_port; + u16 test_mode; + + struct gadget_request ep0_preq; + enum gadget_ep0_stage ep0_stage; + u8 three_stage_setup; + + u8 u1_allowed:1; + u8 u2_allowed:1; + u8 usb2_hw_lpm_capable:1; +}; + +static inline struct gadget_request *next_request(struct list_head *list) +{ + return list_first_entry_or_null(list, struct gadget_request, list); +} + + +int phytium_usb_gadget_init(void *data); + +int phytium_gadget_find_next_ext_cap(void __iomem *base, u32 start, int id); + +int 
gadget_reset(struct phytium_device *pdev); + +int gadget_endpoint_init(struct phytium_device *pdev, + struct gadget_ep *pep, gfp_t mem_flags); + +int gadget_alloc_streams(struct phytium_device *pdev, struct gadget_ep *pep); + +struct gadget_input_control_ctx + *gadget_get_input_control_ctx(struct gadget_container_ctx *ctx); + +void gadget_free_endpoint_rings(struct phytium_device *pdev, + struct gadget_ep *pep); + +struct gadget_slot_ctx *gadget_get_slot_ctx(struct gadget_container_ctx *ctx); + +//gadget +u32 gadget_port_state_to_neutral(u32 state); +int gadget_wait_for_cmd_compl(struct phytium_device *pdev); +void disconnect_gadget(struct phytium_device *pdev); +bool gadget_last_trb_on_seg(struct gadget_segment *seg, union gadget_trb *trb); +bool gadget_last_trb_on_ring(struct gadget_ring *ring, struct gadget_segment *seg, + union gadget_trb *trb); +int gadget_halt(struct phytium_device *pdev); +int gadget_disable_slot(struct phytium_device *pdev); +int gadget_enable_slot(struct phytium_device *pdev); +void gadget_set_usb2_hardware_lpm(struct phytium_device *pdev, + struct usb_request *req, int enable); +unsigned int gadget_port_speed(unsigned int port_status); +void resume_gadget(struct phytium_device *pdev); +void suspend_gadget(struct phytium_device *pdev); +void gadget_irq_reset(struct phytium_device *pdev); +int gadget_ep_enqueue(struct gadget_ep *pep, struct gadget_request *preq); +void gadget_died(struct phytium_device *pdev); +int gadget_halt_endpoint(struct phytium_device *pdev, struct gadget_ep *pep, int value); +int gadget_reset_device(struct phytium_device *pdev); +int gadget_setup_device(struct phytium_device *pdev, enum gadget_setup_dev setup); +int ep_dequeue(struct gadget_ep *pep, struct gadget_request *preq); +void gadget_update_erst_dequeue(struct phytium_device *pdev, union gadget_trb *event_ring_deq, + u8 clear_ehb); + +//mem +int gadget_ring_expansion(struct phytium_device *pdev, struct gadget_ring *ring, + unsigned int num_trbs, gfp_t flags); 
+ +struct gadget_ep_ctx *gadget_get_ep_ctx(struct gadget_container_ctx *ctx, + unsigned int ep_index); + +void gadget_endpoint_zero(struct phytium_device *pdev, struct gadget_ep *pep); +struct gadget_ring *gadget_dma_to_transfer_ring(struct gadget_ep *pep, u64 address); + + + +//ring +void gadget_ring_cmd_db(struct phytium_device *pdev); +int gadget_cmd_stop_ep(struct phytium_device *pdev, struct gadget_ep *pep); +int gadget_cmd_flush_ep(struct phytium_device *pdev, struct gadget_ep *pep); +int gadget_remove_request(struct phytium_device *pdev, struct gadget_request *preq, + struct gadget_ep *pep); +void gadget_queue_slot_control(struct phytium_device *pdev, u32 trb_type); +void gadget_inc_deq(struct phytium_device *pdev, struct gadget_ring *ring); +void gadget_queue_reset_ep(struct phytium_device *pdev, unsigned int ep_index); +void gadget_queue_halt_endpoint(struct phytium_device *pdev, unsigned int ep_index); +void gadget_queue_reset_device(struct phytium_device *pdev); +void gadget_set_link_state(struct phytium_device *pdev, + __le32 __iomem *port_regs, u32 link_state); + +#endif diff --git a/drivers/usb/phytium_usb_v2/host.c b/drivers/usb/phytium_usb_v2/host.c new file mode 100644 index 0000000000000000000000000000000000000000..28151f1faf542f125dc2aacc9cbb7293f8f8d43c --- /dev/null +++ b/drivers/usb/phytium_usb_v2/host.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. + * + * Copyright (C) 2023 - 2024 Phytium. 
+ */ + +#include +#include +#include + +#include "host.h" +#include "core.h" +#include "otg.h" +#include "../host/xhci.h" +#include "../host/xhci-plat.h" + +#define HOST_RESOURCE_NUM 2 +#define XECP_AUX_CTRL_REG1 0x8120 +#define XECP_PORT_CAP_REG 0x8000 +#define CFG_RXDET_P3_EN BIT(15) +#define LPM_2_STB_SWITCH_EN BIT(25) +#define PM_RUNTIME_ALLOW BIT(0) + +static void plat_phytium_start(struct usb_hcd *hcd) +{ + struct xhci_hcd *host = hcd_to_xhci(hcd); + u32 value; + + if (host) { + value = readl(&host->op_regs->command); + value |= CMD_PM_INDEX; + writel(value, &host->op_regs->command); + } +} + +static int plat_phytium_resume_quirk(struct usb_hcd *hcd) +{ + plat_phytium_start(hcd); + + return 0; +} + +static const struct xhci_plat_priv plat_phytium_usb = { + .plat_start = plat_phytium_start, + .resume_quirk = plat_phytium_resume_quirk, +}; + +static int host_start(void *data) +{ + struct phytium_usb *phytium_usb; + struct platform_device *host; + struct usb_hcd *hcd; + int ret; + + phytium_usb = (struct phytium_usb *)data; + if (!phytium_usb) + return 0; + + phytium_usb_otg_host_on((void *)phytium_usb); + + host = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO); + if (!host) { + dev_err(phytium_usb->dev, "platform_device_alloc failed\n"); + return -ENOMEM; + } + + host->dev.parent = phytium_usb->dev; + phytium_usb->host_dev = host; + + ret = platform_device_add_resources(host, phytium_usb->host_res, + HOST_RESOURCE_NUM); + if (ret) { + dev_err(phytium_usb->dev, "platform_device_add_resources failed\n"); + goto err; + } + + phytium_usb->host_plat_data = kmemdup(&plat_phytium_usb, + sizeof(struct xhci_plat_priv), GFP_KERNEL); + if (!phytium_usb->host_plat_data) { + ret = -ENOMEM; + goto err; + } + + + phytium_usb->host_plat_data->quirks |= XHCI_RESET_ON_RESUME; + ret = platform_device_add_data(host, phytium_usb->host_plat_data, + sizeof(struct xhci_plat_priv)); + if (ret) + goto free_memory; + + ret = platform_device_add(host); + if (ret) { + 
dev_err(phytium_usb->dev, "register host failed\n"); + goto free_memory; + } + + hcd = platform_get_drvdata(host); + if (hcd) + phytium_usb->host_regs = hcd->regs; + + return 0; + +free_memory: + kfree(phytium_usb->host_plat_data); + +err: + platform_device_put(host); + + return ret; +} + +static void host_stop(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + phytium_usb_otg_host_off((void *)phytium_usb); + kfree(phytium_usb->host_plat_data); + platform_device_unregister(phytium_usb->host_dev); + phytium_usb->host_dev = NULL; + } +} + +int phytium_usb_host_init(void *data) +{ + struct phytium_usb_role_driver *role_driver; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + role_driver = devm_kzalloc(phytium_usb->dev, sizeof(*phytium_usb), GFP_KERNEL); + if (!role_driver) + return -ENOMEM; + + role_driver->start = host_start; + role_driver->stop = host_stop; + role_driver->state = ROLE_STATE_INACTIVE; + role_driver->name = "host"; + phytium_usb->roles[USB_ROLE_HOST] = role_driver; + } + return 0; +} + +MODULE_AUTHOR("Zhenhua Chen "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium USB2 DRD Controller Driver"); diff --git a/drivers/usb/phytium_usb_v2/host.h b/drivers/usb/phytium_usb_v2/host.h new file mode 100644 index 0000000000000000000000000000000000000000..87de49ae90f95c7e0288b8007c43d7e5ac07bad9 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/host.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PHYTIUM_USB_HOST_H__ +#define __LINUX_PHYTIUM_USB_HOST_H__ + +int phytium_usb_host_init(void *data); +#endif diff --git a/drivers/usb/phytium_usb_v2/mem.c b/drivers/usb/phytium_usb_v2/mem.c new file mode 100644 index 0000000000000000000000000000000000000000..6fb43dd6a7afb056e8731c247c4a667237bad941 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/mem.c @@ -0,0 +1,1040 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. 
+ * + * Copyright (C) 2023 - 2024 Phytium. + */ + +#include +#include +#include +#include "mem.h" +#include "gadget.h" +#include "ring.h" + +#define NUM_PORT_REGS 4 + +#define GADGET_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) +#define GADGET_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff) +#define GADGET_EXT_PORT_OFF(x) ((x) & 0xff) +#define GADGET_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) + +#define HCC_64BYTE_CONTEXT(p) ((p) & BIT(2)) +#define CTX_SIZE(hcc) (HCC_64BYTE_CONTEXT(hcc) ? 64 : 32) + +#define GADGET_CTX_TYPE_DEVICE 0x1 +#define GADGET_CTX_TYPE_INPUT 0x2 + +#define EXT_CAPS_PROTOCOL 2 +#define EXT_CAP_CFG_DEV_20PORT_CAP_ID 0xC1 +#define D_XEC_CFG_3XPORT_CAP 0xC0 + +#define MAX_DEVS GENMASK(7, 0) +#define CONFIG_U3E BIT(8) + +#define ERST_SIZE_MASK GENMASK(31, 16) +#define ERST_NUM_SEGS 1 +#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16) +#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE)) + + +#define ERROR_COUNT(p) (((p) & 0x3) << 1) + +#define MAX_BURST(p) (((p) << 8) & GENMASK(15, 8)) +#define MAX_PACKET(p) (((p) << 16) & GENMASK(31, 16)) + +#define EP_INTERVAL(p) (((p) << 16) & GENMASK(23, 16)) +#define EP_MULT(p) (((p) << 8) & GENMASK(9, 8)) + +#define EP_AVG_TRB_LENGTH(p) ((p) & GENMASK(15, 0)) + +#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) << 16) & GENMASK(31, 16)) +#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) & GENMASK(23, 16)) >> 16) << 24) + +#define GADGET_CTX_TYPE_DEVICE 0x1 +#define GADGET_CTX_TYPE_INPTU 0x2 + +#define SLOT_SPEED_FS (XDEV_FS << 10) +#define SLOT_SPEED_HS (XDEV_HS << 10) +#define SLOT_SPEED_SS (XDEV_SS << 10) +#define SLOT_SPEED_SSP (XDEV_SSP << 10) + +struct gadget_ep_ctx *gadget_get_ep_ctx(struct gadget_container_ctx *ctx, + unsigned int ep_index) +{ + ep_index++; + if (ctx->type == GADGET_CTX_TYPE_INPUT) + ep_index++; + + return (struct gadget_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size)); +} + +struct gadget_slot_ctx *gadget_get_slot_ctx(struct gadget_container_ctx *ctx) +{ + if (ctx->type == GADGET_CTX_TYPE_DEVICE) + return (struct 
gadget_slot_ctx *)ctx->bytes; + + return (struct gadget_slot_ctx *)(ctx->bytes + ctx->ctx_size); +} + + struct gadget_input_control_ctx + *gadget_get_input_control_ctx(struct gadget_container_ctx *ctx) +{ + if (ctx->type != GADGET_CTX_TYPE_INPUT) + return NULL; + + return (struct gadget_input_control_ctx *)ctx->bytes; +} + +static void gadget_link_segments(struct phytium_device *pdev, + struct gadget_segment *prev, struct gadget_segment *next, + enum gadget_ring_type type) +{ + struct gadget_link_trb *link; + u32 val; + + if (!prev || !next) + return; + + prev->next = next; + if (type != TYPE_EVENT) { + link = &prev->trbs[TRBS_PER_SEGMENT - 1].link; + link->segment_ptr = cpu_to_le64(next->dma); + + val = le32_to_cpu(link->control); + val &= ~TRB_TYPE_BITMASK; + val |= TRB_TYPE(TRB_LINK); + link->control = cpu_to_le32(val); + } +} + +static void gadget_segment_free(struct phytium_device *pdev, + struct gadget_segment *seg) +{ + if (seg->trbs) + dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma); + + kfree(seg->bounce_buf); + kfree(seg); +} + +static void gadget_free_segments_for_ring(struct phytium_device *pdev, + struct gadget_segment *first) +{ + struct gadget_segment *seg; + + seg = first->next; + + while (seg != first) { + struct gadget_segment *next = seg->next; + + gadget_segment_free(pdev, seg); + seg = next; + } + + gadget_segment_free(pdev, first); +} + +static struct gadget_segment *gadget_segment_alloc(struct phytium_device *pdev, + unsigned int cycle_state, unsigned int max_packet, gfp_t flags) +{ + struct gadget_segment *seg; + dma_addr_t dma; + int i; + + seg = kzalloc(sizeof(struct gadget_segment), flags); + if (!seg) + return NULL; + + seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma); + if (!seg->trbs) { + kfree(seg); + return NULL; + } + + if (max_packet) { + seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA); + if (!seg->bounce_buf) + goto free_dma; + } + + if (cycle_state == 0) { + for (i = 0; i < TRBS_PER_SEGMENT; i++) + 
seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); + } + + seg->dma = dma; + seg->next = NULL; + + return seg; + +free_dma: + dma_pool_free(pdev->segment_pool, seg->trbs, dma); + kfree(seg); + + return NULL; +} + +static int gadget_alloc_segments_for_ring(struct phytium_device *pdev, + struct gadget_segment **first, struct gadget_segment **last, + unsigned int num_segs, unsigned int cycle_state, + enum gadget_ring_type type, unsigned int max_packet, + gfp_t flags) +{ + struct gadget_segment *prev; + + prev = gadget_segment_alloc(pdev, cycle_state, max_packet, flags); + if (!prev) + return -ENOMEM; + + num_segs--; + *first = prev; + + while (num_segs > 0) { + struct gadget_segment *next; + + next = gadget_segment_alloc(pdev, cycle_state, + max_packet, flags); + if (!next) { + gadget_free_segments_for_ring(pdev, *first); + return -ENOMEM; + } + + gadget_link_segments(pdev, prev, next, type); + + prev = next; + num_segs--; + } + + gadget_link_segments(pdev, prev, *first, type); + *last = prev; + + return 0; +} + +void gadget_initialize_ring_info(struct gadget_ring *ring) +{ + ring->enqueue = ring->first_seg->trbs; + ring->enq_seg = ring->first_seg; + ring->dequeue = ring->enqueue; + ring->deq_seg = ring->first_seg; + + ring->cycle_state = 1; + ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; +} + +static struct gadget_ring *gadget_ring_alloc(struct phytium_device *pdev, + unsigned int num_segs, enum gadget_ring_type type, + unsigned int max_packet, gfp_t flags) +{ + struct gadget_ring *ring; + int ret; + + ring = kzalloc(sizeof(struct gadget_ring), flags); + if (!ring) + return NULL; + + ring->num_segs = num_segs; + ring->bounce_buf_len = max_packet; + INIT_LIST_HEAD(&ring->td_list); + ring->type = type; + + if (num_segs == 0) + return ring; + + ret = gadget_alloc_segments_for_ring(pdev, &ring->first_seg, + &ring->last_seg, num_segs, 1, type, + max_packet, flags); + if (ret) + goto fail; + + if (type != TYPE_EVENT) + 
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= + cpu_to_le32(LINK_TOGGLE); + + gadget_initialize_ring_info(ring); + + return ring; + +fail: + kfree(ring); + + return NULL; +} + +static int gadget_insert_segment_mapping(struct radix_tree_root *addr_map, + struct gadget_ring *ring, struct gadget_segment *seg, gfp_t mem_flags) +{ + unsigned long key; + int ret; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + + if (radix_tree_lookup(addr_map, key)) + return 0; + + ret = radix_tree_maybe_preload(mem_flags); + if (ret) + return ret; + + ret = radix_tree_insert(addr_map, key, ring); + radix_tree_preload_end(); + + return ret; +} + +static void gadget_remove_segment_mapping(struct radix_tree_root *addr_map, + struct gadget_segment *seg) +{ + unsigned long key; + + key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); + if (radix_tree_lookup(addr_map, key)) + radix_tree_delete(addr_map, key); +} + +static void gadget_remove_stream_mapping(struct gadget_ring *ring) +{ + struct gadget_segment *seg; + + seg = ring->first_seg; + do { + gadget_remove_segment_mapping(ring->trb_address_map, seg); + seg = seg->next; + } while (seg != ring->first_seg); +} + +static void gadget_ring_free(struct phytium_device *pdev, + struct gadget_ring *ring) +{ + if (!ring) + return; + + if (ring->first_seg) { + if (ring->type == TYPE_STREAM) + gadget_remove_stream_mapping(ring); + + gadget_free_segments_for_ring(pdev, ring->first_seg); + } + + kfree(ring); +} + +static void gadget_free_stream_ctx(struct phytium_device *pdev, + struct gadget_ep *pep) +{ + dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array, + pep->stream_info.ctx_array_dma); +} + +static void gadget_free_stream_info(struct phytium_device *pdev, + struct gadget_ep *pep) +{ + struct gadget_stream_info *stream_info = &pep->stream_info; + struct gadget_ring *cur_ring; + int cur_stream; + + if (!(pep->ep_state & EP_HAS_STREAMS)) + return; + + for (cur_stream = 1; cur_stream < 
stream_info->num_streams; + cur_stream++) { + cur_ring = stream_info->stream_rings[cur_stream]; + if (cur_ring) { + gadget_ring_free(pdev, cur_ring); + stream_info->stream_rings[cur_stream] = NULL; + } + } + + if (stream_info->stream_ctx_array) + gadget_free_stream_ctx(pdev, pep); + + kfree(stream_info->stream_rings); + pep->ep_state &= ~EP_HAS_STREAMS; +} + +void gadget_free_endpoint_rings(struct phytium_device *pdev, + struct gadget_ep *pep) +{ + gadget_ring_free(pdev, pep->ring); + pep->ring = NULL; + gadget_free_stream_info(pdev, pep); +} + +static int gadget_alloc_erst(struct phytium_device *pdev, + struct gadget_ring *evt_ring, + struct gadget_erst *erst) +{ + struct gadget_erst_entry *entry; + struct gadget_segment *seg; + unsigned int val; + size_t size; + + size = sizeof(struct gadget_erst_entry) * evt_ring->num_segs; + erst->entries = dma_alloc_coherent(pdev->dev, size, + &erst->erst_dma_addr, GFP_KERNEL); + if (!erst->entries) + return -ENOMEM; + + erst->num_entries = evt_ring->num_segs; + seg = evt_ring->first_seg; + for (val = 0; val < evt_ring->num_segs; val++) { + entry = &erst->entries[val]; + entry->seg_addr = cpu_to_le64(seg->dma); + entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); + entry->rsvd = 0; + seg = seg->next; + } + + return 0; +} + +static void gadget_free_erst(struct phytium_device *pdev, + struct gadget_erst *erst) +{ + size_t size = sizeof(struct gadget_erst_entry) * (erst->num_entries); + struct device *dev = pdev->dev; + + if (erst->entries) + dma_free_coherent(dev, size, erst->entries, + erst->erst_dma_addr); + + erst->entries = NULL; +} + +static void gadget_set_event_deq(struct phytium_device *pdev) +{ + dma_addr_t deq; + u64 temp; + + deq = gadget_trb_virt_to_dma(pdev->event_ring->deq_seg, + pdev->event_ring->dequeue); + + temp = lo_hi_readq(&pdev->ir_set->erst_dequeue); + temp &= ERST_PTR_MASK; + temp &= ~ERST_EHB; + lo_hi_writeq(((u64)deq & (u64)~ERST_PTR_MASK) | temp, + &pdev->ir_set->erst_dequeue); +} + +static void 
gadget_add_in_port(struct phytium_device *pdev, + struct gadget_port *port, __le32 __iomem *addr) +{ + u32 temp, port_offset, port_count; + + temp = readl(addr); + port->maj_rev = GADGET_EXT_PORT_MAJOR(temp); + port->min_rev = GADGET_EXT_PORT_MINOR(temp); + + temp = readl(addr + 2); + port_offset = GADGET_EXT_PORT_OFF(temp); + port_count = GADGET_EXT_PORT_COUNT(temp); + + port->port_num = port_offset; + port->exist = 1; +} + +static int gadget_setup_port_arrays(struct phytium_device *pdev) +{ + void __iomem *base; + u32 offset; + int i; + + base = &pdev->cap_regs->hc_capbase; + offset = phytium_gadget_find_next_ext_cap(base, 0, + EXT_CAP_CFG_DEV_20PORT_CAP_ID); + pdev->port20_regs = base + offset; + + offset = phytium_gadget_find_next_ext_cap(base, 0, + D_XEC_CFG_3XPORT_CAP); + pdev->port3x_regs = base + offset; + + offset = 0; + base = &pdev->cap_regs->hc_capbase; + + for (i = 0; i < 2; i++) { + u32 temp; + + offset = phytium_gadget_find_next_ext_cap(base, offset, + EXT_CAPS_PROTOCOL); + temp = readl(base + offset); + + if (GADGET_EXT_PORT_MAJOR(temp) == 0x03 && + !pdev->usb3_port.port_num) + gadget_add_in_port(pdev, &pdev->usb3_port, + base + offset); + + if (GADGET_EXT_PORT_MAJOR(temp) == 0x02 && + !pdev->usb2_port.port_num) + gadget_add_in_port(pdev, &pdev->usb2_port, + base + offset); + } + + pdev->usb2_port.regs = (struct gadget_port_regs __iomem *) + (&pdev->op_regs->port_reg_base + NUM_PORT_REGS * (pdev->usb2_port.port_num - 1)); + + pdev->usb3_port.regs = (struct gadget_port_regs __iomem *) + (&pdev->op_regs->port_reg_base + NUM_PORT_REGS * (pdev->usb3_port.port_num - 1)); + + return 0; +} + +static int gadget_init_device_ctx(struct phytium_device *pdev) +{ + int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 
2048 : 1024; + + pdev->out_ctx.type = GADGET_CTX_TYPE_DEVICE; + pdev->out_ctx.size = size; + pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params); + pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC, + &pdev->out_ctx.dma); + if (!pdev->out_ctx.bytes) + return -ENOMEM; + + pdev->in_ctx.type = GADGET_CTX_TYPE_INPUT; + pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size; + pdev->in_ctx.size = size + pdev->out_ctx.ctx_size; + pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC, + &pdev->in_ctx.dma); + + if (!pdev->in_ctx.bytes) { + dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes, + pdev->out_ctx.dma); + return -ENOMEM; + } + + return 0; +} + +static int gadget_alloc_priv_device(struct phytium_device *pdev) +{ + int ret; + + ret = gadget_init_device_ctx(pdev); + if (ret) + return ret; + + pdev->eps[0].ring = gadget_ring_alloc(pdev, 2, TYPE_CTRL, 0, + GFP_ATOMIC); + if (!pdev->eps[0].ring) + goto fail; + + pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma); + pdev->cmd.in_ctx = &pdev->in_ctx; + + return 0; + +fail: + dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes, + pdev->out_ctx.dma); + dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes, + pdev->in_ctx.dma); + + return ret; +} + +int gadget_mem_init(void *data) +{ + unsigned int val; + u32 page_size; + dma_addr_t dma; + int ret = -ENOMEM; + u64 val_64; + struct phytium_device *pdev = (struct phytium_device *)data; + + if (!pdev) + return 0; + + page_size = 1 << 12; + + val = readl(&pdev->op_regs->config_reg); + val |= ((val & ~MAX_DEVS) | GADGET_DEV_MAX_SLOTS) | CONFIG_U3E; + writel(val, &pdev->op_regs->config_reg); + + pdev->dcbaa = dma_alloc_coherent(pdev->dev, sizeof(*pdev->dcbaa), + &dma, GFP_KERNEL); + if (!pdev->dcbaa) + return -ENOMEM; + + pdev->dcbaa->dma = dma; + lo_hi_writeq(dma, &pdev->op_regs->dcbaa_ptr); + + pdev->segment_pool = dma_pool_create("gadget input/output contexts", + pdev->dev, TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, + page_size); + if 
(!pdev->segment_pool) + goto release_dcbaa; + + pdev->device_pool = dma_pool_create("gadget input/output contexts", + pdev->dev, GADGET_CTX_SIZE, 64, page_size); + if (!pdev->device_pool) + goto destroy_segment_pool; + + pdev->cmd_ring = gadget_ring_alloc(pdev, 1, TYPE_COMMAND, 0, + GFP_KERNEL); + if (!pdev->cmd_ring) + goto destroy_device_pool; + + val_64 = lo_hi_readq(&pdev->op_regs->cmd_ring); + val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) | + (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) | + pdev->cmd_ring->cycle_state; + lo_hi_writeq(val_64, &pdev->op_regs->cmd_ring); + + val = readl(&pdev->cap_regs->db_off); + val &= DBOFF_MASK; + pdev->dba = (void __iomem *)pdev->cap_regs + val; + pdev->ir_set = &pdev->run_regs->ir_set[0]; + + pdev->event_ring = gadget_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT, + 0, GFP_KERNEL); + if (!pdev->event_ring) + goto free_cmd_ring; + + ret = gadget_alloc_erst(pdev, pdev->event_ring, &pdev->erst); + if (ret) + goto free_event_ring; + + val = readl(&pdev->ir_set->erst_size); + val &= ERST_SIZE_MASK; + val |= ERST_NUM_SEGS; + writel(val, &pdev->ir_set->erst_size); + + val_64 = lo_hi_readq(&pdev->ir_set->erst_base); + val_64 &= ERST_PTR_MASK; + val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK); + lo_hi_writeq(val_64, &pdev->ir_set->erst_base); + + gadget_set_event_deq(pdev); + + ret = gadget_setup_port_arrays(pdev); + if (ret) + goto free_erst; + + ret = gadget_alloc_priv_device(pdev); + if (ret) { + dev_err(pdev->dev, "gadget_alloc_priv_device failed\n"); + goto free_erst; + } + + return 0; +free_erst: + gadget_free_erst(pdev, &pdev->erst); +free_event_ring: + gadget_ring_free(pdev, pdev->event_ring); +free_cmd_ring: + gadget_ring_free(pdev, pdev->cmd_ring); +destroy_device_pool: + dma_pool_destroy(pdev->device_pool); +destroy_segment_pool: + dma_pool_destroy(pdev->segment_pool); + +release_dcbaa: + dma_free_coherent(pdev->dev, sizeof(*pdev->dcbaa), pdev->dcbaa, + pdev->dcbaa->dma); + + gadget_reset(pdev); + + 
return ret; +} + +static u32 gadget_get_endpoint_type(const struct usb_endpoint_descriptor *desc) +{ + int in; + + in = usb_endpoint_dir_in(desc); + + switch (usb_endpoint_type(desc)) { + case USB_ENDPOINT_XFER_CONTROL: + return CTRL_EP; + case USB_ENDPOINT_XFER_BULK: + return in ? BULK_IN_EP : BULK_OUT_EP; + case USB_ENDPOINT_XFER_ISOC: + return in ? ISOC_IN_EP : ISOC_OUT_EP; + case USB_ENDPOINT_XFER_INT: + return in ? INT_IN_EP : INT_OUT_EP; + } + + return 0; +} + +static unsigned int gadget_parse_exponent_interval(struct usb_gadget *g, + struct gadget_ep *pep) +{ + unsigned int interval; + + interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1; + if (interval != pep->endpoint.desc->bInterval - 1) + dev_warn(&g->dev, "ep %s -rounding interval to %d %s frames\n", + pep->name, 1 << interval, + g->speed == USB_SPEED_FULL ? "" : "micro"); + + if (g->speed == USB_SPEED_FULL) + interval += 3; + + if (interval > 12) + interval = 12; + + return interval; +} + +static unsigned int gadget_microframes_to_exponent(struct usb_gadget *g, + struct gadget_ep *pep, unsigned int desc_interval, + unsigned int min_exponent, unsigned int max_exponent) +{ + unsigned int interval; + + interval = fls(desc_interval) - 1; + + return clamp_val(interval, min_exponent, max_exponent); +} + +static u32 gadget_get_max_esit_payload(struct usb_gadget *g, + struct gadget_ep *pep) +{ + int max_packet; + int max_burst; + + if (usb_endpoint_xfer_control(pep->endpoint.desc) || + usb_endpoint_xfer_bulk(pep->endpoint.desc)) + return 0; + + if (g->speed >= USB_SPEED_SUPER_PLUS && + USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes)) + return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval); + else if (g->speed >= USB_SPEED_SUPER) + return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval); + + max_packet = usb_endpoint_maxp(pep->endpoint.desc); + max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc); + + return max_packet * max_burst; +} + +static unsigned int 
gadget_get_endpoint_interval(struct usb_gadget *g, + struct gadget_ep *pep) +{ + unsigned int interval = 0; + + switch (g->speed) { + case USB_SPEED_HIGH: + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + if (usb_endpoint_xfer_int(pep->endpoint.desc) || + usb_endpoint_xfer_isoc(pep->endpoint.desc)) + interval = gadget_parse_exponent_interval(g, pep); + break; + case USB_SPEED_FULL: + if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) + interval = gadget_parse_exponent_interval(g, pep); + else if (usb_endpoint_xfer_int(pep->endpoint.desc)) { + interval = pep->endpoint.desc->bInterval << 3; + interval = gadget_microframes_to_exponent(g, pep, + interval, 3, 10); + } + break; + default: + break; + } + + return interval; +} + +static u32 gadget_get_endpoint_mult(struct usb_gadget *g, struct gadget_ep *pep) +{ + if (g->speed < USB_SPEED_SUPER || + !usb_endpoint_xfer_isoc(pep->endpoint.desc)) + return 0; + + return pep->endpoint.comp_desc->bmAttributes; +} + +static u32 gadget_get_endpoint_max_burst(struct usb_gadget *g, + struct gadget_ep *pep) +{ + if (g->speed >= USB_SPEED_SUPER) + return pep->endpoint.comp_desc->bMaxBurst; + + if (g->speed == USB_SPEED_HIGH && + (usb_endpoint_xfer_isoc(pep->endpoint.desc) || + usb_endpoint_xfer_int(pep->endpoint.desc))) + return usb_endpoint_maxp_mult(pep->endpoint.desc) - 1; + + return 0; +} + +int gadget_endpoint_init(struct phytium_device *pdev, + struct gadget_ep *pep, gfp_t mem_flags) +{ + enum gadget_ring_type ring_type; + struct gadget_ep_ctx *ep_ctx; + unsigned int err_count = 0; + unsigned int avg_trb_len, max_packet, max_burst, interval, mult; + u32 max_esit_payload, endpoint_type; + int ret; + + ep_ctx = pep->in_ctx; + + endpoint_type = gadget_get_endpoint_type(pep->endpoint.desc); + if (!endpoint_type) + return -EINVAL; + + ring_type = usb_endpoint_type(pep->endpoint.desc); + + max_esit_payload = gadget_get_max_esit_payload(&pdev->gadget, pep); + interval = gadget_get_endpoint_interval(&pdev->gadget, pep); + mult = 
gadget_get_endpoint_mult(&pdev->gadget, pep); + max_packet = usb_endpoint_maxp(pep->endpoint.desc); + max_burst = gadget_get_endpoint_max_burst(&pdev->gadget, pep); + avg_trb_len = max_esit_payload; + + if (!usb_endpoint_xfer_isoc(pep->endpoint.desc)) + err_count = 3; + + if (usb_endpoint_xfer_bulk(pep->endpoint.desc) && + pdev->gadget.speed == USB_SPEED_HIGH) + max_packet = 512; + + if (usb_endpoint_xfer_control(pep->endpoint.desc)) + avg_trb_len = 8; + + pep->ring = gadget_ring_alloc(pdev, 2, ring_type, max_packet, + mem_flags); + if (!pep->ring) + return -ENOMEM; + + pep->skip = false; + + ep_ctx->ep_info = cpu_to_le32(EP_INTERVAL(interval) | EP_MULT(mult) | + EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload)); + + ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) | + MAX_PACKET(max_packet) | MAX_BURST(max_burst) | + ERROR_COUNT(err_count)); + + ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma | + pep->ring->cycle_state); + + ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) | + EP_AVG_TRB_LENGTH(avg_trb_len)); + + if (usb_endpoint_xfer_bulk(pep->endpoint.desc) && + pdev->gadget.speed > USB_SPEED_HIGH) { + ret = gadget_alloc_streams(pdev, pep); + if (ret < 0) + return ret; + } + + return 0; +} + +static int gadget_update_stream_segment_mapping(struct radix_tree_root *trb_address_map, + struct gadget_ring *ring, struct gadget_segment *first_seg, + struct gadget_segment *last_seg, gfp_t mem_flags) +{ + struct gadget_segment *failed_seg; + struct gadget_segment *seg; + int ret; + + seg = first_seg; + do { + ret = gadget_insert_segment_mapping(trb_address_map, ring, seg, mem_flags); + if (ret) + goto remove_streams; + + if (seg == last_seg) + return 0; + + seg = seg->next; + } while (seg != first_seg); + + return 0; + +remove_streams: + failed_seg = seg; + seg = first_seg; + do { + gadget_remove_segment_mapping(trb_address_map, seg); + if (seg == failed_seg) + return ret; + seg = seg->next; + } while (seg != first_seg); + + return ret; +} + 
+static void gadget_link_rings(struct phytium_device *pdev, struct gadget_ring *ring, + struct gadget_segment *first, struct gadget_segment *last, unsigned int num_segs) +{ + struct gadget_segment *next; + + if (!ring || !first || !last) + return; + + next = ring->enq_seg->next; + gadget_link_segments(pdev, ring->enq_seg, first, ring->type); + gadget_link_segments(pdev, last, next, ring->type); + ring->num_segs += num_segs; + ring->num_trbs_free = (TRBS_PER_SEGMENT - 1) * num_segs; + + if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { + ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= + ~cpu_to_le32(LINK_TOGGLE); + last->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); + ring->last_seg = last; + } +} + +int gadget_ring_expansion(struct phytium_device *pdev, struct gadget_ring *ring, + unsigned int num_trbs, gfp_t flags) +{ + unsigned int num_segs_needed; + struct gadget_segment *first; + struct gadget_segment *last; + unsigned int num_segs; + int ret; + + num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1)/ + (TRBS_PER_SEGMENT - 1); + + num_segs = max(ring->num_segs, num_segs_needed); + + ret = gadget_alloc_segments_for_ring(pdev, &first, &last, num_segs, + ring->cycle_state, ring->type, ring->bounce_buf_len, flags); + if (ret) + return -ENOMEM; + + if (ring->type == TYPE_STREAM) + ret = gadget_update_stream_segment_mapping(ring->trb_address_map, ring, + first, last, flags); + if (ret) { + gadget_free_segments_for_ring(pdev, first); + return ret; + } + + gadget_link_rings(pdev, ring, first, last, num_segs); + + return 0; +} + +void gadget_endpoint_zero(struct phytium_device *pdev, struct gadget_ep *pep) +{ + pep->in_ctx->ep_info = 0; + pep->in_ctx->ep_info2 = 0; + pep->in_ctx->deq = 0; + pep->in_ctx->tx_info = 0; +} + +static void gadget_free_priv_device(struct phytium_device *pdev) +{ + pdev->dcbaa->dev_context_ptrs[1] = 0; + + gadget_free_endpoint_rings(pdev, &pdev->eps[0]); + + if (pdev->in_ctx.bytes) + 
dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes, pdev->in_ctx.dma); + + if (pdev->out_ctx.bytes) + dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes, pdev->out_ctx.dma); + + pdev->in_ctx.bytes = NULL; + pdev->out_ctx.bytes = NULL; +} + +void gadget_mem_cleanup(void *data) +{ + struct phytium_device *pdev = (struct phytium_device *)data; + struct device *dev = pdev->dev; + + gadget_free_priv_device(pdev); + gadget_free_erst(pdev, &pdev->erst); + + if (pdev->event_ring) + gadget_ring_free(pdev, pdev->event_ring); + pdev->event_ring = NULL; + + if (pdev->cmd_ring) + gadget_ring_free(pdev, pdev->cmd_ring); + pdev->cmd_ring = NULL; + + dma_pool_destroy(pdev->segment_pool); + pdev->segment_pool = NULL; + + dma_pool_destroy(pdev->device_pool); + pdev->device_pool = NULL; + + dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa, pdev->dcbaa->dma); + pdev->dcbaa = NULL; + + pdev->usb2_port.exist = 0; + pdev->usb3_port.exist = 0; + pdev->usb2_port.port_num = 0; + pdev->usb3_port.port_num = 0; + pdev->active_port = NULL; +} + +int gadget_setup_addressable_priv_dev(void *data) +{ + struct phytium_device *pdev = (struct phytium_device *)data; + struct gadget_slot_ctx *slot_ctx; + struct gadget_ep_ctx *ep0_ctx; + u32 max_packets, port; + + ep0_ctx = gadget_get_ep_ctx(&pdev->in_ctx, 0); + slot_ctx = gadget_get_slot_ctx(&pdev->in_ctx); + + slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); + + switch (pdev->gadget.speed) { + case USB_SPEED_SUPER_PLUS: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP); + max_packets = MAX_PACKET(512); + break; + case USB_SPEED_SUPER: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS); + max_packets = MAX_PACKET(512); + break; + case USB_SPEED_HIGH: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS); + max_packets = MAX_PACKET(64); + break; + case USB_SPEED_FULL: + slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS); + max_packets = MAX_PACKET(64); + break; + default: + return -EINVAL; + } + + port = DEV_PORT(pdev->active_port->port_num); + 
slot_ctx->dev_port |= cpu_to_le32(port); + slot_ctx->dev_state = cpu_to_le32((pdev->device_address & DEV_ADDR_MASK)); + ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8)); + ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP)); + ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) | max_packets); + ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma | + pdev->eps[0].ring->cycle_state); + + return 0; +} + +void gadget_copy_ep0_dequeue_into_input_ctx(void *data) +{ + struct phytium_device *pdev = (struct phytium_device *)data; + struct gadget_ring *ep_ring = pdev->eps[0].ring; + struct gadget_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx; + dma_addr_t dma; + + dma = gadget_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue); + ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state); +} + +struct gadget_ring *gadget_dma_to_transfer_ring(struct gadget_ep *pep, u64 address) +{ + if (pep->ep_state & EP_HAS_STREAMS) + return radix_tree_lookup(&pep->stream_info.trb_address_map, + address >> TRB_SEGMENT_SHIFT); + + return pep->ring; +} + diff --git a/drivers/usb/phytium_usb_v2/mem.h b/drivers/usb/phytium_usb_v2/mem.h new file mode 100644 index 0000000000000000000000000000000000000000..8e30d18b5c616fa6dd8de15b15702db54b08d588 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/mem.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PHYTIUM_USB_MEM_H__ +#define __LINUX_PHYTIUM_USB_MEM_H__ + +int gadget_mem_init(void *data); +void gadget_mem_cleanup(void *data); +int gadget_setup_addressable_priv_dev(void *data); +void gadget_copy_ep0_dequeue_into_input_ctx(void *data); + +#endif + diff --git a/drivers/usb/phytium_usb_v2/otg.c b/drivers/usb/phytium_usb_v2/otg.c new file mode 100644 index 0000000000000000000000000000000000000000..1f8c4d45fd68cd2ecf99a88d78f745a183b7bac7 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/otg.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. 
+ * + * Copyright (C) 2023 - 2024 Phytium. + */ + +#include +#include +#include +#include +#include + +#include "core.h" +#include "otg.h" + +#define OTG_NRDY BIT(11) +#define OTG_HOST_MODE BIT(12) +#define OTG_DEVICE_MODE BIT(13) + +#define OVERRIDE_IDPULLUP BIT(0) +#define OVERRIDE_SESS_VLD_SEL BIT(10) + +#define OTGIEN_ID_CHANGE BIT(0) +#define OTGIEN_VBUSVALID_RISE BIT(4) +#define OTGIEN_VBUSVALID_FALL BIT(5) + +#define OTGIEN_ID_CHANGE_INT BIT(0) +#define OTGIEN_VBUSVALID_RISE_INT BIT(4) +#define OTGIEN_VBUSVALID_FALL_INT BIT(5) + +#define OTGCMD_DEV_BUS_REQ BIT(0) +#define OTGCMD_HOST_BUS_REQ BIT(1) +#define OTGCMD_OTG_DIS BIT(3) +#define OTGCMD_DEV_BUS_DROP BIT(8) +#define OTGCMD_HOST_BUS_DROP BIT(9) + +#define OTGSTS_HOST_READY BIT(27) +#define OTGSTS_DEV_READY BIT(26) +#define OTGSTS_VBUS_VALID BIT(1) +#define OTGSTS_ID_VALUE BIT(0) + +#define OTGSTATE_HOST_STATE_MASK GENMASK(5, 3) +#define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0) + +#define ID_HOST 0 +#define ID_PERIPHERAL 1 +//0 NONE 1:HOST 2:DEVICE + +static int switch_role(struct phytium_usb *phytium_usb, enum usb_role role) +{ + int ret = 0; + + if (phytium_usb->dr_mode == USB_DR_MODE_HOST) { + switch (role) { + case USB_ROLE_NONE: + case USB_ROLE_HOST: + break; + default: + return ret; + } + } + + if (phytium_usb->dr_mode == USB_DR_MODE_PERIPHERAL) { + switch (role) { + case USB_ROLE_NONE: + case USB_ROLE_DEVICE: + break; + default: + return ret; + } + } + + phytium_usb_role_stop(phytium_usb); + ret = phytium_usb_role_start(phytium_usb, role); + if (ret) + dev_err(phytium_usb->dev, "set role %d has failed\n", role); + + return ret; +} + +static ssize_t role_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phytium_usb *phytium_usb = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", phytium_usb->role); +} + +static ssize_t role_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct phytium_usb *phytium_usb = 
dev_get_drvdata(dev); + unsigned int role; + int ret; + + ret = kstrtouint(buf, 10, &role); + if (ret) + return ret; + + if (role != phytium_usb->role) { + dev_err(phytium_usb->dev, "role switch %d -> %d\n", phytium_usb->role, role); + switch_role(phytium_usb, (enum usb_role)role); + } + + return count; +} + +static DEVICE_ATTR_RW(role); + +static struct attribute *phytium_attributes[] = { + &dev_attr_role.attr, + NULL +}; + +static const struct attribute_group phytium_attr_group = { + .attrs = phytium_attributes, +}; + +bool phytium_usb_otg_power_is_lost(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + if (!(readl(&phytium_usb->otg_regs->simulate) & BIT(0))) + return true; + } + + return false; +} + +int phytium_usb_otg_is_host(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + if (phytium_usb->dr_mode == USB_DR_MODE_HOST) + return true; + else if (phytium_usb_otg_get_id(data) == ID_HOST) + return true; + } + + return false; +} + +int phytium_usb_otg_is_device(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + if (phytium_usb->dr_mode == USB_DR_MODE_PERIPHERAL) + return true; + else if (phytium_usb->dr_mode == USB_DR_MODE_OTG) + if (phytium_usb_otg_get_id(data) == ID_PERIPHERAL) + return true; + } + + return false; +} + +int phytium_usb_otg_get_id(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + int id = 0; + + if (phytium_usb) + id = !!(readl(&phytium_usb->otg_regs->sts) & OTGSTS_ID_VALUE); + + dev_err(phytium_usb->dev, "OTG ID: %d\n", id); + + return id; +} + +int phytium_usb_otg_get_vbus(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + int vbus = 0; + + if (phytium_usb) + vbus = !!(readl(&phytium_usb->otg_regs->sts) & OTGSTS_VBUS_VALID); + + dev_err(phytium_usb->dev, "OTG VBUS: %d\n", vbus); + + return vbus; +} + +static irqreturn_t 
phytium_usb_otg_thread_irq(int irq, void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) + phytium_usb_hw_role_switch(phytium_usb); + + return IRQ_HANDLED; +} + +static irqreturn_t phytium_usb_otg_irq(int irq, void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + irqreturn_t ret = IRQ_NONE; + u32 reg; + + if (phytium_usb) { + if (phytium_usb->dr_mode != USB_DR_MODE_OTG) + return IRQ_NONE; + + if (phytium_usb->in_lpm) + return ret; + + reg = readl(&phytium_usb->otg_regs->ivect); + if (!reg) + return ret; + + if (reg & OTGIEN_ID_CHANGE_INT) { + dev_err(phytium_usb->dev, "OTG IRQ: new ID:%d\n", + phytium_usb_otg_get_id(phytium_usb)); + ret = IRQ_WAKE_THREAD; + } + + if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) { + dev_err(phytium_usb->dev, "OTG IRQ: new VBUS: %d\n", + phytium_usb_otg_get_vbus(phytium_usb)); + ret = IRQ_WAKE_THREAD; + } + + writel(~0, &phytium_usb->otg_regs->ivect); + } + + return ret; +} + +int phytium_usb_otg_init(void *data) +{ + u32 state; + int ret; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + phytium_usb->otg_regs = devm_ioremap_resource(phytium_usb->dev, + &phytium_usb->otg_res); + if (IS_ERR(phytium_usb->otg_regs)) + return PTR_ERR(phytium_usb->otg_regs); + + state = readl(&phytium_usb->otg_regs->sts); + if (state & OTG_NRDY) { + dev_warn(phytium_usb->dev, "controller is not ready yet\n"); + return -ENODEV; + } + + phytium_usb->dr_mode = USB_DR_MODE_OTG;//USB_DR_MODE_PERIPHERAL; + if (OTG_HOST_MODE & state) + phytium_usb->dr_mode = USB_DR_MODE_HOST; + else if (OTG_DEVICE_MODE & state) + phytium_usb->dr_mode = USB_DR_MODE_PERIPHERAL; + + ret = devm_request_threaded_irq(phytium_usb->dev, phytium_usb->otg_irq, + phytium_usb_otg_irq, phytium_usb_otg_thread_irq, + IRQF_SHARED, "phytium_usb_otg", phytium_usb); + if (ret) { + dev_warn(phytium_usb->dev, "request otg irq failed with:%d\n", ret); + return ret; + } + 
+ ret = devm_device_add_group(phytium_usb->dev, &phytium_attr_group); + if (ret) + dev_err(phytium_usb->dev, "Create sysfs attributes, err: %d\n", ret); + } + + return 0; +} + +int phytium_usb_otg_enable_iddig(void *data) +{ + u32 reg; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + reg = readl(&phytium_usb->otg_regs->override); + reg |= OVERRIDE_IDPULLUP; + writel(reg, &phytium_usb->otg_regs->override); + usleep_range(50000, 70000); + } + + return 0; +} + +int phytium_usb_otg_disable_irq(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) + writel(0, &phytium_usb->otg_regs->ien); + + return 0; +} + +int phytium_usb_otg_clear_irq(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) + writel(~0, &phytium_usb->otg_regs->ivect); + + return 0; +} + +int phytium_usb_otg_enable_irq(void *data) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) + writel(OTGIEN_ID_CHANGE | OTGIEN_VBUSVALID_RISE | + OTGIEN_VBUSVALID_FALL, + &phytium_usb->otg_regs->ien); + + return 0; +} + +int phytium_usb_otg_host_on(void *data) +{ + int ret = 0; + u32 val; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS, + &phytium_usb->otg_regs->cmd); + ret = readl_poll_timeout_atomic(&phytium_usb->otg_regs->sts, + val, val & OTGSTS_HOST_READY, 1, 100000); + if (ret) + dev_err(phytium_usb->dev, "wait for host_ready fail\n"); + phytium_usb->dr_mode = USB_DR_MODE_HOST; + } + + return ret; +} + +int phytium_usb_otg_host_off(void *data) +{ + u32 val; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP, + &phytium_usb->otg_regs->cmd); + + readl_poll_timeout_atomic(&phytium_usb->otg_regs->state, val, + !(val & OTGSTATE_HOST_STATE_MASK), 1, 2000000); + phytium_usb->dr_mode 
= USB_DR_MODE_OTG; + } + + return 0; +} + +int phytium_usb_otg_gadget_on(void *data) +{ + int ret = 0; + u32 val; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + writel(OTGCMD_DEV_BUS_REQ | OTGCMD_OTG_DIS, + &phytium_usb->otg_regs->cmd); + ret = readl_poll_timeout_atomic(&phytium_usb->otg_regs->sts, + val, val & OTGSTS_DEV_READY, 1, 100000); + if (ret) + dev_err(phytium_usb->dev, "wait for dev_ready fail\n"); + phytium_usb->dr_mode = USB_DR_MODE_PERIPHERAL; + } + + return ret; +} + +int phytium_usb_otg_gadget_off(void *data) +{ + u32 val; + struct phytium_usb *phytium_usb = (struct phytium_usb *)data; + + if (phytium_usb) { + writel(OTGCMD_HOST_BUS_DROP | OTGCMD_DEV_BUS_DROP, + &phytium_usb->otg_regs->cmd); + + readl_poll_timeout_atomic(&phytium_usb->otg_regs->state, val, + !(val & OTGSTATE_DEV_STATE_MASK), 1, 2000000); + phytium_usb->dr_mode = USB_DR_MODE_OTG; + } + + return 0; +} + +void phytium_usb_otg_uninit(void *data) +{ + phytium_usb_otg_host_off(data); + phytium_usb_otg_disable_irq(data); +} + +MODULE_AUTHOR("Zhenhua Chen "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium USB2 DRD Controller Driver"); diff --git a/drivers/usb/phytium_usb_v2/otg.h b/drivers/usb/phytium_usb_v2/otg.h new file mode 100644 index 0000000000000000000000000000000000000000..7216700c5872f5e834a1ce134f623a1413fd08b0 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/otg.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PHYTIUM_USB_OTG_H__ +#define __LINUX_PHYTIUM_USB_OTG_H__ + + +struct phytium_usb_otg_regs { + __le32 did; /*0x0*/ + __le32 rid; /*0x4*/ + __le32 cfgs1; /*0x8*/ + __le32 cfgs2; /*0xC*/ + __le32 cmd; /*0x10*/ + __le32 sts; /*0x14*/ + __le32 state; /*0x18*/ + __le32 ien; /*0x1C*/ + __le32 ivect; /*0x20*/ + __le32 tmr; /*0x24*/ + __le32 simulate; /*0x28*/ + __le32 adpbc_sts; /*0x2c*/ + __le32 adp_ramp_time; /*0x30*/ + __le32 adpbc_ctrl1; /*0x34*/ + __le32 adpbc_ctrl2; /*0x38*/ + __le32 override; 
/*0x3C*/ + __le32 vbusvalid_dbnc_cfg; /*0x40*/ + __le32 sessvalid_dbnc_cfg; /*0x44*/ + __le32 susp_timing_ctrl; /*0x48*/ +}; + +int phytium_usb_otg_init(void *data); +void phytium_usb_otg_uninit(void *data); +int phytium_usb_otg_enable_iddig(void *data); +int phytium_usb_otg_disable_irq(void *data); +int phytium_usb_otg_clear_irq(void *data); +int phytium_usb_otg_enable_irq(void *data); +int phytium_usb_otg_host_on(void *data); +int phytium_usb_otg_host_off(void *data); +int phytium_usb_otg_gadget_on(void *data); +int phytium_usb_otg_gadget_off(void *data); +int phytium_usb_otg_get_id(void *data); +int phytium_usb_otg_get_vbus(void *data); +int phytium_usb_otg_is_host(void *data); +int phytium_usb_otg_is_device(void *data); +bool phytium_usb_otg_power_is_lost(void *data); + +#endif diff --git a/drivers/usb/phytium_usb_v2/pci.c b/drivers/usb/phytium_usb_v2/pci.c new file mode 100644 index 0000000000000000000000000000000000000000..4d6d6e1189d84a76758dfb4d4525a1ff785d5be9 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/pci.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. + * + * Copyright (C) 2023 - 2024 Phytium. 
+ */ + +#include +#include +#include + +#include "core.h" + +#define BAR_HOST_OFFSET 0x8000 +#define BAR_DEVICE_OFFSET 0x4000 +#define BAR_DEVICE_SIZE 0x4000 +#define BAR_OTG_SIZE 0x4000 + +static int phytium_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int ret; + + struct phytium_usb *phytium_usb; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + goto put_pci; + } + + pci_set_master(pdev); + if (pci_is_enabled(pdev)) { + phytium_usb = pci_get_drvdata(pdev); + if (!phytium_usb) { + phytium_usb = kzalloc(sizeof(*phytium_usb), GFP_KERNEL); + if (!phytium_usb) { + ret = -ENOMEM; + goto disabled_pci; + } + } + } + + //otg resource init + phytium_usb->otg_res.start = pci_resource_start(pdev, 0); + phytium_usb->otg_res.end = phytium_usb->otg_res.start + BAR_OTG_SIZE; + phytium_usb->otg_res.name = "otg"; + phytium_usb->otg_res.flags = IORESOURCE_MEM; + phytium_usb->otg_irq = pdev->irq; + + //gadget resource init + phytium_usb->device_irq = pdev->irq; + phytium_usb->device_regs = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0) + + BAR_DEVICE_OFFSET, BAR_DEVICE_SIZE); + if (IS_ERR(phytium_usb->device_regs)) { + dev_warn(&pdev->dev, "gadget ioremap failed\n"); + return PTR_ERR(phytium_usb->device_regs); + } + + //host resource init + dev_warn(&pdev->dev, "host resource init\n"); + phytium_usb->host_res[0].start = pdev->irq; + phytium_usb->host_res[0].name = "host"; + phytium_usb->host_res[0].flags = IORESOURCE_IRQ; + + phytium_usb->host_res[1].start = pci_resource_start(pdev, 0) + BAR_HOST_OFFSET; + phytium_usb->host_res[1].end = pci_resource_end(pdev, 0) - BAR_HOST_OFFSET; + phytium_usb->host_res[1].name = "host"; + phytium_usb->host_res[1].flags = IORESOURCE_MEM; + + if (pci_is_enabled(pdev)) { + phytium_usb->dev = &pdev->dev; + pci_set_drvdata(pdev, phytium_usb); + + ret = phytium_usb_init(phytium_usb); + if (ret) { + dev_err(&pdev->dev, "phytium_usb_init failed\n"); + goto 
free_phytium_usb; + } + + return 0; +free_phytium_usb: + kfree(phytium_usb); + +disabled_pci: + pci_disable_device(pdev); + +put_pci: + pci_dev_put(pdev); + + return ret; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct phytium_usb *phytium_usb = (struct phytium_usb *)pci_get_drvdata(pdev); + + if (!pci_is_enabled(pdev)) { + kfree(phytium_usb); + goto pci_put; + } + + phytium_usb_uninit(phytium_usb); + +pci_put: + pci_dev_put(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int __maybe_unused phytium_pci_suspend(struct device *dev) +{ + struct phytium_usb *phytium_usb = dev_get_drvdata(dev); + + return phytium_usb_suspend(phytium_usb); +} + +static int __maybe_unused phytium_pci_resume(struct device *dev) +{ + struct phytium_usb *phytium_usb = dev_get_drvdata(dev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&phytium_usb->lock, flags); + ret = phytium_usb_resume(phytium_usb, 1); + spin_unlock_irqrestore(&phytium_usb->lock, flags); + + return ret; +} + +static const struct dev_pm_ops phytium_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_suspend, phytium_pci_resume) +}; + +static const struct pci_device_id phytium_pci_ids[] = { + { 0x10ee, 0x8012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0, } +}; + +static struct pci_driver phytium_pci_driver = { + .name = "phytium-pci", + .id_table = &phytium_pci_ids[0], + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .driver = { + .pm = &phytium_pci_pm_ops, + } +}; + +module_pci_driver(phytium_pci_driver); +MODULE_DEVICE_TABLE(pci, phytium_pci_ids); + +MODULE_ALIAS("pci:phytium"); +MODULE_AUTHOR("Chen Zhenhua "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium USB PCI driver"); +MODULE_VERSION(PHYTIUM_USB_DRIVER_V2_VERSION); diff --git a/drivers/usb/phytium_usb_v2/platform.c b/drivers/usb/phytium_usb_v2/platform.c new file mode 100644 index 0000000000000000000000000000000000000000..ba0dafe94a7fa51fc4280653f69d88c462f19426 --- /dev/null +++ 
b/drivers/usb/phytium_usb_v2/platform.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. + * + * Copyright (C) 2023 - 2024 Phytium. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" + +static int phytium_usbplat_probe(struct platform_device *pdev) +{ + struct phytium_usb *phytium_usb; + struct device *dev = &pdev->dev; + int irq; + struct resource *res; + int ret; + + phytium_usb = devm_kzalloc(dev, sizeof(*phytium_usb), GFP_KERNEL); + if (!phytium_usb) + return -ENOMEM; + + phytium_usb->dev = dev; + phytium_usb->platdata = dev_get_platdata(dev); + + irq = platform_get_irq(pdev, 1); + if (irq < 0) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_warn(dev, "get otg resource failed\n"); + return -ENXIO; + } + phytium_usb->otg_res = *res; + phytium_usb->otg_irq = irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + phytium_usb->host_res[0].start = irq; + phytium_usb->host_res[0].end = irq; + phytium_usb->host_res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(irq); + phytium_usb->host_res[0].name = "host"; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!res) { + dev_warn(dev, "get host resource failed\n"); + return -ENXIO; + } + + phytium_usb->host_res[1] = *res; + phytium_usb->device_irq = irq; + phytium_usb->device_regs = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(phytium_usb->device_regs)) + return PTR_ERR(phytium_usb->device_regs); + + ret = phytium_usb_init(phytium_usb); + if (ret) + return ret; + + platform_set_drvdata(pdev, phytium_usb); + + return 0; +} + +static int phytium_usbplat_remove(struct platform_device *pdev) +{ + struct phytium_usb *phytium_usb = platform_get_drvdata(pdev); + + if (phytium_usb) + phytium_usb_uninit(phytium_usb); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_usbplat_suspend(struct device *dev) 
+{ + struct phytium_usb *phytium_usb = dev_get_drvdata(dev); + + return phytium_usb_suspend(phytium_usb); +} + +static int phytium_usbplat_resume(struct device *dev) +{ + struct phytium_usb *phytium_usb = dev_get_drvdata(dev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&phytium_usb->lock, flags); + ret = phytium_usb_resume(phytium_usb, 1); + spin_unlock_irqrestore(&phytium_usb->lock, flags); + + return ret; +} +#endif + +static const struct dev_pm_ops phytium_usbpm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_usbplat_suspend, phytium_usbplat_resume) +}; + +#ifdef CONFIG_OF +static const struct of_device_id of_phytium_usbmatch[] = { + { .compatible = "phytium,usb2-2.0" }, + { } +}; +MODULE_DEVICE_TABLE(of, of_phytium_usbmatch); +#endif + +static const struct acpi_device_id acpi_phytium_usbmatch[] = { + { "PHYT0064", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, acpi_phytium_usbmatch); + +static struct platform_driver phytium_usbdriver = { + .probe = phytium_usbplat_probe, + .remove = phytium_usbplat_remove, + .driver = { + .name = "phytium-usb2-2.0", + .of_match_table = of_match_ptr(of_phytium_usbmatch), + .acpi_match_table = ACPI_PTR(acpi_phytium_usbmatch), + .pm = &phytium_usbpm_ops, + }, +}; + +module_platform_driver(phytium_usbdriver); + +MODULE_AUTHOR("Zhenhua Chen "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium USB2 DRD Controller Driver"); +MODULE_VERSION(PHYTIUM_USB_DRIVER_V2_VERSION); diff --git a/drivers/usb/phytium_usb_v2/ring.c b/drivers/usb/phytium_usb_v2/ring.c new file mode 100644 index 0000000000000000000000000000000000000000..3f891461ad535ab775c513c4390114b433a5cff8 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/ring.c @@ -0,0 +1,2294 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium USB DRD Driver. + * + * Copyright (C) 2023 - 2024 Phytium. 
+ */ +#include +#include +#include +#include +#include "gadget.h" +#include "ring.h" + +#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16)) +#define DB_VALUE_EP0_OUT(ep, stream) ((ep) & 0xff) +#define DB_VALUE_CMD 0x0 + +#define TRB_BSR BIT(9) +#define TRB_DC BIT(9) + +#define TRB_SETUPID_BITMASK GENMASK(9, 8) +#define TRB_SETUPID(p) ((p) << 8) +#define TRB_SETUPID_TO_TYPE(p) (((p) & TRB_SETUPID_BITMASK) >> 8) + +#define TRB_SETUP_SPEED_USB3 0x1 +#define TRB_SETUP_SPEED_USB2 0x1 + +#define TRB_FH_TO_PACKET_TYPE(p) ((p) & GENMASK(4, 0)) +#define TRB_FH_TO_DEVICE_ADDRESS(p) (((p) << 25) & GENMASK(31, 25)) +#define TRB_FH_TO_NOT_TYPE(p) (((p) << 4) & GENMASK(7, 4)) +#define TRB_FH_TO_INTERFACE(p) (((p) << 8) & GENMASK(15, 8)) +#define TRB_FH_TR_PACKET 0x4 +#define TRB_FH_TR_PACKET_DEV_NOT 0x6 +#define TRB_FH_TR_PACKET_FUNCTION_WAKE 0x1 + +#define TRB_SETUPSTAT_STALL 0x0 +#define TRB_SETUPSTAT_ACK 0x1 +#define TRB_SETUPSTAT(p) ((p) << 6) + +#define TRB_SIA BIT(31) + +#define TRB_SETUP_SPEEDID(p) ((p) & (1 << 7)) +#define GET_PORT_ID(p) (((p) & GENMASK(31, 24)) >> 24) +#define SET_PORT_ID(p) (((p) << 24) & GENMASK(31, 24)) + +#define EVENT_TRB_LEN(p) ((p) & GENMASK(23, 0)) + +#define TRB_TO_DEV_STREAM(p) ((p) & GENMASK(16, 0)) +#define TRB_TO_HOST_STREAM(p) ((p) & GENMASK(16, 0)) +#define STREAM_PRIME_ACK 0xFFFE +#define STREAM_REJECTED 0xFFFF + +dma_addr_t gadget_trb_virt_to_dma(struct gadget_segment *seg, + union gadget_trb *trb) +{ + unsigned long segment_offset = trb - seg->trbs; + + if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT) + return 0; + + return seg->dma + (segment_offset * sizeof(*trb)); +} + +static bool gadget_room_on_ring(struct phytium_device *pdev, + struct gadget_ring *ring, unsigned int num_trbs) +{ + int num_trbs_in_deq_seg; + + if (ring->num_trbs_free < num_trbs) + return false; + + if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) { + num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs; + + if 
(ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg) + return false; + } + + return true; +} + +static bool gadget_trb_is_noop(union gadget_trb *trb) +{ + return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); +} + +static bool gadget_trb_is_link(union gadget_trb *trb) +{ + return TRB_TYPE_LINK_LE32(trb->link.control); +} + +static bool gadget_link_trb_toggles_cycle(union gadget_trb *trb) +{ + return le32_to_cpu(trb->link.control) & LINK_TOGGLE; +} + +static int gadget_prepare_ring(struct phytium_device *pdev, + struct gadget_ring *ep_ring, u32 ep_state, unsigned int num_trbs, + gfp_t mem_flags) +{ + unsigned int num_trbs_needed; + + switch (ep_state) { + case EP_STATE_STOPPED: + case EP_STATE_RUNNING: + case EP_STATE_HALTED: + break; + default: + dev_err(pdev->dev, "Error: incorrect endpoint state\n"); + return -EINVAL; + } + + while (1) { + if (gadget_room_on_ring(pdev, ep_ring, num_trbs)) + break; + + num_trbs_needed = num_trbs - ep_ring->num_trbs_free; + if (gadget_ring_expansion(pdev, ep_ring, num_trbs_needed, mem_flags)) { + dev_err(pdev->dev, "Ring expansion failed\n"); + return -ENOMEM; + } + } + + while (gadget_trb_is_link(ep_ring->enqueue)) { + ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN); + /* The cycle bit must be set as the last operation. 
*/ + wmb(); + ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); + + if (gadget_link_trb_toggles_cycle(ep_ring->enqueue)) + ep_ring->cycle_state ^= 1; + + ep_ring->enq_seg = ep_ring->enq_seg->next; + ep_ring->enqueue = ep_ring->enq_seg->trbs; + } + + return 0; +} + +static void gadget_inc_enq(struct phytium_device *pdev, struct gadget_ring *ring, + bool more_trbs_coming) +{ + union gadget_trb *next; + u32 chain; + + chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN; + + if (!gadget_trb_is_link(ring->enqueue)) + ring->num_trbs_free--; + next = ++(ring->enqueue); + + while (gadget_trb_is_link(next)) { + if (!chain && !more_trbs_coming) + break; + + next->link.control &= cpu_to_le32(~TRB_CHAIN); + next->link.control |= cpu_to_le32(chain); + + /* Give this link TRB to the hardware */ + wmb(); + next->link.control ^= cpu_to_le32(TRB_CYCLE); + + if (gadget_link_trb_toggles_cycle(next)) + ring->cycle_state ^= 1; + + ring->enq_seg = ring->enq_seg->next; + ring->enqueue = ring->enq_seg->trbs; + next = ring->enqueue; + } +} + +static void gadget_queue_trb(struct phytium_device *pdev, struct gadget_ring *ring, + bool more_trbs_coming, u32 field1, u32 field2, u32 field3, u32 field4) +{ + struct gadget_generic_trb *trb; + + trb = &ring->enqueue->generic; + + trb->field[0] = cpu_to_le32(field1); + trb->field[1] = cpu_to_le32(field2); + trb->field[2] = cpu_to_le32(field3); + trb->field[3] = cpu_to_le32(field4); + + gadget_inc_enq(pdev, ring, more_trbs_coming); +} + +static void gadget_queue_command(struct phytium_device *pdev, + u32 field1, u32 field2, u32 field3, u32 field4) +{ + gadget_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1, GFP_ATOMIC); + + pdev->cmd.command_trb = pdev->cmd_ring->enqueue; + + gadget_queue_trb(pdev, pdev->cmd_ring, false, field1, field2, field3, + field4 | pdev->cmd_ring->cycle_state); +} + +void gadget_queue_configure_endpoint(struct phytium_device *pdev, + dma_addr_t in_ctx_ptr) +{ + gadget_queue_command(pdev, 
lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_CONFIG_EP) | + SLOT_ID_FOR_TRB(pdev->slot_id)); +} + +void gadget_ring_cmd_db(struct phytium_device *pdev) +{ + writel(DB_VALUE_CMD, &pdev->dba->cmd_db); +} + +void gadget_queue_stop_endpoint(struct phytium_device *pdev, unsigned int ep_indx) +{ + gadget_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) | + EP_ID_FOR_TRB(ep_indx) | TRB_TYPE(TRB_STOP_RING)); +} + +int gadget_cmd_stop_ep(struct phytium_device *pdev, struct gadget_ep *pep) +{ + u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx); + int ret = 0; + + if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED || + ep_state == EP_STATE_HALTED) + goto ep_stopped; + + gadget_queue_stop_endpoint(pdev, pep->idx); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + +ep_stopped: + pep->ep_state |= EP_STOPPED; + + return ret; +} + +void gadget_queue_flush_endpoint(struct phytium_device *pdev, unsigned int ep_index) +{ + gadget_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) | + SLOT_ID_FOR_TRB(pdev->slot_id) | EP_ID_FOR_TRB(ep_index)); +} + +int gadget_cmd_flush_ep(struct phytium_device *pdev, struct gadget_ep *pep) +{ + int ret; + + gadget_queue_flush_endpoint(pdev, pep->idx); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + + return ret; +} + +static struct gadget_ring *gadget_get_transfer_ring(struct phytium_device *pdev, + struct gadget_ep *pep, + unsigned int stream_id) +{ + if (!(pep->ep_state & EP_HAS_STREAMS)) + return pep->ring; + + if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) { + dev_err(pdev->dev, "Err:%s ring doesn't exist for SID:%d\n", pep->name, stream_id); + return NULL; + } + + return pep->stream_info.stream_rings[stream_id]; +} + +static struct gadget_ring * + gadget_request_to_transfer_ring(struct phytium_device *pdev, + struct gadget_request *preq) +{ + return gadget_get_transfer_ring(pdev, preq->pep, preq->request.stream_id); +} + +static 
u64 gadget_get_hw_deq(struct phytium_device *pdev, unsigned int ep_index, + unsigned int stream_id) +{ + struct gadget_stream_ctx *st_ctx; + struct gadget_ep *pep; + + pep = &pdev->eps[stream_id]; + + if (pep->ep_state & EP_HAS_STREAMS) { + st_ctx = &pep->stream_info.stream_ctx_array[stream_id]; + return le64_to_cpu(st_ctx->stream_ring); + } + + return le64_to_cpu(pep->out_ctx->deq); +} + +static struct gadget_segment *gadget_trb_in_td(struct phytium_device *pdev, + struct gadget_segment *start_seg, union gadget_trb *start_trb, + union gadget_trb *end_trb, dma_addr_t suspect_dma) +{ + struct gadget_segment *cur_seg; + union gadget_trb *temp_trb; + dma_addr_t end_seg_dma; + dma_addr_t end_trb_dma; + dma_addr_t start_dma; + + start_dma = gadget_trb_virt_to_dma(start_seg, start_trb); + cur_seg = start_seg; + + do { + if (start_dma == 0) + return NULL; + + temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1]; + end_seg_dma = gadget_trb_virt_to_dma(cur_seg, temp_trb); + end_trb_dma = gadget_trb_virt_to_dma(cur_seg, end_trb); + + if (end_trb_dma > 0) { + if (start_dma <= end_trb_dma) { + if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) + return cur_seg; + } else { + if ((suspect_dma >= start_dma && suspect_dma <= end_seg_dma) || + (suspect_dma >= cur_seg->dma && suspect_dma <= end_trb_dma)) + return cur_seg; + } + + return NULL; + } + if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) + return cur_seg; + + cur_seg = cur_seg->next; + start_dma = gadget_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); + } while (cur_seg != start_seg); + + return NULL; +} + +static void gadget_next_trb(struct phytium_device *pdev, struct gadget_ring *ring, + struct gadget_segment **seg, union gadget_trb **trb) +{ + if (gadget_trb_is_link(*trb)) { + *seg = (*seg)->next; + *trb = ((*seg)->trbs); + } else { + (*trb)++; + } +} + +static void gadget_find_new_dequeue_state(struct phytium_device *pdev, + struct gadget_ep *pep, unsigned int stream_id, struct gadget_td *cur_td, + struct 
gadget_dequeue_state *state) +{ + bool td_last_trb_found = false; + struct gadget_segment *new_seg; + struct gadget_ring *ep_ring; + union gadget_trb *new_deq; + bool cycle_found = false; + u64 hw_dequeue; + + ep_ring = gadget_get_transfer_ring(pdev, pep, stream_id); + if (!ep_ring) + return; + + hw_dequeue = gadget_get_hw_deq(pdev, pep->idx, stream_id); + new_seg = ep_ring->deq_seg; + new_deq = ep_ring->dequeue; + state->new_cycle_state = hw_dequeue & 0x1; + state->stream_id = stream_id; + + do { + if (!cycle_found && gadget_trb_virt_to_dma(new_seg, new_deq) == + (dma_addr_t)(hw_dequeue & ~0xf)) { + cycle_found = true; + if (td_last_trb_found) + break; + } + + if (new_deq == cur_td->last_trb) + td_last_trb_found = true; + + if (cycle_found && gadget_trb_is_link(new_deq) && + gadget_link_trb_toggles_cycle(new_deq)) + state->new_cycle_state ^= 0x1; + + gadget_next_trb(pdev, ep_ring, &new_seg, &new_deq); + + if (new_deq == pep->ring->dequeue) { + dev_err(pdev->dev, "Error: Failed finding new deuque state\n"); + state->new_deq_seg = NULL; + state->new_deq_ptr = NULL; + return; + } + } while (!cycle_found || !td_last_trb_found); + + state->new_deq_seg = new_seg; + state->new_deq_ptr = new_deq; + +} + +static void gadget_trb_to_noop(union gadget_trb *trb, u32 noop_type) +{ + if (gadget_trb_is_link(trb)) { + trb->link.control &= cpu_to_le32(~TRB_CHAIN); + } else { + trb->generic.field[0] = 0; + trb->generic.field[1] = 0; + trb->generic.field[2] = 0; + trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); + trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); + } +} + +static void gadget_td_to_noop(struct phytium_device *pdev, + struct gadget_ring *ep_ring, struct gadget_td *td, bool flip_cycle) +{ + struct gadget_segment *seg = td->start_seg; + union gadget_trb *trb = td->first_trb; + + while (1) { + gadget_trb_to_noop(trb, TRB_TR_NOOP); + + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); + + if (trb == 
td->last_trb) + break; + + gadget_next_trb(pdev, ep_ring, &seg, &trb); + } +} + +void gadget_set_link_state(struct phytium_device *pdev, + __le32 __iomem *port_regs, u32 link_state) +{ + int port_num = 0xff; + u32 temp; + + temp = readl(port_regs); + temp = gadget_port_state_to_neutral(temp); + temp |= PORT_WKCONN_E | PORT_WKDISC_E; + writel(temp, port_regs); + + temp &= ~PORT_PLS_MASK; + temp |= PORT_LINK_STROBE | link_state; + + if (pdev->active_port) + port_num = pdev->active_port->port_num; + + writel(temp, port_regs); +} + +static void gadget_force_10_go(struct phytium_device *pdev) +{ + if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable) + gadget_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0); +} + +static bool gadget_ring_ep_doorbell(struct phytium_device *pdev, + struct gadget_ep *pep, unsigned int stream_id) +{ + __le32 __iomem *reg_addr = &pdev->dba->ep_db; + unsigned int ep_state = pep->ep_state; + unsigned int db_value; + + if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED)) + return false; + + if (pep->ep_state & EP_HAS_STREAMS) { + if (pep->stream_info.drbls_count >= 2) + return false; + + pep->stream_info.drbls_count++; + } + + pep->ep_state &= ~EP_STOPPED; + + if (pep->idx == 0 && pdev->ep0_stage == GADGET_DATA_STAGE && + !pdev->ep0_expect_in) + db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id); + else + db_value = DB_VALUE(pep->idx, stream_id); + + writel(db_value, reg_addr); + + gadget_force_10_go(pdev); + + return true; +} + +void gadget_ring_doorbell_for_active_rings(struct phytium_device *pdev, + struct gadget_ep *pep) +{ + struct gadget_stream_info *stream_info; + unsigned int stream_id; + int ret; + + if (pep->ep_state & EP_DIS_IN_PROGRESS) + return; + + if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) { + if (pep->ring && !list_empty(&pep->ring->td_list)) + gadget_ring_ep_doorbell(pdev, pep, 0); + + return; + } + + stream_info = &pep->stream_info; + for (stream_id = 1; stream_id < 
stream_info->num_streams; stream_id++) { + struct gadget_td *td, *td_temp; + struct gadget_ring *ep_ring; + + if (stream_info->drbls_count >= 2) + return; + + ep_ring = gadget_get_transfer_ring(pdev, pep, stream_id); + if (!ep_ring) + continue; + + if (!ep_ring->stream_active || ep_ring->stream_rejected) + continue; + + list_for_each_entry_safe(td, td_temp, &ep_ring->td_list, td_list) { + if (td->drbl) + continue; + + ret = gadget_ring_ep_doorbell(pdev, pep, stream_id); + if (ret) + td->drbl = 1; + } + } +} + +void gadget_queue_new_dequeue_state(struct phytium_device *pdev, + struct gadget_ep *pep, struct gadget_dequeue_state *deq_state) +{ + u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id); + u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id); + u32 type = TRB_TYPE(TRB_SET_DEQ); + u32 trb_sct = 0; + dma_addr_t addr; + + addr = gadget_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr); + + if (deq_state->stream_id) + trb_sct = SCT_FOR_TRB(SCT_PRI_TR); + + gadget_queue_command(pdev, lower_32_bits(addr) | trb_sct | + deq_state->new_cycle_state, upper_32_bits(addr), trb_stream_id, + trb_slot_id | EP_ID_FOR_TRB(pep->idx) | type); +} + +static int gadget_cmd_set_deq(struct phytium_device *pdev, + struct gadget_ep *pep, struct gadget_dequeue_state *deq_state) +{ + struct gadget_ring *ep_ring; + int ret; + + if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) { + gadget_ring_doorbell_for_active_rings(pdev, pep); + + return 0; + } + + gadget_queue_new_dequeue_state(pdev, pep, deq_state); + gadget_ring_cmd_db(pdev); + ret = gadget_wait_for_cmd_compl(pdev); + + ep_ring = gadget_get_transfer_ring(pdev, pep, deq_state->stream_id); + + if (gadget_trb_is_link(ep_ring->dequeue)) { + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + + while (ep_ring->dequeue != deq_state->new_deq_ptr) { + ep_ring->num_trbs_free++; + ep_ring->dequeue++; + + if (gadget_trb_is_link(ep_ring->dequeue)) { + if (ep_ring->dequeue == 
deq_state->new_deq_ptr) + break; + + ep_ring->deq_seg = ep_ring->deq_seg->next; + ep_ring->dequeue = ep_ring->deq_seg->trbs; + } + } + + if (ret) + return -ESHUTDOWN; + + gadget_ring_doorbell_for_active_rings(pdev, pep); + + return 0; +} + +static void gadget_unmap_td_bounce_buffer(struct phytium_device *pdev, + struct gadget_ring *ring, struct gadget_td *td) +{ + struct gadget_segment *seg = td->bounce_seg; + struct gadget_request *preq; + size_t len; + + if (!seg) + return; + + preq = td->preq; + + if (!preq->direction) { + dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len, DMA_TO_DEVICE); + return; + } + + dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); + + len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs, + seg->bounce_buf, seg->bounce_len, seg->bounce_offs); + if (len != seg->bounce_len) + dev_warn(pdev->dev, "WARN Wrong bounce buffer read length:%ld != %d\n", + len, seg->bounce_len); + + seg->bounce_len = 0; + seg->bounce_offs = 0; +} + +static void gadget_giveback(struct gadget_ep *pep, struct gadget_request *preq, int status) +{ + struct phytium_device *pdev = pep->pdev; + + list_del(&preq->list); + + if (preq->request.status == -EINPROGRESS) + preq->request.status = status; + + usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request, preq->direction); + + if (preq != &pdev->ep0_preq) { + spin_unlock(&pdev->lock); + usb_gadget_giveback_request(&pep->endpoint, &preq->request); + spin_lock(&pdev->lock); + } +} + +int gadget_remove_request(struct phytium_device *pdev, struct gadget_request *preq, + struct gadget_ep *pep) +{ + struct gadget_dequeue_state deq_state; + struct gadget_td *cur_td = NULL; + struct gadget_ring *ep_ring; + struct gadget_segment *seg; + int status = -ECONNRESET; + int ret = 0; + u64 hw_deq; + + memset(&deq_state, 0, sizeof(deq_state)); + + cur_td = &preq->td; + ep_ring = gadget_request_to_transfer_ring(pdev, preq); + + hw_deq = gadget_get_hw_deq(pdev, pep->idx, 
preq->request.stream_id); + hw_deq &= ~0xf; + + seg = gadget_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb, + cur_td->last_trb, hw_deq); + if (seg && (pep->ep_state & EP_ENABLED)) + gadget_find_new_dequeue_state(pdev, pep, preq->request.stream_id, + cur_td, &deq_state); + else + gadget_td_to_noop(pdev, ep_ring, cur_td, false); + + list_del_init(&cur_td->td_list); + ep_ring->num_tds--; + pep->stream_info.td_count--; + + if (pdev->gadget_state & GADGET_STATE_DISCONNECT_PENDING) { + status = -ESHUTDOWN; + ret = gadget_cmd_set_deq(pdev, pep, &deq_state); + } + + gadget_unmap_td_bounce_buffer(pdev, ep_ring, cur_td); + gadget_giveback(pep, cur_td->preq, status); + + return ret; +} + +void gadget_queue_slot_control(struct phytium_device *pdev, u32 trb_type) +{ + gadget_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) | + SLOT_ID_FOR_TRB(pdev->slot_id)); +} + +void gadget_inc_deq(struct phytium_device *pdev, struct gadget_ring *ring) +{ + if (ring->type == TYPE_EVENT) { + if (!gadget_last_trb_on_seg(ring->deq_seg, ring->dequeue)) { + ring->dequeue++; + return; + } + + if (gadget_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue)) + ring->cycle_state ^= 1; + + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + return; + } + + if (!gadget_trb_is_link(ring->dequeue)) { + ring->dequeue++; + ring->num_trbs_free++; + } + + while (gadget_trb_is_link(ring->dequeue)) { + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + } +} + +void gadget_queue_reset_ep(struct phytium_device *pdev, unsigned int ep_index) +{ + return gadget_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) | + EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_RESET_EP)); +} + +void gadget_queue_halt_endpoint(struct phytium_device *pdev, unsigned int ep_index) +{ + return gadget_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) | + SLOT_ID_FOR_TRB(pdev->slot_id) | EP_ID_FOR_TRB(ep_index)); +} + +void gadget_queue_reset_device(struct phytium_device 
*pdev) +{ + gadget_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) | + SLOT_ID_FOR_TRB(pdev->slot_id)); +} + +static int gadget_update_port_id(struct phytium_device *pdev, u32 port_id) +{ + struct gadget_port *port = pdev->active_port; + u8 old_port = 0; + + if (port && port->port_num == port_id) + return 0; + + if (port) + old_port = port->port_num; + + if (port_id == pdev->usb2_port.port_num) + port = &pdev->usb2_port; + else if (port_id == pdev->usb3_port.port_num) + port = &pdev->usb3_port; + else { + dev_err(pdev->dev, "Port event with invalid port ID %d\n", port_id); + return -EINVAL; + } + + if (port_id != old_port) { + gadget_disable_slot(pdev); + pdev->active_port = port; + gadget_enable_slot(pdev); + } + + if (port_id == pdev->usb2_port.port_num) + gadget_set_usb2_hardware_lpm(pdev, NULL, 1); + else + writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1), &pdev->usb3_port.regs->portpmsc); + + return 0; +} + +void gadget_force_header_wakeup(struct phytium_device *pdev, int intf_num) +{ + u32 lo, mid; + + lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) | + TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address); + + mid = TRB_FH_TR_PACKET_DEV_NOT | TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) | + TRB_FH_TO_INTERFACE(intf_num); + + gadget_queue_command(pdev, lo, mid, 0, TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2)); +} + +static void gadget_handle_port_status(struct phytium_device *pdev, union gadget_trb *event) +{ + struct gadget_port_regs __iomem *port_regs; + u32 portsc, cmd_regs, link_state, port_id; + bool port2 = false; + + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + dev_err(pdev->dev, "Err: incorrect PSC event\n"); + + port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); + + if (gadget_update_port_id(pdev, port_id)) + goto cleanup; + + port_regs = pdev->active_port->regs; + + if (port_id == pdev->usb2_port.port_num) + port2 = true; + +new_event: + portsc = readl(&port_regs->portsc); + 
writel(gadget_port_state_to_neutral(portsc) |
+	       (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
+
+	pdev->gadget.speed = gadget_port_speed(portsc);
+	link_state = portsc & PORT_PLS_MASK;
+
+	if (portsc & PORT_PLC) {
+		if (!(pdev->gadget_state & GADGET_WAKEUP_PENDING) && link_state == XDEV_RESUME) {
+			cmd_regs = readl(&pdev->op_regs->command);
+			if (!(cmd_regs & CMD_R_S))
+				goto cleanup;
+
+			if (DEV_SUPERSPEED_ANY(portsc)) {
+				gadget_set_link_state(pdev, &port_regs->portsc, XDEV_U0);
+				resume_gadget(pdev);
+			}
+		}
+
+		if ((pdev->gadget_state & GADGET_WAKEUP_PENDING) && link_state == XDEV_U0) {
+			pdev->gadget_state &= ~GADGET_WAKEUP_PENDING;
+			gadget_force_header_wakeup(pdev, 1);
+			gadget_ring_cmd_db(pdev);
+			gadget_wait_for_cmd_compl(pdev);
+		}
+
+		if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
+				!DEV_SUPERSPEED_ANY(portsc))
+			resume_gadget(pdev);
+
+		if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
+			suspend_gadget(pdev);
+
+		pdev->link_state = link_state;
+	}
+
+	if (portsc & PORT_CSC) {
+		if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
+			disconnect_gadget(pdev);
+
+		if (portsc & PORT_CONNECT) {
+			if (!port2)
+				gadget_irq_reset(pdev);
+
+			usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
+		}
+	}
+
+	if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
+		gadget_irq_reset(pdev);
+		pdev->u1_allowed = 0;
+		pdev->u2_allowed = 0;
+		pdev->may_wakeup = 0;
+	}
+
+	if (portsc & PORT_OCC) /* over-current change, not PORT_CEC */
+		dev_err(pdev->dev, "Port Over Current detected\n");
+
+	if (portsc & PORT_CEC)
+		dev_err(pdev->dev, "Port Configure Error detected\n");
+
+	if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
+		goto new_event;
+
+cleanup:
+	gadget_inc_deq(pdev, pdev->event_ring);
+}
+
+static void gadget_td_cleanup(struct phytium_device *pdev, struct gadget_td *td,
+		struct gadget_ring *ep_ring, int *status)
+{
+	struct gadget_request *preq = td->preq;
+
+	gadget_unmap_td_bounce_buffer(pdev, ep_ring, td);
+
+	if (preq->request.actual >
preq->request.length) { + preq->request.actual = 0; + *status = 0; + } + + list_del_init(&td->td_list); + ep_ring->num_tds--; + preq->pep->stream_info.td_count--; + + gadget_giveback(preq->pep, preq, *status); +} + +static void gadget_skip_isoc_td(struct phytium_device *pdev, struct gadget_td *td, + struct gadget_transfer_event *event, + struct gadget_ep *pep, int status) +{ + struct gadget_ring *ep_ring; + + ep_ring = gadget_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + td->preq->request.status = -EXDEV; + td->preq->request.actual = 0; + + while (ep_ring->dequeue != td->last_trb) + gadget_inc_deq(pdev, ep_ring); + + gadget_inc_deq(pdev, ep_ring); + + gadget_td_cleanup(pdev, td, ep_ring, &status); +} + +static int gadget_giveback_first_trb(struct phytium_device *pdev, struct gadget_ep *pep, + unsigned int stream_id, int start_cycle, + struct gadget_generic_trb *start_trb) +{ + /* + * Pass all the TRBs to the hardware at once and make sure this write + * isn't reordered. + */ + wmb(); + + if (start_cycle) + start_trb->field[3] |= cpu_to_le32(start_cycle); + else + start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); + + if ((pep->ep_state & EP_HAS_STREAMS) && !pep->stream_info.first_prime_det) + return 0; + + return gadget_ring_ep_doorbell(pdev, pep, stream_id); +} + +static void gadget_finish_td(struct phytium_device *pdev, struct gadget_td *td, + struct gadget_transfer_event *event, struct gadget_ep *ep, int *status) +{ + struct gadget_ring *ep_ring; + u32 trb_comp_code; + + ep_ring = gadget_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + + if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID || + trb_comp_code == COMP_STOPPED_SHORT_PACKET || trb_comp_code == COMP_STOPPED) + return; + + while (ep_ring->dequeue != td->last_trb) + gadget_inc_deq(pdev, ep_ring); + + gadget_inc_deq(pdev, ep_ring); + + gadget_td_cleanup(pdev, td, ep_ring, status); +} + +static void 
gadget_process_ctrl_td(struct phytium_device *pdev, struct gadget_td *td, + union gadget_trb *event_trb, struct gadget_transfer_event *event, + struct gadget_ep *pep, int *status) +{ + struct gadget_ring *ep_ring; + u32 remaining; + u32 trb_type; + + trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3])); + ep_ring = gadget_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + if (trb_type == TRB_DATA) { + td->request_length_set = true; + td->preq->request.actual = td->preq->request.length - remaining; + } + + if (!td->request_length_set) + td->preq->request.actual = td->preq->request.length; + + if (pdev->ep0_stage == GADGET_DATA_STAGE && pep->number == 0 && pdev->three_stage_setup) { + td = list_entry(ep_ring->td_list.next, struct gadget_td, td_list); + pdev->ep0_stage = GADGET_STATUS_STAGE; + + gadget_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state, + &td->last_trb->generic); + + return; + } + + *status = 0; + + gadget_finish_td(pdev, td, event, pep, status); +} + +static int gadget_sum_trb_lengths(struct phytium_device *pdev, + struct gadget_ring *ring, union gadget_trb *stop_trb) +{ + struct gadget_segment *seg = ring->deq_seg; + union gadget_trb *trb = ring->dequeue; + u32 sum; + + for (sum = 0; trb != stop_trb; gadget_next_trb(pdev, ring, &seg, &trb)) { + if (!gadget_trb_is_noop(trb) && !gadget_trb_is_link(trb)) + sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); + } + + return sum; +} + +static void gadget_process_isoc_td(struct phytium_device *pdev, + struct gadget_td *td, union gadget_trb *ep_trb, + struct gadget_transfer_event *event, + struct gadget_ep *pep, int status) +{ + struct gadget_request *preq = td->preq; + u32 remaining, requested, ep_trb_len; + bool sum_trbs_for_length = false; + struct gadget_ring *ep_ring; + u32 trb_comp_code, td_length; + + ep_ring = gadget_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + trb_comp_code = 
GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + + requested = preq->request.length; + + switch (trb_comp_code) { + case COMP_SUCCESS: + preq->request.status = 0; + break; + case COMP_SHORT_PACKET: + preq->request.status = 0; + sum_trbs_for_length = true; + break; + case COMP_ISOCH_BUFFER_OVERRUN: + case COMP_BABBLE_DETECTED_ERROR: + preq->request.status = -EOVERFLOW; + break; + case COMP_STOPPED: + sum_trbs_for_length = true; + break; + case COMP_STOPPED_SHORT_PACKET: + preq->request.status = 0; + requested = remaining; + break; + case COMP_STOPPED_LENGTH_INVALID: + requested = 0; + remaining = 0; + break; + default: + sum_trbs_for_length = true; + preq->request.status = -1; + break; + } + + if (sum_trbs_for_length) { + td_length = gadget_sum_trb_lengths(pdev, ep_ring, ep_trb); + td_length += ep_trb_len - remaining; + } else { + td_length = requested; + } + + td->preq->request.actual += td_length; + + gadget_finish_td(pdev, td, event, pep, &status); +} + +static void gadget_process_bulk_intr_td(struct phytium_device *pdev, + struct gadget_td *td, union gadget_trb *ep_trb, + struct gadget_transfer_event *event, struct gadget_ep *ep, int *status) +{ + u32 remaining, requested, ep_trb_len, trb_comp_code; + struct gadget_ring *ep_ring; + + ep_ring = gadget_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer)); + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); + requested = td->preq->request.length; + + switch (trb_comp_code) { + case COMP_SUCCESS: + case COMP_SHORT_PACKET: + *status = 0; + break; + case COMP_STOPPED_SHORT_PACKET: + td->preq->request.actual = remaining; + goto finish_td; + case COMP_STOPPED_LENGTH_INVALID: + ep_trb_len = 0; + remaining = 0; + break; + } + + if (ep_trb == td->last_trb) + 
ep_trb_len = gadget_sum_trb_lengths(pdev, ep_ring, ep_trb) + ep_trb_len - remaining; + + td->preq->request.actual = ep_trb_len; + +finish_td: + ep->stream_info.drbls_count--; + + gadget_finish_td(pdev, td, event, ep, status); +} + +static int gadget_handle_tx_event(struct phytium_device *pdev, + struct gadget_transfer_event *event) +{ + const struct usb_endpoint_descriptor *desc; + bool handling_skipped_tds = false; + struct gadget_segment *ep_seg; + struct gadget_ring *ep_ring; + int status = -EINPROGRESS; + union gadget_trb *ep_trb; + dma_addr_t ep_trb_dma; + struct gadget_ep *pep; + struct gadget_td *td; + u32 trb_comp_code; + int invalidate, ep_index; + + invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE; + ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); + ep_trb_dma = le64_to_cpu(event->buffer); + + pep = &pdev->eps[ep_index]; + ep_ring = gadget_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer)); + + if (invalidate || !pdev->gadget.connected) + goto cleanup; + + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) + goto err_out; + + if (!ep_ring) { + switch (trb_comp_code) { + case COMP_INVALID_STREAM_TYPE_ERROR: + case COMP_INVALID_STREAM_ID_ERROR: + case COMP_RING_OVERRUN: + case COMP_RING_UNDERRUN: + goto cleanup; + default: + dev_err(pdev->dev, "Err: %s event for unknown ring\n", pep->name); + goto err_out; + } + } + + switch (trb_comp_code) { + case COMP_BABBLE_DETECTED_ERROR: + status = -EOVERFLOW; + break; + case COMP_RING_UNDERRUN: + case COMP_RING_OVERRUN: + goto cleanup; + case COMP_MISSED_SERVICE_ERROR: + pep->skip = true; + break; + } + + do { + if (list_empty(&ep_ring->td_list)) + goto cleanup; + + td = list_entry(ep_ring->td_list.next, struct gadget_td, td_list); + + ep_seg = gadget_trb_in_td(pdev, ep_ring->deq_seg, + ep_ring->dequeue, td->last_trb, ep_trb_dma); + + if (!ep_seg && (trb_comp_code == COMP_STOPPED || + trb_comp_code == 
COMP_STOPPED_LENGTH_INVALID)) { + pep->skip = false; + goto cleanup; + } + + desc = td->preq->pep->endpoint.desc; + if (!ep_seg) { + if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) + dev_err(pdev->dev, "Err Transfer event ep_index %d comp_code %u\n", + ep_index, trb_comp_code); + + gadget_skip_isoc_td(pdev, td, event, pep, status); + goto cleanup; + } + + if (trb_comp_code == COMP_SHORT_PACKET) + ep_ring->last_td_was_short = true; + else + ep_ring->last_td_was_short = false; + + if (pep->skip) { + pep->skip = false; + gadget_skip_isoc_td(pdev, td, event, pep, status); + goto cleanup; + } + + ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)]; + + if (gadget_trb_is_noop(ep_trb)) + goto cleanup; + + if (usb_endpoint_xfer_control(desc)) + gadget_process_ctrl_td(pdev, td, ep_trb, event, pep, &status); + else if (usb_endpoint_xfer_isoc(desc)) + gadget_process_isoc_td(pdev, td, ep_trb, event, pep, status); + else + gadget_process_bulk_intr_td(pdev, td, ep_trb, event, pep, &status); + +cleanup: + handling_skipped_tds = pep->skip; + + if (!handling_skipped_tds) + gadget_inc_deq(pdev, pdev->event_ring); + } while (handling_skipped_tds); + + return 0; + +err_out: + dev_err(pdev->dev, "0x%llx %08x %08x %08x %08x\n", + (unsigned long long)gadget_trb_virt_to_dma(pdev->event_ring->deq_seg, + pdev->event_ring->dequeue), lower_32_bits(le64_to_cpu(event->buffer)), + upper_32_bits(le64_to_cpu(event->buffer)), le32_to_cpu(event->transfer_len), + le32_to_cpu(event->flags)); + + return -EINVAL; +} + +static int gadget_ep0_delegate_req(struct phytium_device *pdev, struct usb_ctrlrequest *ctrl) +{ + int ret; + + spin_unlock(&pdev->lock); + ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl); + spin_lock(&pdev->lock); + + return ret; +} + +static int gadget_w_index_to_ep_index(u16 wIndex) +{ + if (!(wIndex & USB_ENDPOINT_NUMBER_MASK)) + return 0; + + return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) + + (wIndex & USB_ENDPOINT_DIR_MASK ? 
1 : 0) - 1; +} + + +static int gadget_ep0_handle_status(struct phytium_device *pdev, struct usb_ctrlrequest *ctrl) +{ + struct gadget_ep *pep; + __le16 *response; + int ep_sts = 0; + u16 status = 0; + u32 recipient; + + recipient = ctrl->bRequestType & USB_RECIP_MASK; + + switch (recipient) { + case USB_RECIP_DEVICE: + status = pdev->gadget.is_selfpowered; + status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; + + if (pdev->gadget.speed >= USB_SPEED_SUPER) { + status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED; + status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED; + } + break; + case USB_RECIP_INTERFACE: + return gadget_ep0_delegate_req(pdev, ctrl); + case USB_RECIP_ENDPOINT: + ep_sts = gadget_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex)); + pep = &pdev->eps[ep_sts]; + ep_sts = GET_EP_CTX_STATE(pep->out_ctx); + + if (ep_sts == EP_STATE_HALTED) + status = BIT(USB_ENDPOINT_HALT); + break; + default: + return -EINVAL; + } + + response = (__le16 *)pdev->setup_buf; + *response = cpu_to_le16(status); + + pdev->ep0_preq.request.length = sizeof(*response); + pdev->ep0_preq.request.buf = pdev->setup_buf; + + return gadget_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq); +} + +static void gadget_enter_test_mode(struct phytium_device *pdev) +{ + u32 temp; + + temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28); + temp |= PORT_TEST_MODE(pdev->test_mode); + writel(temp, &pdev->active_port->regs->portpmsc); +} + +static int gadget_ep0_handle_feature_device(struct phytium_device *pdev, + struct usb_ctrlrequest *ctrl, int set) +{ + enum usb_device_state state; + enum usb_device_speed speed; + u16 tmode; + + state = pdev->gadget.state; + speed = pdev->gadget.speed; + + switch (le16_to_cpu(ctrl->wValue)) { + case USB_DEVICE_REMOTE_WAKEUP: + pdev->may_wakeup = !!set; + break; + case USB_DEVICE_U1_ENABLE: + if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER) + return -EINVAL; + + pdev->u1_allowed = !!set; + break; + case USB_DEVICE_U2_ENABLE: + if 
(state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+			return -EINVAL;
+
+		pdev->u2_allowed = !!set;
+		break;
+	case USB_DEVICE_LTM_ENABLE:
+		return -EINVAL;
+	case USB_DEVICE_TEST_MODE:
+		/* Test mode is a USB 2.0 feature: reject above high speed. */
+		if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+			return -EINVAL;
+
+		tmode = le16_to_cpu(ctrl->wIndex);
+
+		if (!set || (tmode & 0xff) != 0)
+			return -EINVAL;
+
+		tmode = tmode >> 8;
+
+		if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J)
+			return -EINVAL;
+
+		pdev->test_mode = tmode;
+		gadget_enter_test_mode(pdev);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gadget_ep0_handle_feature_intf(struct phytium_device *pdev,
+		struct usb_ctrlrequest *ctrl, int set)
+{
+	u16 wValue, wIndex;
+	int ret;
+
+	wValue = le16_to_cpu(ctrl->wValue);
+	wIndex = le16_to_cpu(ctrl->wIndex);
+
+	switch (wValue) {
+	case USB_INTRF_FUNC_SUSPEND:
+		ret = gadget_ep0_delegate_req(pdev, ctrl);
+		if (ret)
+			return ret;
+
+		if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
+			pdev->may_wakeup++;
+		else
+			if (pdev->may_wakeup > 0)
+				pdev->may_wakeup--;
+
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gadget_ep0_handle_feature_endpoint(struct phytium_device *pdev,
+		struct usb_ctrlrequest *ctrl, int set)
+{
+	struct gadget_ep *pep;
+	u16 wValue;
+
+	wValue = le16_to_cpu(ctrl->wValue);
+	pep = &pdev->eps[gadget_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
+
+	switch (wValue) {
+	case USB_ENDPOINT_HALT:
+		if (!set && (pep->ep_state & EP_WEDGE)) {
+			gadget_halt_endpoint(pdev, pep, 0);
+			gadget_halt_endpoint(pdev, pep, 1);
+			break;
+		}
+		return gadget_halt_endpoint(pdev, pep, set);
+	default:
+		dev_warn(pdev->dev, "Warn Incorrect wValue %04x\n", wValue);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gadget_ep0_handle_feature(struct phytium_device *pdev,
+		struct usb_ctrlrequest *ctrl, int set)
+{
+	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		return gadget_ep0_handle_feature_device(pdev, ctrl, set);
+	
case USB_RECIP_INTERFACE: + return gadget_ep0_handle_feature_intf(pdev, ctrl, set); + case USB_RECIP_ENDPOINT: + return gadget_ep0_handle_feature_endpoint(pdev, ctrl, set); + default: + return -EINVAL; + } +} + +static int gadget_ep0_set_address(struct phytium_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + enum usb_device_state state = pdev->gadget.state; + struct gadget_slot_ctx *slot_ctx; + unsigned int slot_state; + int ret; + u32 addr; + + addr = le16_to_cpu(ctrl->wValue); + if (addr > 127) { + dev_err(pdev->dev, "Invalid device address %d\n", addr); + return -EINVAL; + } + + slot_ctx = gadget_get_slot_ctx(&pdev->out_ctx); + + if (state == USB_STATE_CONFIGURED) { + dev_err(pdev->dev, "Can't Set Address from Configured State\n"); + return -EINVAL; + } + + pdev->device_address = le16_to_cpu(ctrl->wValue); + + slot_ctx = gadget_get_slot_ctx(&pdev->out_ctx); + slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); + if (slot_state == SLOT_STATE_ADDRESSED) + gadget_reset_device(pdev); + + ret = gadget_setup_device(pdev, SETUP_CONTEXT_ADDRESS); + if (ret) + return ret; + + if (addr) + usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS); + else + usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT); + + return 0; +} + +static int gadget_ep0_set_config(struct phytium_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + enum usb_device_state state = pdev->gadget.state; + u32 cfg; + int ret; + + cfg = le16_to_cpu(ctrl->wValue); + + switch (state) { + case USB_STATE_ADDRESS: + break; + case USB_STATE_CONFIGURED: + break; + default: + dev_err(pdev->dev, "Set Configuration - bad device state\n"); + return -EINVAL; + } + + ret = gadget_ep0_delegate_req(pdev, ctrl); + if (ret) + return ret; + + if (!cfg) + usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS); + + return 0; +} + +static int gadget_ep0_set_sel(struct phytium_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + enum usb_device_state state = pdev->gadget.state; + u16 wLength; + + if (state == 
USB_STATE_DEFAULT) + return -EINVAL; + + wLength = le16_to_cpu(ctrl->wLength); + + if (wLength != 6) { + dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n", wLength); + return -EINVAL; + } + + pdev->ep0_preq.request.length = 6; + pdev->ep0_preq.request.buf = pdev->setup_buf; + + return gadget_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq); +} + +static int gadget_ep0_set_isoch_delay(struct phytium_device *pdev, + struct usb_ctrlrequest *ctrl) +{ + if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength)) + return -EINVAL; + + pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue); + + return 0; +} + +static int gadget_ep0_std_request(struct phytium_device *pdev, struct usb_ctrlrequest *ctrl) +{ + int ret; + + switch (ctrl->bRequest) { + case USB_REQ_GET_STATUS: + ret = gadget_ep0_handle_status(pdev, ctrl); + break; + case USB_REQ_CLEAR_FEATURE: + ret = gadget_ep0_handle_feature(pdev, ctrl, 0); + break; + case USB_REQ_SET_FEATURE: + ret = gadget_ep0_handle_feature(pdev, ctrl, 1); + break; + case USB_REQ_SET_ADDRESS: + ret = gadget_ep0_set_address(pdev, ctrl); + break; + case USB_REQ_SET_CONFIGURATION: + ret = gadget_ep0_set_config(pdev, ctrl); + break; + case USB_REQ_SET_SEL: + ret = gadget_ep0_set_sel(pdev, ctrl); + break; + case USB_REQ_SET_ISOCH_DELAY: + ret = gadget_ep0_set_isoch_delay(pdev, ctrl); + break; + case USB_REQ_SET_INTERFACE: + list_add_tail(&pdev->ep0_preq.list, &pdev->ep0_preq.pep->pending_list); + ret = gadget_ep0_delegate_req(pdev, ctrl); + if (ret == -EBUSY) + ret = 0; + + list_del(&pdev->ep0_preq.list); + break; + default: + ret = gadget_ep0_delegate_req(pdev, ctrl); + break; + } + + return ret; +} + +int gadget_status_stage(struct phytium_device *pdev) +{ + pdev->ep0_stage = GADGET_STATUS_STAGE; + pdev->ep0_preq.request.length = 0; + + return gadget_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq); +} + +static void gadget_ep0_stall(struct phytium_device *pdev) +{ + struct gadget_request *preq; + struct gadget_ep *pep; + + pep = 
&pdev->eps[0]; + preq = next_request(&pep->pending_list); + + if (pdev->three_stage_setup) { + gadget_halt_endpoint(pdev, pep, true); + if (preq) + gadget_giveback(pep, preq, -ECONNRESET); + } else { + pep->ep_state |= EP0_HALTED_STATUS; + + if (preq) + list_del(&preq->list); + + gadget_status_stage(pdev); + } +} + +static void gadget_setup_analyze(struct phytium_device *pdev) +{ + struct usb_ctrlrequest *ctrl = &pdev->setup; + int ret = 0; + u16 len; + + if (!pdev->gadget_driver) + goto out; + + if (pdev->gadget.state == USB_STATE_NOTATTACHED) { + dev_err(pdev->dev, "Err: Setup detected in unattached state\n"); + ret = -EINVAL; + goto out; + } + + if (pdev->eps[0].ep_state & EP_HALTED) + gadget_halt_endpoint(pdev, &pdev->eps[0], 0); + + if (!list_empty(&pdev->eps[0].pending_list)) { + struct gadget_request *req; + + req = next_request(&pdev->eps[0].pending_list); + ep_dequeue(&pdev->eps[0], req); + } + + len = le16_to_cpu(ctrl->wLength); + if (!len) { + pdev->three_stage_setup = false; + pdev->ep0_expect_in = false; + } else { + pdev->three_stage_setup = true; + pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN); + } + + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) + ret = gadget_ep0_std_request(pdev, ctrl); + else + ret = gadget_ep0_delegate_req(pdev, ctrl); + + if (!len) + pdev->ep0_stage = GADGET_STATUS_STAGE; + + if (ret == USB_GADGET_DELAYED_STATUS) + return; + +out: + if (ret < 0) + gadget_ep0_stall(pdev); + else if (pdev->ep0_stage == GADGET_STATUS_STAGE) + gadget_status_stage(pdev); +} + +static void gadget_handle_tx_nrdy(struct phytium_device *pdev, struct gadget_transfer_event *event) +{ + struct gadget_generic_trb *generic; + struct gadget_ring *ep_ring; + struct gadget_ep *pep; + int cur_stream, ep_index, host_sid, dev_sid; + + generic = (struct gadget_generic_trb *)event; + ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; + dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0])); + host_sid = 
TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2])); + + pep = &pdev->eps[ep_index]; + + if (!(pep->ep_state & EP_HAS_STREAMS)) + return; + + if (host_sid == STREAM_PRIME_ACK) { + pep->stream_info.first_prime_det = 1; + for (cur_stream = 1; cur_stream < pep->stream_info.num_streams; cur_stream++) { + ep_ring = pep->stream_info.stream_rings[cur_stream]; + ep_ring->stream_active = 1; + ep_ring->stream_rejected = 0; + } + } + + if (host_sid == STREAM_REJECTED) { + struct gadget_td *td, *td_temp; + + pep->stream_info.drbls_count--; + ep_ring = pep->stream_info.stream_rings[dev_sid]; + ep_ring->stream_active = 0; + ep_ring->stream_rejected = 1; + + list_for_each_entry_safe(td, td_temp, &ep_ring->td_list, td_list) + td->drbl = 0; + } + + gadget_ring_doorbell_for_active_rings(pdev, pep); +} + +static bool gadget_handle_event(struct phytium_device *pdev) +{ + unsigned int comp_code; + union gadget_trb *event; + bool update_ptrs = true; + u32 cycle_bit, flags; + int ret = 0; + + event = pdev->event_ring->dequeue; + flags = le32_to_cpu(event->event_cmd.flags); + cycle_bit = (flags & TRB_CYCLE); + + if (cycle_bit != pdev->event_ring->cycle_state) + return false; + + /* + * Barrier between reading the TRB_CYCLE (valid) flag above and any + * reads of the event's flags/data below. 
+ */ + rmb(); + + switch (flags & TRB_TYPE_BITMASK) { + case TRB_TYPE(TRB_COMPLETION): + gadget_inc_deq(pdev, pdev->cmd_ring); + break; + case TRB_TYPE(TRB_PORT_STATUS): + gadget_handle_port_status(pdev, event); + update_ptrs = false; + break; + case TRB_TYPE(TRB_TRANSFER): + ret = gadget_handle_tx_event(pdev, &event->trans_event); + if (ret >= 0) + update_ptrs = false; + break; + case TRB_TYPE(TRB_SETUP): + pdev->ep0_stage = GADGET_SETUP_STAGE; + pdev->setup_id = TRB_SETUPID_TO_TYPE(flags); + pdev->setup_speed = TRB_SETUP_SPEEDID(flags); + pdev->setup = *((struct usb_ctrlrequest *)&event->trans_event.buffer); + + gadget_setup_analyze(pdev); + break; + case TRB_TYPE(TRB_ENDPOINT_NRDY): + gadget_handle_tx_nrdy(pdev, &event->trans_event); + break; + case TRB_TYPE(TRB_HC_EVENT): { + comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2])); + switch (comp_code) { + case COMP_EVENT_RING_FULL_ERROR: + dev_err(pdev->dev, "Event Ring Full\n"); + break; + default: + dev_err(pdev->dev, "Controller error code 0x%02x\n", comp_code); + } + break; + } + default: + dev_warn(pdev->dev, "Error unknown event type %ld\n", TRB_FIELD_TO_TYPE(flags)); + } + + if (update_ptrs) + gadget_inc_deq(pdev, pdev->event_ring); + + return true; +} + +irqreturn_t gadget_irq_handler(int irq, void *priv) +{ + struct phytium_device *pdev = (struct phytium_device *)priv; + u32 irq_pending; + u32 status; + union gadget_trb *event_ring_deq; + unsigned long flags; + int counter = 0; + + spin_lock_irqsave(&pdev->lock, flags); + status = readl(&pdev->op_regs->status); + + if (status == ~(u32)0) { + gadget_died(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + return IRQ_HANDLED; + } + + if (!(status & STS_EINT)) { + spin_unlock_irqrestore(&pdev->lock, flags); + return IRQ_NONE; + } + + writel(status | STS_EINT, &pdev->op_regs->status); + irq_pending = readl(&pdev->ir_set->irq_pending); + irq_pending |= IMAN_IP; + writel(irq_pending, &pdev->ir_set->irq_pending); + + if (status & STS_FATAL) { + 
gadget_died(pdev); + spin_unlock_irqrestore(&pdev->lock, flags); + return IRQ_HANDLED; + } + + if (pdev->gadget_state & (GADGET_STATE_HALTED | GADGET_STATE_DYING)) { + if (pdev->gadget_driver) + gadget_died(pdev); + + spin_unlock_irqrestore(&pdev->lock, flags); + return IRQ_HANDLED; + } + + event_ring_deq = pdev->event_ring->dequeue; + + while (gadget_handle_event(pdev)) { + if (++counter >= TRBS_PER_EV_DEQ_UPDATE) { + gadget_update_erst_dequeue(pdev, event_ring_deq, 0); + event_ring_deq = pdev->event_ring->dequeue; + counter = 0; + } + } + + gadget_update_erst_dequeue(pdev, event_ring_deq, 1); + spin_unlock_irqrestore(&pdev->lock, flags); + + return IRQ_HANDLED; +} +void gadget_queue_address_device(struct phytium_device *pdev, + dma_addr_t in_ctx_ptr, enum gadget_setup_dev setup) +{ + gadget_queue_command(pdev, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(pdev->slot_id) | + (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0)); +} + +static int gadget_prepare_transfer(struct phytium_device *pdev, + struct gadget_request *preq, unsigned int num_trbs) +{ + struct gadget_ring *ep_ring; + int ret; + + ep_ring = gadget_get_transfer_ring(pdev, preq->pep, preq->request.stream_id); + if (!ep_ring) + return -EINVAL; + + ret = gadget_prepare_ring(pdev, ep_ring, GET_EP_CTX_STATE(preq->pep->out_ctx), + num_trbs, GFP_ATOMIC); + if (ret) + return ret; + + INIT_LIST_HEAD(&preq->td.td_list); + preq->td.preq = preq; + + list_add_tail(&preq->td.td_list, &ep_ring->td_list); + ep_ring->num_tds++; + preq->pep->stream_info.td_count++; + + preq->td.start_seg = ep_ring->enq_seg; + preq->td.first_trb = ep_ring->enqueue; + + return 0; +} + +static u32 gadget_td_remainder(struct phytium_device *pdev, int transferred, int trb_buff_len, + unsigned int td_total_len, struct gadget_request *preq, + bool more_trbs_coming, bool zlp) +{ + u32 maxp, total_packet_count; + + if (zlp) + return 1; + + if (!more_trbs_coming || (transferred == 0 && 
trb_buff_len == 0) + || trb_buff_len == td_total_len) + return 0; + + maxp = usb_endpoint_maxp(preq->pep->endpoint.desc); + total_packet_count = DIV_ROUND_UP(td_total_len, maxp); + + return (total_packet_count - ((transferred + trb_buff_len) / maxp)); +} + +int gadget_queue_ctrl_tx(struct phytium_device *pdev, struct gadget_request *preq) +{ + u32 field, length_field, remainder; + struct gadget_ep *pep = preq->pep; + struct gadget_ring *ep_ring; + int num_trbs; + int ret; + + ep_ring = gadget_request_to_transfer_ring(pdev, preq); + if (!ep_ring) + return -EINVAL; + + num_trbs = (pdev->three_stage_setup) ? 2 : 1; + + ret = gadget_prepare_transfer(pdev, preq, num_trbs); + if (ret) + return ret; + + if (pdev->ep0_expect_in) + field = TRB_TYPE(TRB_DATA) | TRB_IOC; + else + field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC; + + if (preq->request.length > 0) { + remainder = gadget_td_remainder(pdev, 0, preq->request.length, + preq->request.length, preq, 1, 0); + + length_field = TRB_LEN(preq->request.length) | TRB_TD_SIZE(remainder) | + TRB_INTR_TARGET(0); + + if (pdev->ep0_expect_in) + field |= TRB_DIR_IN; + + gadget_queue_trb(pdev, ep_ring, true, lower_32_bits(preq->request.dma), + upper_32_bits(preq->request.dma), length_field, field | + ep_ring->cycle_state | TRB_SETUPID(pdev->setup_id) | + pdev->setup_speed); + + pdev->ep0_stage = GADGET_DATA_STAGE; + } + + preq->td.last_trb = ep_ring->enqueue; + + if (preq->request.length == 0) + field = ep_ring->cycle_state; + else + field = (ep_ring->cycle_state ^ 1); + + if (preq->request.length > 0 && pdev->ep0_expect_in) + field |= TRB_DIR_IN; + + if (pep->ep_state & EP0_HALTED_STATUS) { + pep->ep_state &= ~EP0_HALTED_STATUS; + field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL); + } else { + field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK); + } + + gadget_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0), field | TRB_IOC | + TRB_SETUPID(pdev->setup_id) | TRB_TYPE(TRB_STATUS) | pdev->setup_speed); + + gadget_ring_ep_doorbell(pdev, pep, 
preq->request.stream_id); + + return 0; +} + +static unsigned int gadget_count_trbs(u64 addr, u64 len) +{ + unsigned int num_trbs; + + num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), TRB_MAX_BUFF_SIZE); + + if (num_trbs == 0) + num_trbs++; + + return num_trbs; +} + +static unsigned int count_trbs_needed(struct gadget_request *preq) +{ + return gadget_count_trbs(preq->request.dma, preq->request.length); +} + +static unsigned int count_sg_trbs_needed(struct gadget_request *preq) +{ + unsigned int i, len, full_len, num_trbs = 0; + struct scatterlist *sg; + + full_len = preq->request.length; + + for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) { + len = sg_dma_len(sg); + num_trbs += gadget_count_trbs(sg_dma_address(sg), len); + len = min(len, full_len); + full_len -= len; + if (full_len == 0) + break; + } + + return num_trbs; +} + +static int gadget_align_td(struct phytium_device *pdev, struct gadget_request *preq, + u32 enqd_len, u32 *trb_buff_len, struct gadget_segment *seg) +{ + struct device *dev = pdev->dev; + unsigned int unalign, max_pkt; + u32 new_buff_len; + + max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc); + unalign = (enqd_len + *trb_buff_len) % max_pkt; + + if (unalign == 0) + return 0; + + if (*trb_buff_len > unalign) { + *trb_buff_len -= unalign; + return 0; + } + + new_buff_len = max_pkt - (enqd_len % max_pkt); + + if (new_buff_len > (preq->request.length - enqd_len)) + new_buff_len = preq->request.length - enqd_len; + + if (preq->direction) { + sg_pcopy_to_buffer(preq->request.sg, preq->request.num_mapped_sgs, + seg->bounce_buf, new_buff_len, enqd_len); + + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); + } else { + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_FROM_DEVICE); + } + + if (dma_mapping_error(dev, seg->bounce_dma)) { + dev_warn(dev, "Failed mapping bounce buffer, not aligning\n"); + return 0; + } + + *trb_buff_len = new_buff_len; + seg->bounce_len = 
new_buff_len; + seg->bounce_offs = enqd_len; + + return 1; +} + +static void gadget_check_trb_math(struct gadget_request *preq, int running_total) +{ + if (running_total != preq->request.length) + dev_err(preq->pep->pdev->dev, + "%s - Miscalculated tx length, queued %#x, asked for %#x (%d)\n", + preq->pep->name, running_total, preq->request.length, + preq->request.actual); +} + +int gadget_queue_bulk_tx(struct phytium_device *pdev, struct gadget_request *preq) +{ + unsigned int enqd_len, block_len, trb_buff_len, full_len; + unsigned int start_cycle, num_sgs = 0; + struct gadget_generic_trb *start_trb; + u32 field, length_field, remainder; + struct scatterlist *sg = NULL; + bool more_trbs_coming = true; + bool need_zero_pkt = false; + bool zero_len_trb = false; + struct gadget_ring *ring; + bool first_trb = true; + unsigned int num_trbs; + struct gadget_ep *pep; + u64 addr, send_addr; + int sent_len, ret; + + ring = gadget_request_to_transfer_ring(pdev, preq); + if (!ring) + return -EINVAL; + + full_len = preq->request.length; + + if (preq->request.num_sgs) { + num_sgs = preq->request.num_sgs; + sg = preq->request.sg; + addr = (u64)sg_dma_address(sg); + block_len = sg_dma_len(sg); + num_trbs = count_sg_trbs_needed(preq); + } else { + num_trbs = count_trbs_needed(preq); + addr = (u64)preq->request.dma; + block_len = full_len; + } + + pep = preq->pep; + + if (preq->request.zero && preq->request.length && + IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) { + need_zero_pkt = true; + num_trbs++; + } + + ret = gadget_prepare_transfer(pdev, preq, num_trbs); + if (ret) + return ret; + + start_trb = &ring->enqueue->generic; + start_cycle = ring->cycle_state; + send_addr = addr; + + for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len; + enqd_len += trb_buff_len) { + field = TRB_TYPE(TRB_NORMAL); + + trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); + trb_buff_len = min(trb_buff_len, block_len); + if (enqd_len + trb_buff_len > full_len) + 
trb_buff_len = full_len - enqd_len; + + if (first_trb) { + first_trb = false; + if (start_cycle == 0) + field |= TRB_CYCLE; + } else { + field |= ring->cycle_state; + } + + if (enqd_len + trb_buff_len < full_len || need_zero_pkt) { + field |= TRB_CHAIN; + if (gadget_trb_is_link(ring->enqueue + 1)) { + if (gadget_align_td(pdev, preq, enqd_len, + &trb_buff_len, ring->enq_seg)) { + send_addr = ring->enq_seg->bounce_dma; + preq->td.bounce_seg = ring->enq_seg; + } + } + } + + if (enqd_len + trb_buff_len >= full_len) { + if (need_zero_pkt && !zero_len_trb) { + zero_len_trb = true; + } else { + zero_len_trb = false; + field &= ~TRB_CHAIN; + field |= TRB_IOC; + more_trbs_coming = false; + need_zero_pkt = false; + preq->td.last_trb = ring->enqueue; + } + } + + if (!preq->direction) + field |= TRB_ISP; + + remainder = gadget_td_remainder(pdev, enqd_len, trb_buff_len, full_len, preq, + more_trbs_coming, zero_len_trb); + + length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); + + gadget_queue_trb(pdev, ring, more_trbs_coming, lower_32_bits(send_addr), + upper_32_bits(send_addr), length_field, field); + + addr += trb_buff_len; + sent_len = trb_buff_len; + + while (sg && sent_len >= block_len) { + --num_sgs; + sent_len -= block_len; + if (num_sgs != 0) { + sg = sg_next(sg); + block_len = sg_dma_len(sg); + addr = (u64)sg_dma_address(sg); + addr += sent_len; + } + } + + block_len -= sent_len; + send_addr = addr; + } + + gadget_check_trb_math(preq, enqd_len); + ret = gadget_giveback_first_trb(pdev, pep, preq->request.stream_id, start_cycle, start_trb); + if (ret) + preq->td.drbl = 1; + + return 0; +} + +static unsigned int count_isoc_trbs_needed(struct gadget_request *preq) +{ + return gadget_count_trbs(preq->request.dma, preq->request.length); +} + +static unsigned int gadget_get_burst_count(struct phytium_device *pdev, + struct gadget_request *preq, unsigned int total_packet_count) +{ + unsigned int max_burst; + + if (pdev->gadget.speed < 
USB_SPEED_SUPER) + return 0; + + max_burst = preq->pep->endpoint.comp_desc->bMaxBurst; + + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; +} + +static unsigned int gadget_get_last_burst_packet_count(struct phytium_device *pdev, + struct gadget_request *preq, unsigned int total_packet_count) +{ + unsigned int max_burst, residue; + + if (pdev->gadget.speed >= USB_SPEED_SUPER) { + max_burst = preq->pep->endpoint.comp_desc->bMaxBurst; + residue = total_packet_count % (max_burst + 1); + + if (residue == 0) + return max_burst; + + return residue - 1; + } + + if (total_packet_count == 0) + return 0; + + return total_packet_count - 1; +} + +static int gadget_queue_isoc_tx(struct phytium_device *pdev, struct gadget_request *preq) +{ + int trb_buff_len, td_len, td_remain_len, ret; + unsigned int burst_count, last_burst_pkt, total_pkt_count, max_pkt; + struct gadget_generic_trb *start_trb; + bool more_trbs_coming = true; + struct gadget_ring *ep_ring; + u32 field, length_field; + int start_cycle, trbs_per_td, i; + int running_total = 0; + u64 addr; + + ep_ring = preq->pep->ring; + start_trb = &ep_ring->enqueue->generic; + start_cycle = ep_ring->cycle_state; + td_len = preq->request.length; + addr = (u64)preq->request.dma; + td_remain_len = td_len; + + max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc); + total_pkt_count = DIV_ROUND_UP(td_len, max_pkt); + + if (total_pkt_count == 0) + total_pkt_count++; + + burst_count = gadget_get_burst_count(pdev, preq, total_pkt_count); + last_burst_pkt = gadget_get_last_burst_packet_count(pdev, preq, total_pkt_count); + trbs_per_td = count_isoc_trbs_needed(preq); + + ret = gadget_prepare_transfer(pdev, preq, trbs_per_td); + if (ret) + goto cleanup; + + field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) | TRB_SIA | TRB_TBC(burst_count); + + if (!start_cycle) + field |= TRB_CYCLE; + + for (i = 0; i < trbs_per_td; i++) { + u32 remainder; + + trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr); + if (trb_buff_len > 
td_remain_len) + trb_buff_len = td_remain_len; + + remainder = gadget_td_remainder(pdev, running_total, trb_buff_len, td_len, + preq, more_trbs_coming, 0); + + length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0); + + if (i) { + field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state; + length_field |= TRB_TD_SIZE(remainder); + } else { + length_field |= TRB_TD_SIZE_TBC(burst_count); + } + + if (usb_endpoint_dir_out(preq->pep->endpoint.desc)) + field |= TRB_ISP; + + if (i < trbs_per_td - 1) { + more_trbs_coming = true; + field |= TRB_CHAIN; + } else { + more_trbs_coming = false; + preq->td.last_trb = ep_ring->enqueue; + field |= TRB_IOC; + } + + gadget_queue_trb(pdev, ep_ring, more_trbs_coming, lower_32_bits(addr), + upper_32_bits(addr), length_field, field); + + running_total += trb_buff_len; + addr += trb_buff_len; + td_remain_len -= trb_buff_len; + } + + if (running_total != td_len) { + dev_err(pdev->dev, "ISOC TD length unmatch\n"); + ret = -EINVAL; + goto cleanup; + } + + gadget_giveback_first_trb(pdev, preq->pep, preq->request.stream_id, start_cycle, start_trb); + + return 0; + +cleanup: + list_del_init(&preq->td.td_list); + ep_ring->num_tds--; + preq->td.last_trb = ep_ring->enqueue; + gadget_td_to_noop(pdev, ep_ring, &preq->td, true); + + ep_ring->enqueue = preq->td.first_trb; + ep_ring->enq_seg = preq->td.start_seg; + ep_ring->cycle_state = start_cycle; + + return ret; +} + +int gadget_queue_isoc_tx_prepare(struct phytium_device *pdev, struct gadget_request *preq) +{ + struct gadget_ring *ep_ring; + u32 ep_state; + int num_trbs, ret; + + ep_ring = preq->pep->ring; + ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx); + num_trbs = count_isoc_trbs_needed(preq); + + ret = gadget_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC); + if (ret) + return ret; + + return gadget_queue_isoc_tx(pdev, preq); +} + diff --git a/drivers/usb/phytium_usb_v2/ring.h b/drivers/usb/phytium_usb_v2/ring.h new file mode 100644 index 
0000000000000000000000000000000000000000..dca38d781ed85fbaf5475e80986b50856b55aa98 --- /dev/null +++ b/drivers/usb/phytium_usb_v2/ring.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PHYTIUM_USB_RING_H__ +#define __LINUX_PHYTIUM_USB_RING_H__ + +#include "gadget.h" +#include + +dma_addr_t gadget_trb_virt_to_dma(struct gadget_segment *seg, + union gadget_trb *trb); + +void gadget_queue_configure_endpoint(struct phytium_device *pdev, + dma_addr_t in_ctx_ptr); + +void gadget_ring_doorbell_for_active_rings(struct phytium_device *pdev, + struct gadget_ep *pep); + +void gadget_initialize_ring_info(struct gadget_ring *ring); + +irqreturn_t gadget_irq_handler(int irq, void *priv); + +irqreturn_t gadget_thread_irq_handler(int irq, void *data); + +void gadget_queue_address_device(struct phytium_device *pdev, + dma_addr_t in_ctx_ptr, enum gadget_setup_dev setup); + +int gadget_queue_ctrl_tx(struct phytium_device *pdev, struct gadget_request *preq); +int gadget_queue_bulk_tx(struct phytium_device *pdev, struct gadget_request *preq); +int gadget_queue_isoc_tx_prepare(struct phytium_device *pdev, struct gadget_request *preq); +#endif + diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig index 2f80c2792dbdaf8bc7e161924326eb9230569a76..40e816650b7b498927647e3b185e9c1ba70d66b1 100644 --- a/drivers/usb/typec/Kconfig +++ b/drivers/usb/typec/Kconfig @@ -64,6 +64,18 @@ config TYPEC_ANX7411 If you choose to build this driver as a dynamically linked module, the module will be called anx7411.ko. +config TYPEC_PHYTIUM_RS + tristate "Phytium USB Type-C DRP Port controller driver" + depends on I2C + depends on USB_ROLE_SWITCH + depends on POWER_SUPPLY + help + Say Y or M here if your system has PHYTIUM USB Type-C DRP Port + controller driver. + + If you choose to build this driver as a dynamically linked module, the + module will be called role-switch-phytium.ko. 
+ config TYPEC_RT1719 tristate "Richtek RT1719 Sink Only Type-C controller driver" depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile index 7a368fea61bc9fdf406eb41298356a57817f22ba..5bcde0cc5b65a9726360034dc458afecbe0f636a 100644 --- a/drivers/usb/typec/Makefile +++ b/drivers/usb/typec/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_TYPEC_TCPM) += tcpm/ obj-$(CONFIG_TYPEC_UCSI) += ucsi/ obj-$(CONFIG_TYPEC_TPS6598X) += tipd/ obj-$(CONFIG_TYPEC_ANX7411) += anx7411.o +obj-$(CONFIG_TYPEC_PHYTIUM_RS) += role-switch-phytium.o obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o obj-$(CONFIG_TYPEC_STUSB160X) += stusb160x.o obj-$(CONFIG_TYPEC_RT1719) += rt1719.o diff --git a/drivers/usb/typec/role-switch-phytium.c b/drivers/usb/typec/role-switch-phytium.c new file mode 100644 index 0000000000000000000000000000000000000000..2f3fa23e1e1ef6e9b623a6af69b48594146591bc --- /dev/null +++ b/drivers/usb/typec/role-switch-phytium.c @@ -0,0 +1,1655 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Driver for USB Type-C and PD controller + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RS_PHYTIUM_DRV_VERSION "1.0.0" + +#define RS_PHYTIUM_TCPC_ADDRESS1 0x58 +#define RS_PHYTIUM_TCPC_ADDRESS2 0x56 +#define RS_PHYTIUM_TCPC_ADDRESS3 0x54 +#define RS_PHYTIUM_TCPC_ADDRESS4 0x52 +#define RS_PHYTIUM_SPI_ADDRESS1 0x7e +#define RS_PHYTIUM_SPI_ADDRESS2 0x6e +#define RS_PHYTIUM_SPI_ADDRESS3 0x64 +#define RS_PHYTIUM_SPI_ADDRESS4 0x62 + +struct role_sw_phytium_i2c_select { + u8 tcpc_address; + u8 spi_address; +}; + +#define RS_PHYTIUM_VID_ANALOGIX 0x1F29 +#define RS_PHYTIUM_PID_ANALOGIX 0x7411 + +/* TCPC register define */ + +#define RS_PHYTIUM_ANALOG_CTRL_10 0xAA + +#define RS_PHYTIUM_STATUS_LEN 2 +#define RS_PHYTIUM_ALERT_0 0xCB +#define RS_PHYTIUM_RECEIVED_MSG BIT(7) +#define RS_PHYTIUM_SOFTWARE_INT BIT(6) +#define RS_PHYTIUM_MSG_LEN 32 +#define RS_PHYTIUM_HEADER_LEN 2 +#define RS_PHYTIUM_MSG_HEADER 0x00 +#define RS_PHYTIUM_MSG_TYPE 0x01 +#define RS_PHYTIUM_MSG_RAWDATA 0x02 +#define RS_PHYTIUM_MSG_LEN_MASK 0x1F + +#define RS_PHYTIUM_ALERT_1 0xCC +#define RS_PHYTIUM_INTP_POW_ON BIT(7) +#define RS_PHYTIUM_INTP_POW_OFF BIT(6) + +#define RS_PHYTIUM_VBUS_THRESHOLD_H 0xDD +#define RS_PHYTIUM_VBUS_THRESHOLD_L 0xDE + +#define RS_PHYTIUM_FW_CTRL_0 0xF0 +#define RS_PHYTIUM_UNSTRUCT_VDM_EN BIT(0) +#define RS_PHYTIUM_DELAY_200MS BIT(1) +#define RS_PHYTIUM_VSAFE0 0 +#define RS_PHYTIUM_VSAFE1 BIT(2) +#define RS_PHYTIUM_VSAFE2 BIT(3) +#define RS_PHYTIUM_VSAFE3 (BIT(2) | BIT(3)) +#define RS_PHYTIUM_FRS_EN BIT(7) + +#define RS_PHYTIUM_FW_PARAM 0xF1 +#define RS_PHYTIUM_DONGLE_IOP BIT(0) + +#define RS_PHYTIUM_FW_CTRL_2 0xF7 +#define RS_PHYTIUM_SINK_CTRL_DIS_FLAG BIT(5) + +/* SPI register define */ +#define RS_PHYTIUM_OCM_CTRL_0 0x6E +#define RS_PHYTIUM_OCM_RESET BIT(6) + +#define RS_PHYTIUM_MAX_VOLTAGE 0xAC +#define 
RS_PHYTIUM_MAX_POWER 0xAD +#define RS_PHYTIUM_MIN_POWER 0xAE + +#define RS_PHYTIUM_REQUEST_VOLTAGE 0xAF +#define RS_PHYTIUM_VOLTAGE_UNIT 100 /* mV per unit */ + +#define RS_PHYTIUM_REQUEST_CURRENT 0xB1 +#define RS_PHYTIUM_CURRENT_UNIT 50 /* mA per unit */ + +#define RS_PHYTIUM_CMD_SEND_BUF 0xC0 +#define RS_PHYTIUM_CMD_RECV_BUF 0xE0 + +#define RS_PHYTIUM_REQ_VOL_20V_IN_100MV 0xC8 +#define RS_PHYTIUM_REQ_CUR_2_25A_IN_50MA 0x2D +#define RS_PHYTIUM_REQ_CUR_3_25A_IN_50MA 0x41 + +#define RS_PHYTIUM_DEF_5V 5000 +#define RS_PHYTIUM_DEF_1_5A 1500 + +#define RS_PHYTIUM_LOBYTE(w) ((u8)((w) & 0xFF)) +#define RS_PHYTIUM_HIBYTE(w) ((u8)(((u16)(w) >> 8) & 0xFF)) + +enum role_sw_phytium_typec_message_type { + RS_PHYTIUM_TYPE_SRC_CAP = 0x00, + RS_PHYTIUM_TYPE_SNK_CAP = 0x01, + RS_PHYTIUM_TYPE_SNK_IDENTITY = 0x02, + RS_PHYTIUM_TYPE_SVID = 0x03, + RS_PHYTIUM_TYPE_SET_SNK_DP_CAP = 0x08, + RS_PHYTIUM_TYPE_PSWAP_REQ = 0x10, + RS_PHYTIUM_TYPE_DSWAP_REQ = 0x11, + RS_PHYTIUM_TYPE_VDM = 0x14, + RS_PHYTIUM_TYPE_OBJ_REQ = 0x16, + RS_PHYTIUM_TYPE_DP_ALT_ENTER = 0x19, + RS_PHYTIUM_TYPE_DP_DISCOVER_MODES_INFO = 0x27, + RS_PHYTIUM_TYPE_GET_DP_CONFIG = 0x29, + RS_PHYTIUM_TYPE_DP_CONFIGURE = 0x2A, + RS_PHYTIUM_TYPE_GET_DP_DISCOVER_MODES_INFO = 0x2E, + RS_PHYTIUM_TYPE_GET_DP_ALT_ENTER = 0x2F, +}; + +#define RS_PHYTIUM_FW_CTRL_1 0xB2 +#define RS_PHYTIUM_AUTO_PD_EN BIT(1) +#define RS_PHYTIUM_TRYSRC_EN BIT(2) +#define RS_PHYTIUM_TRYSNK_EN BIT(3) +#define RS_PHYTIUM_FORCE_SEND_RDO BIT(6) + +#define RS_PHYTIUM_FW_VER 0xB4 +#define RS_PHYTIUM_FW_SUBVER 0xB5 + +#define RS_PHYTIUM_INT_MASK 0xB6 +#define RS_PHYTIUM_INT_STS 0xB7 +#define RS_PHYTIUM_OCM_BOOT_UP BIT(0) +#define RS_PHYTIUM_OC_OV_EVENT BIT(1) +#define RS_PHYTIUM_VCONN_CHANGE BIT(2) +#define RS_PHYTIUM_VBUS_CHANGE BIT(3) +#define RS_PHYTIUM_CC_STATUS_CHANGE BIT(4) +#define RS_PHYTIUM_DATA_ROLE_CHANGE BIT(5) +#define RS_PHYTIUM_PR_CONSUMER_GOT_POWER BIT(6) +#define RS_PHYTIUM_HPD_STATUS_CHANGE BIT(7) + +#define RS_PHYTIUM_SYSTEM_STSTUS 0xB8 +/* 0: 
SINK off; 1: SINK on */ +#define RS_PHYTIUM_SINK_STATUS BIT(1) +/* 0: VCONN off; 1: VCONN on*/ +#define RS_PHYTIUM_VCONN_STATUS BIT(2) +/* 0: vbus off; 1: vbus on*/ +#define RS_PHYTIUM_VBUS_STATUS BIT(3) +/* 1: host; 0:device*/ +#define RS_PHYTIUM_DATA_ROLE BIT(5) +/* 0: Chunking; 1: Unchunked*/ +#define RS_PHYTIUM_SUPPORT_UNCHUNKING BIT(6) +/* 0: HPD low; 1: HPD high*/ +#define RS_PHYTIUM_HPD_STATUS BIT(7) + +#define RS_PHYTIUM_DATA_DFP 1 +#define RS_PHYTIUM_DATA_UFP 2 +#define RS_PHYTIUM_POWER_SOURCE 1 +#define RS_PHYTIUM_POWER_SINK 2 + +#define RS_PHYTIUM_CC_STATUS 0xB9 +#define RS_PHYTIUM_CC1_RD BIT(0) +#define RS_PHYTIUM_CC2_RD BIT(4) +#define RS_PHYTIUM_CC1_RA BIT(1) +#define RS_PHYTIUM_CC2_RA BIT(5) +#define RS_PHYTIUM_CC1_RD BIT(0) +#define RS_PHYTIUM_CC1_RP(cc) (((cc) >> 2) & 0x03) +#define RS_PHYTIUM_CC2_RP(cc) (((cc) >> 6) & 0x03) + +#define RS_PHYTIUM_PD_REV_INIT 0xBA + +#define RS_PHYTIUM_PD_EXT_MSG_CTRL 0xBB +#define RS_PHYTIUM_SRC_CAP_EXT_REPLY BIT(0) +#define RS_PHYTIUM_MANUFACTURER_INFO_REPLY BIT(1) +#define RS_PHYTIUM_BATTERY_STS_REPLY BIT(2) +#define RS_PHYTIUM_BATTERY_CAP_REPLY BIT(3) +#define RS_PHYTIUM_ALERT_REPLY BIT(4) +#define RS_PHYTIUM_STATUS_REPLY BIT(5) +#define RS_PHYTIUM_PPS_STATUS_REPLY BIT(6) +#define RS_PHYTIUM_SNK_CAP_EXT_REPLY BIT(7) + +#define RS_PHYTIUM_NO_CONNECT 0x00 +#define RS_PHYTIUM_USB3_1_CONNECTED 0x01 +#define RS_PHYTIUM_DP_ALT_4LANES 0x02 +#define RS_PHYTIUM_USB3_1_DP_2LANES 0x03 +#define RS_PHYTIUM_CC1_CONNECTED 0x01 +#define RS_PHYTIUM_CC2_CONNECTED 0x02 +#define RS_PHYTIUM_SELECT_PIN_ASSIGMENT_C 0x04 +#define RS_PHYTIUM_SELECT_PIN_ASSIGMENT_D 0x08 +#define RS_PHYTIUM_SELECT_PIN_ASSIGMENT_E 0x10 +#define RS_PHYTIUM_SELECT_PIN_ASSIGMENT_U 0x00 +#define RS_PHYTIUM_REDRIVER_ADDRESS 0x20 +#define RS_PHYTIUM_REDRIVER_OFFSET 0x00 + +#define RS_PHYTIUM_DP_SVID 0xFF01 +#define RS_PHYTIUM_VDM_ACK 0x40 +#define RS_PHYTIUM_VDM_CMD_RES 0x00 +#define RS_PHYTIUM_VDM_CMD_DIS_ID 0x01 +#define RS_PHYTIUM_VDM_CMD_DIS_SVID 0x02 
+#define RS_PHYTIUM_VDM_CMD_DIS_MOD 0x03 +#define RS_PHYTIUM_VDM_CMD_ENTER_MODE 0x04 +#define RS_PHYTIUM_VDM_CMD_EXIT_MODE 0x05 +#define RS_PHYTIUM_VDM_CMD_ATTENTION 0x06 +#define RS_PHYTIUM_VDM_CMD_GET_STS 0x10 +#define RS_PHYTIUM_VDM_CMD_AND_ACK_MASK 0x5F + +#define RS_PHYTIUM_MAX_ALTMODE 2 + +#define RS_PHYTIUM_HAS_SOURCE_CAP BIT(0) +#define RS_PHYTIUM_HAS_SINK_CAP BIT(1) +#define RS_PHYTIUM_HAS_SINK_WATT BIT(2) + +enum role_sw_phytium_psy_state { + /* copy from drivers/usb/typec/tcpm */ + RS_PHYTIUM_PSY_OFFLINE = 0, + RS_PHYTIUM_PSY_FIXED_ONLINE, + + /* private */ + /* PD stays connected, but disconnect power to bq25700; + * this state can become active when a higher-capacity adapter is plugged in, + * and changes to the ONLINE state when the higher-capacity adapter is plugged out + */ + RS_PHYTIUM_PSY_HANG = 0xff, +}; + +struct role_sw_phytium_typec_params { + int request_current; /* ma */ + int request_voltage; /* mv */ + int cc_connect; + int cc_orientation_valid; + int cc_status; + int data_role; + int power_role; + int vconn_role; + int dp_altmode_enter; + int cust_altmode_enter; + struct usb_role_switch *role_sw; + struct typec_port *port; + struct typec_partner *partner; + struct typec_mux_dev *typec_mux; + struct typec_switch_dev *typec_switch; + struct typec_altmode *amode[RS_PHYTIUM_MAX_ALTMODE]; + struct typec_altmode *port_amode[RS_PHYTIUM_MAX_ALTMODE]; + struct typec_displayport_data data; + int pin_assignment; + struct typec_capability caps; + u32 src_pdo[PDO_MAX_OBJECTS]; + u32 sink_pdo[PDO_MAX_OBJECTS]; + u8 caps_flags; + u8 src_pdo_nr; + u8 sink_pdo_nr; + u8 sink_watt; + u8 sink_voltage; +}; + +#define RS_PHYTIUM_MAX_BUF_LEN 30 +struct role_sw_phytium_fw_msg { + u8 msg_len; + u8 msg_type; + u8 buf[RS_PHYTIUM_MAX_BUF_LEN]; +} __packed; + +struct role_sw_phytium_data { + int fw_version; + int fw_subversion; + struct i2c_client *tcpc_client; + struct i2c_client *spi_client; + struct role_sw_phytium_fw_msg send_msg; + struct role_sw_phytium_fw_msg recv_msg; + struct gpio_desc 
*intp_gpiod; + struct fwnode_handle *connector_fwnode; + struct acpi_device *adev; + struct role_sw_phytium_typec_params typec; + int intp_irq; + struct work_struct work; + struct workqueue_struct *workqueue; + /* Lock for interrupt work queue */ + struct mutex lock; + + enum role_sw_phytium_psy_state psy_online; + enum power_supply_usb_type usb_type; + struct power_supply *psy; + struct power_supply_desc psy_desc; + struct device *dev; +}; + +static u8 snk_identity[] = { + RS_PHYTIUM_LOBYTE(RS_PHYTIUM_VID_ANALOGIX), + RS_PHYTIUM_HIBYTE(RS_PHYTIUM_VID_ANALOGIX), 0x00, 0x82, /* snk_id_hdr */ + 0x00, 0x00, 0x00, 0x00, /* snk_cert */ + 0x00, 0x00, RS_PHYTIUM_LOBYTE(RS_PHYTIUM_PID_ANALOGIX), + RS_PHYTIUM_HIBYTE(RS_PHYTIUM_PID_ANALOGIX), /* 5snk_ama */ +}; + +static u8 dp_caps[4] = {0xC6, 0x00, 0x00, 0x00}; + +static int role_sw_phytium_reg_read(struct i2c_client *client, + u8 reg_addr) +{ + return i2c_smbus_read_byte_data(client, reg_addr); +} + +static int role_sw_phytium_reg_block_read(struct i2c_client *client, + u8 reg_addr, u8 len, u8 *buf) +{ + return i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf); +} + +static int role_sw_phytium_reg_write(struct i2c_client *client, + u8 reg_addr, u8 reg_val) +{ + return i2c_smbus_write_byte_data(client, reg_addr, reg_val); +} + +static int role_sw_phytium_reg_block_write(struct i2c_client *client, + u8 reg_addr, u8 len, u8 *buf) +{ + return i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf); +} + +static struct role_sw_phytium_i2c_select role_sw_phytium_i2c_addr[] = { + {RS_PHYTIUM_TCPC_ADDRESS1, RS_PHYTIUM_SPI_ADDRESS1}, + {RS_PHYTIUM_TCPC_ADDRESS2, RS_PHYTIUM_SPI_ADDRESS2}, + {RS_PHYTIUM_TCPC_ADDRESS3, RS_PHYTIUM_SPI_ADDRESS3}, + {RS_PHYTIUM_TCPC_ADDRESS4, RS_PHYTIUM_SPI_ADDRESS4}, +}; + +static int role_sw_phytium_detect_power_mode(struct role_sw_phytium_data *ctx) +{ + int ret; + int mode; + + ret = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_REQUEST_CURRENT); + if (ret < 0) + return ret; + + 
ctx->typec.request_current = ret * RS_PHYTIUM_CURRENT_UNIT; /* 50ma per unit */ + + ret = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_REQUEST_VOLTAGE); + if (ret < 0) + return ret; + + ctx->typec.request_voltage = ret * RS_PHYTIUM_VOLTAGE_UNIT; /* 100mv per unit */ + + if (ctx->psy_online == RS_PHYTIUM_PSY_OFFLINE) { + ctx->psy_online = RS_PHYTIUM_PSY_FIXED_ONLINE; + ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD; + power_supply_changed(ctx->psy); + } + + if (!ctx->typec.cc_orientation_valid) + return 0; + + if (ctx->typec.cc_connect == RS_PHYTIUM_CC1_CONNECTED) + mode = RS_PHYTIUM_CC1_RP(ctx->typec.cc_status); + else + mode = RS_PHYTIUM_CC2_RP(ctx->typec.cc_status); + if (mode) { + typec_set_pwr_opmode(ctx->typec.port, mode - 1); + return 0; + } + + typec_set_pwr_opmode(ctx->typec.port, TYPEC_PWR_MODE_PD); + + return 0; +} + +static int role_sw_phytium_register_partner(struct role_sw_phytium_data *ctx, + int pd, int accessory) +{ + struct typec_partner_desc desc; + struct typec_partner *partner; + + if (ctx->typec.partner) + return 0; + + desc.usb_pd = pd; + desc.accessory = accessory; + desc.identity = NULL; + partner = typec_register_partner(ctx->typec.port, &desc); + if (IS_ERR(partner)) + return PTR_ERR(partner); + + ctx->typec.partner = partner; + + return 0; +} + +static int role_sw_phytium_detect_cc_orientation(struct role_sw_phytium_data *ctx) +{ + struct device *dev = &ctx->spi_client->dev; + int ret; + int cc1_rd, cc2_rd; + int cc1_ra, cc2_ra; + int cc1_rp, cc2_rp; + + ret = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_CC_STATUS); + if (ret < 0) + return ret; + + ctx->typec.cc_status = ret; + + cc1_rd = ret & RS_PHYTIUM_CC1_RD ? 1 : 0; + cc2_rd = ret & RS_PHYTIUM_CC2_RD ? 1 : 0; + cc1_ra = ret & RS_PHYTIUM_CC1_RA ? 1 : 0; + cc2_ra = ret & RS_PHYTIUM_CC2_RA ? 
1 : 0; + cc1_rp = RS_PHYTIUM_CC1_RP(ret); + cc2_rp = RS_PHYTIUM_CC2_RP(ret); + + /* Debug cable, nothing to do */ + if (cc1_rd && cc2_rd) { + ctx->typec.cc_orientation_valid = 0; + return role_sw_phytium_register_partner(ctx, 0, TYPEC_ACCESSORY_DEBUG); + } + + if (cc1_ra && cc2_ra) { + ctx->typec.cc_orientation_valid = 0; + return role_sw_phytium_register_partner(ctx, 0, TYPEC_ACCESSORY_AUDIO); + } + + ctx->typec.cc_orientation_valid = 1; + + ret = role_sw_phytium_register_partner(ctx, 1, TYPEC_ACCESSORY_NONE); + if (ret) { + dev_err(dev, "register partner\n"); + return ret; + } + + if (cc1_rd || cc1_rp) { + typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_NORMAL); + ctx->typec.cc_connect = RS_PHYTIUM_CC1_CONNECTED; + } + + if (cc2_rd || cc2_rp) { + typec_set_orientation(ctx->typec.port, TYPEC_ORIENTATION_REVERSE); + ctx->typec.cc_connect = RS_PHYTIUM_CC2_CONNECTED; + } + + return 0; +} + +static int role_sw_phytium_set_mux(struct role_sw_phytium_data *ctx, int pin_assignment) +{ + int mode = TYPEC_STATE_SAFE; + + switch (pin_assignment) { + case RS_PHYTIUM_SELECT_PIN_ASSIGMENT_U: + /* default 4 line USB 3.1 */ + mode = TYPEC_STATE_MODAL; + break; + case RS_PHYTIUM_SELECT_PIN_ASSIGMENT_C: + case RS_PHYTIUM_SELECT_PIN_ASSIGMENT_E: + /* 4 line DP */ + mode = TYPEC_STATE_SAFE; + break; + case RS_PHYTIUM_SELECT_PIN_ASSIGMENT_D: + /* 2 line DP, 2 line USB */ + mode = TYPEC_MODE_USB3; + break; + default: + mode = TYPEC_STATE_SAFE; + break; + } + + ctx->typec.pin_assignment = pin_assignment; + + return typec_set_mode(ctx->typec.port, mode); +} + +static int role_sw_phytium_set_usb_role(struct role_sw_phytium_data *ctx, enum usb_role role) +{ + if (!ctx->typec.role_sw) + return 0; + + return usb_role_switch_set_role(ctx->typec.role_sw, role); +} + +static int role_sw_phytium_data_role_detect(struct role_sw_phytium_data *ctx) +{ + int ret; + + ret = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_SYSTEM_STSTUS); + if (ret < 0) + return ret; + + 
ctx->typec.data_role = (ret & RS_PHYTIUM_DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE; + ctx->typec.vconn_role = (ret & RS_PHYTIUM_VCONN_STATUS) ? TYPEC_SOURCE : TYPEC_SINK; + + typec_set_data_role(ctx->typec.port, ctx->typec.data_role); + + typec_set_vconn_role(ctx->typec.port, ctx->typec.vconn_role); + + if (ctx->typec.data_role == TYPEC_HOST) + return role_sw_phytium_set_usb_role(ctx, USB_ROLE_HOST); + + return role_sw_phytium_set_usb_role(ctx, USB_ROLE_DEVICE); +} + +static int role_sw_phytium_power_role_detect(struct role_sw_phytium_data *ctx) +{ + int ret; + + ret = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_SYSTEM_STSTUS); + if (ret < 0) + return ret; + + ctx->typec.power_role = (ret & RS_PHYTIUM_SINK_STATUS) ? TYPEC_SINK : TYPEC_SOURCE; + + if (ctx->typec.power_role == TYPEC_SOURCE) { + ctx->typec.request_current = RS_PHYTIUM_DEF_1_5A; + ctx->typec.request_voltage = RS_PHYTIUM_DEF_5V; + } + + typec_set_pwr_role(ctx->typec.port, ctx->typec.power_role); + + return 0; +} + +static int role_sw_phytium_cc_status_detect(struct role_sw_phytium_data *ctx) +{ + role_sw_phytium_detect_cc_orientation(ctx); + role_sw_phytium_detect_power_mode(ctx); + + return 0; +} + +static void role_sw_phytium_partner_unregister_altmode(struct role_sw_phytium_data *ctx) +{ + int i; + + ctx->typec.dp_altmode_enter = 0; + ctx->typec.cust_altmode_enter = 0; + + for (i = 0; i < RS_PHYTIUM_MAX_ALTMODE; i++) + if (ctx->typec.amode[i]) { + typec_unregister_altmode(ctx->typec.amode[i]); + ctx->typec.amode[i] = NULL; + } + + ctx->typec.pin_assignment = 0; +} + +static int role_sw_phytium_typec_register_altmode(struct role_sw_phytium_data *ctx, + int svid, int vdo) +{ + struct device *dev = &ctx->spi_client->dev; + struct typec_altmode_desc desc; + int err; + int i; + + desc.svid = svid; + desc.vdo = vdo; + + for (i = 0; i < RS_PHYTIUM_MAX_ALTMODE; i++) + if (!ctx->typec.amode[i]) + break; + + desc.mode = i + 1; /* start with 1 */ + + if (i >= RS_PHYTIUM_MAX_ALTMODE) { + dev_err(dev, "no 
altmode space for registering\n"); + return -ENOMEM; + } + + ctx->typec.amode[i] = typec_partner_register_altmode(ctx->typec.partner, + &desc); + if (IS_ERR(ctx->typec.amode[i])) { + dev_err(dev, "failed to register altmode\n"); + err = PTR_ERR(ctx->typec.amode[i]); + ctx->typec.amode[i] = NULL; + return err; + } + + return 0; +} + +static void role_sw_phytium_unregister_partner(struct role_sw_phytium_data *ctx) +{ + if (ctx->typec.partner) { + typec_unregister_partner(ctx->typec.partner); + ctx->typec.partner = NULL; + } +} + +static int role_sw_phytium_update_altmode(struct role_sw_phytium_data *ctx, int svid) +{ + int i; + + if (svid == RS_PHYTIUM_DP_SVID) + ctx->typec.dp_altmode_enter = 1; + else + ctx->typec.cust_altmode_enter = 1; + + for (i = 0; i < RS_PHYTIUM_MAX_ALTMODE; i++) { + if (!ctx->typec.amode[i]) + continue; + + if (ctx->typec.amode[i]->svid == svid) { + typec_altmode_update_active(ctx->typec.amode[i], true); + typec_altmode_notify(ctx->typec.amode[i], + ctx->typec.pin_assignment, + &ctx->typec.data); + break; + } + } + + return 0; +} + +static int role_sw_phytium_register_altmode(struct role_sw_phytium_data *ctx, + bool dp_altmode, u8 *buf) +{ + int ret; + int svid; + int mid; + + if (!ctx->typec.partner) + return 0; + + svid = RS_PHYTIUM_DP_SVID; + if (dp_altmode) { + mid = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); + + return role_sw_phytium_typec_register_altmode(ctx, svid, mid); + } + + svid = (buf[3] << 8) | buf[2]; + if ((buf[0] & RS_PHYTIUM_VDM_CMD_AND_ACK_MASK) != + (RS_PHYTIUM_VDM_ACK | RS_PHYTIUM_VDM_CMD_ENTER_MODE)) + return role_sw_phytium_update_altmode(ctx, svid); + + if ((buf[0] & RS_PHYTIUM_VDM_CMD_AND_ACK_MASK) != + (RS_PHYTIUM_VDM_ACK | RS_PHYTIUM_VDM_CMD_DIS_MOD)) + return 0; + + mid = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24); + + ret = role_sw_phytium_typec_register_altmode(ctx, svid, mid); + if (ctx->typec.cust_altmode_enter) + ret |= role_sw_phytium_update_altmode(ctx, svid); + + return ret; +} 
+ +static int role_sw_phytium_parse_cmd(struct role_sw_phytium_data *ctx, u8 type, u8 *buf, u8 len) +{ + struct device *dev = &ctx->spi_client->dev; + u8 cur_50ma, vol_100mv; + + switch (type) { + case RS_PHYTIUM_TYPE_SRC_CAP: + cur_50ma = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_REQUEST_CURRENT); + vol_100mv = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_REQUEST_VOLTAGE); + + ctx->typec.request_voltage = vol_100mv * RS_PHYTIUM_VOLTAGE_UNIT; + ctx->typec.request_current = cur_50ma * RS_PHYTIUM_CURRENT_UNIT; + + ctx->psy_online = RS_PHYTIUM_PSY_FIXED_ONLINE; + ctx->usb_type = POWER_SUPPLY_USB_TYPE_PD; + power_supply_changed(ctx->psy); + break; + case RS_PHYTIUM_TYPE_SNK_CAP: + break; + case RS_PHYTIUM_TYPE_SVID: + break; + case RS_PHYTIUM_TYPE_SNK_IDENTITY: + break; + case RS_PHYTIUM_TYPE_GET_DP_ALT_ENTER: + /* DP alt mode enter success */ + if (buf[0]) + role_sw_phytium_update_altmode(ctx, RS_PHYTIUM_DP_SVID); + break; + case RS_PHYTIUM_TYPE_DP_ALT_ENTER: + /* Update DP altmode */ + role_sw_phytium_update_altmode(ctx, RS_PHYTIUM_DP_SVID); + break; + case RS_PHYTIUM_TYPE_OBJ_REQ: + role_sw_phytium_detect_power_mode(ctx); + break; + case RS_PHYTIUM_TYPE_DP_CONFIGURE: + role_sw_phytium_set_mux(ctx, buf[1]); + break; + case RS_PHYTIUM_TYPE_DP_DISCOVER_MODES_INFO: + /* Make sure discover modes valid */ + if (buf[0] | buf[1]) + /* Register DP Altmode */ + role_sw_phytium_register_altmode(ctx, 1, buf); + break; + case RS_PHYTIUM_TYPE_VDM: + /* Register other altmode */ + role_sw_phytium_register_altmode(ctx, 0, buf); + break; + default: + dev_err(dev, "ignore message(0x%.02x).\n", type); + break; + } + + return 0; +} + +static u8 checksum(struct device *dev, u8 *buf, u8 len) +{ + u8 ret = 0; + u8 i; + + for (i = 0; i < len; i++) + ret += buf[i]; + + return ret; +} + +static int role_sw_phytium_read_msg_ctrl_status(struct i2c_client *client) +{ + return role_sw_phytium_reg_read(client, RS_PHYTIUM_CMD_SEND_BUF); +} + +static int 
role_sw_phytium_wait_msg_empty(struct i2c_client *client) +{ + int val; + + return readx_poll_timeout(role_sw_phytium_read_msg_ctrl_status, + client, val, (val < 0) || (val == 0), + 2000, 2000 * 150); +} + +static int role_sw_phytium_send_msg(struct role_sw_phytium_data *ctx, u8 type, u8 *buf, u8 size) +{ + struct device *dev = &ctx->spi_client->dev; + struct role_sw_phytium_fw_msg *msg = &ctx->send_msg; + u8 crc; + int ret; + + size = min_t(u8, size, (u8)RS_PHYTIUM_MAX_BUF_LEN); + memcpy(msg->buf, buf, size); + msg->msg_type = type; + /* msg len equals buffer length + msg_type */ + msg->msg_len = size + 1; + + /* Do CRC check for all buffer data and msg_len and msg_type */ + crc = checksum(dev, (u8 *)msg, size + RS_PHYTIUM_HEADER_LEN); + msg->buf[size] = 0 - crc; + + ret = role_sw_phytium_wait_msg_empty(ctx->spi_client); + if (ret) + return ret; + + ret = role_sw_phytium_reg_block_write(ctx->spi_client, + RS_PHYTIUM_CMD_SEND_BUF + 1, size + RS_PHYTIUM_HEADER_LEN, + &msg->msg_type); + ret |= role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_CMD_SEND_BUF, + msg->msg_len); + return ret; +} + +static int role_sw_phytium_process_cmd(struct role_sw_phytium_data *ctx) +{ + struct device *dev = &ctx->spi_client->dev; + struct role_sw_phytium_fw_msg *msg = &ctx->recv_msg; + u8 len; + u8 crc; + int ret; + + /* Read message from firmware */ + ret = role_sw_phytium_reg_block_read(ctx->spi_client, RS_PHYTIUM_CMD_RECV_BUF, + RS_PHYTIUM_MSG_LEN, (u8 *)msg); + if (ret < 0) + return 0; + + if (!msg->msg_len) + return 0; + + ret = role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_CMD_RECV_BUF, 0); + if (ret) + return ret; + + len = msg->msg_len & RS_PHYTIUM_MSG_LEN_MASK; + crc = checksum(dev, (u8 *)msg, len + RS_PHYTIUM_HEADER_LEN); + if (crc) { + dev_err(dev, "message error crc(0x%.02x)\n", crc); + return -ERANGE; + } + + return role_sw_phytium_parse_cmd(ctx, msg->msg_type, msg->buf, len - 1); +} + +static void role_sw_phytium_translate_payload(struct device *dev, __le32 
*payload, + u32 *pdo, int nr, const char *type) +{ + int i; + + if (nr > PDO_MAX_OBJECTS) { + dev_err(dev, "nr(%d) exceed PDO_MAX_OBJECTS(%d)\n", + nr, PDO_MAX_OBJECTS); + + return; + } + + for (i = 0; i < nr; i++) + payload[i] = cpu_to_le32(pdo[i]); +} + +static int role_sw_phytium_config(struct role_sw_phytium_data *ctx) +{ + struct device *dev = &ctx->spi_client->dev; + struct role_sw_phytium_typec_params *typecp = &ctx->typec; + __le32 payload[PDO_MAX_OBJECTS]; + int ret; + + /* Config PD FW work under PD 2.0 */ + ret = role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_PD_REV_INIT, PD_REV20); + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_FW_CTRL_0, + RS_PHYTIUM_UNSTRUCT_VDM_EN | RS_PHYTIUM_DELAY_200MS | + RS_PHYTIUM_VSAFE1 | RS_PHYTIUM_FRS_EN); + ret |= role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_FW_CTRL_1, + RS_PHYTIUM_AUTO_PD_EN | RS_PHYTIUM_FORCE_SEND_RDO); + + /* Set VBUS current threshold */ + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_VBUS_THRESHOLD_H, 0xff); + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_VBUS_THRESHOLD_L, 0x03); + + /* Fix dongle compatible issue */ + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_FW_PARAM, + role_sw_phytium_reg_read(ctx->tcpc_client, RS_PHYTIUM_FW_PARAM) | + RS_PHYTIUM_DONGLE_IOP); + ret |= role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_INT_MASK, 0); + + ret |= role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_PD_EXT_MSG_CTRL, 0xFF); + if (ret) + return ret; + + if (typecp->caps_flags & RS_PHYTIUM_HAS_SOURCE_CAP) { + role_sw_phytium_translate_payload(dev, payload, typecp->src_pdo, + typecp->src_pdo_nr, "source"); + role_sw_phytium_send_msg(ctx, RS_PHYTIUM_TYPE_SRC_CAP, (u8 *)&payload, + typecp->src_pdo_nr * 4); + role_sw_phytium_send_msg(ctx, RS_PHYTIUM_TYPE_SNK_IDENTITY, snk_identity, + sizeof(snk_identity)); + role_sw_phytium_send_msg(ctx, RS_PHYTIUM_TYPE_SET_SNK_DP_CAP, dp_caps, + sizeof(dp_caps)); + } + + if 
(typecp->caps_flags & RS_PHYTIUM_HAS_SINK_CAP) { + role_sw_phytium_translate_payload(dev, payload, typecp->sink_pdo, + typecp->sink_pdo_nr, "sink"); + role_sw_phytium_send_msg(ctx, RS_PHYTIUM_TYPE_SNK_CAP, (u8 *)&payload, + typecp->sink_pdo_nr * 4); + } + + if (typecp->caps_flags & RS_PHYTIUM_HAS_SINK_WATT) { + if (typecp->sink_watt) { + ret |= role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_MAX_POWER, + typecp->sink_watt); + /* Set min power to 1W */ + ret |= role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_MIN_POWER, 2); + } + + if (typecp->sink_voltage) + ret |= role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_MAX_VOLTAGE, + typecp->sink_voltage); + if (ret) + return ret; + } + + if (!typecp->caps_flags) + usleep_range(5000, 6000); + + ctx->fw_version = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_FW_VER); + ctx->fw_subversion = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_FW_SUBVER); + + return 0; +} + +static void role_sw_phytium_chip_standby(struct role_sw_phytium_data *ctx) +{ + int ret; + u8 cc1, cc2; + struct device *dev = &ctx->spi_client->dev; + + ret = role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_OCM_CTRL_0, + role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_OCM_CTRL_0) | + RS_PHYTIUM_OCM_RESET); + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_ANALOG_CTRL_10, 0x80); + /* Set TCPC to RD and DRP enable */ + cc1 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT; + cc2 = TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT; + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, TCPC_ROLE_CTRL, + TCPC_ROLE_CTRL_DRP | cc1 | cc2); + + /* Send DRP toggle command */ + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, TCPC_COMMAND, + TCPC_CMD_LOOK4CONNECTION); + + /* Send TCPC enter standby command */ + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, + TCPC_COMMAND, TCPC_CMD_I2C_IDLE); + if (ret) + dev_err(dev, "Chip standby failed\n"); +} + +static void role_sw_phytium_work_func(struct work_struct 
*work) +{ + int ret; + u8 buf[RS_PHYTIUM_STATUS_LEN]; + u8 int_change; /* Interrupt change */ + u8 int_status; /* Firmware status update */ + u8 alert0, alert1; /* Interrupt alert source */ + struct role_sw_phytium_data *ctx = container_of(work, struct role_sw_phytium_data, work); + struct device *dev = &ctx->spi_client->dev; + + mutex_lock(&ctx->lock); + + /* Read interrupt change status */ + ret = role_sw_phytium_reg_block_read(ctx->spi_client, RS_PHYTIUM_INT_STS, + RS_PHYTIUM_STATUS_LEN, buf); + if (ret < 0) { + /* Power standby mode, just return */ + goto unlock; + } + int_change = buf[0]; + int_status = buf[1]; + + /* Read alert register */ + ret = role_sw_phytium_reg_block_read(ctx->tcpc_client, RS_PHYTIUM_ALERT_0, + RS_PHYTIUM_STATUS_LEN, buf); + if (ret < 0) + goto unlock; + + alert0 = buf[0]; + alert1 = buf[1]; + + /* Clear interrupt and alert status */ + ret = role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_INT_STS, 0); + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_ALERT_0, alert0); + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_ALERT_1, alert1); + if (ret) + goto unlock; + + if (alert1 & RS_PHYTIUM_INTP_POW_OFF) { + role_sw_phytium_partner_unregister_altmode(ctx); + if (role_sw_phytium_set_usb_role(ctx, USB_ROLE_NONE)) + dev_err(dev, "Set usb role\n"); + role_sw_phytium_unregister_partner(ctx); + ctx->psy_online = RS_PHYTIUM_PSY_OFFLINE; + ctx->usb_type = POWER_SUPPLY_USB_TYPE_C; + ctx->typec.request_voltage = 0; + ctx->typec.request_current = 0; + power_supply_changed(ctx->psy); + role_sw_phytium_chip_standby(ctx); + goto unlock; + } + + if ((alert0 & RS_PHYTIUM_SOFTWARE_INT) && (int_change & RS_PHYTIUM_OCM_BOOT_UP)) { + if (role_sw_phytium_config(ctx)) + dev_err(dev, "Config failed\n"); + if (role_sw_phytium_data_role_detect(ctx)) + dev_err(dev, "set PD data role\n"); + if (role_sw_phytium_power_role_detect(ctx)) + dev_err(dev, "set PD power role\n"); + role_sw_phytium_set_mux(ctx, 
RS_PHYTIUM_SELECT_PIN_ASSIGMENT_C); + } + + if (alert0 & RS_PHYTIUM_RECEIVED_MSG) + role_sw_phytium_process_cmd(ctx); + + ret = (int_status & RS_PHYTIUM_DATA_ROLE) ? TYPEC_HOST : TYPEC_DEVICE; + if (ctx->typec.data_role != ret) + if (role_sw_phytium_data_role_detect(ctx)) + dev_err(dev, "set PD data role\n"); + + ret = (int_status & RS_PHYTIUM_SINK_STATUS) ? TYPEC_SINK : TYPEC_SOURCE; + if (ctx->typec.power_role != ret) + if (role_sw_phytium_power_role_detect(ctx)) + dev_err(dev, "set PD power role\n"); + + if ((alert0 & RS_PHYTIUM_SOFTWARE_INT) && (int_change & RS_PHYTIUM_CC_STATUS_CHANGE)) + role_sw_phytium_cc_status_detect(ctx); + +unlock: + mutex_unlock(&ctx->lock); +} + +static irqreturn_t role_sw_phytium_intr_isr(int irq, void *data) +{ + struct role_sw_phytium_data *ctx = (struct role_sw_phytium_data *)data; + + queue_work(ctx->workqueue, &ctx->work); + + return IRQ_HANDLED; +} + +static int role_sw_phytium_register_i2c_dummy_clients(struct role_sw_phytium_data *ctx, + struct i2c_client *client) +{ + int i; + u8 spi_addr; + + for (i = 0; i < ARRAY_SIZE(role_sw_phytium_i2c_addr); i++) { + if (client->addr == (role_sw_phytium_i2c_addr[i].tcpc_address >> 1)) { + spi_addr = role_sw_phytium_i2c_addr[i].spi_address >> 1; + ctx->spi_client = i2c_new_dummy_device(client->adapter, + spi_addr); + if (!IS_ERR(ctx->spi_client)) + return 0; + } + } + + dev_err(&client->dev, "unable to get SPI slave\n"); + return -ENOMEM; +} + +static void role_sw_phytium_port_unregister_altmodes(struct typec_altmode **adev) +{ + int i; + + for (i = 0; i < RS_PHYTIUM_MAX_ALTMODE; i++) + if (adev[i]) { + typec_unregister_altmode(adev[i]); + adev[i] = NULL; + } +} + +static int role_sw_phytium_usb_mux_set(struct typec_mux_dev *mux, + struct typec_mux_state *state) +{ + struct role_sw_phytium_data *ctx = typec_mux_get_drvdata(mux); + struct device *dev = &ctx->spi_client->dev; + int has_dp; + + has_dp = (state->alt && state->alt->svid == USB_TYPEC_DP_SID && + state->alt->mode == 
USB_TYPEC_DP_MODE); + if (!has_dp) + dev_err(dev, "dp altmode not register\n"); + + return 0; +} + +static int role_sw_phytium_usb_set_orientation(struct typec_switch_dev *sw, + enum typec_orientation orientation) +{ + /* No need set */ + + return 0; +} + +static int role_sw_phytium_register_switch(struct role_sw_phytium_data *ctx, + struct device *dev, + struct fwnode_handle *fwnode) +{ + struct typec_switch_desc sw_desc = { }; + + sw_desc.fwnode = fwnode; + sw_desc.drvdata = ctx; + sw_desc.name = fwnode_get_name(fwnode); + sw_desc.set = role_sw_phytium_usb_set_orientation; + + ctx->typec.typec_switch = typec_switch_register(dev, &sw_desc); + if (IS_ERR(ctx->typec.typec_switch)) { + dev_err(dev, "switch register failed\n"); + return PTR_ERR(ctx->typec.typec_switch); + } + + return 0; +} + +static int role_sw_phytium_register_mux(struct role_sw_phytium_data *ctx, + struct device *dev, + struct fwnode_handle *fwnode) +{ + struct typec_mux_desc mux_desc = { }; + + mux_desc.fwnode = fwnode; + mux_desc.drvdata = ctx; + mux_desc.name = fwnode_get_name(fwnode); + mux_desc.set = role_sw_phytium_usb_mux_set; + + ctx->typec.typec_mux = typec_mux_register(dev, &mux_desc); + if (IS_ERR(ctx->typec.typec_mux)) { + dev_err(dev, "mux register failed\n"); + return PTR_ERR(ctx->typec.typec_mux); + } + + return 0; +} + +static void role_sw_phytium_unregister_mux(struct role_sw_phytium_data *ctx) +{ + if (ctx->typec.typec_mux) { + typec_mux_unregister(ctx->typec.typec_mux); + ctx->typec.typec_mux = NULL; + } +} + +static void role_sw_phytium_unregister_switch(struct role_sw_phytium_data *ctx) +{ + if (ctx->typec.typec_switch) { + typec_switch_unregister(ctx->typec.typec_switch); + ctx->typec.typec_switch = NULL; + } +} + +static int role_sw_phytium_typec_switch_probe(struct role_sw_phytium_data *ctx, + struct device *dev) +{ + int ret; + struct fwnode_handle *fwnode; + char sw_str[32] = {0}, almode_str[32] = {0}; + + if (has_acpi_companion(dev)) { + strscpy(sw_str, "ORSW", 
sizeof(sw_str)); + strscpy(almode_str, "ALSW", sizeof(almode_str)); + } else { + strscpy(sw_str, "orientation_switch", sizeof(sw_str)); + strscpy(almode_str, "mode_switch", sizeof(almode_str)); + } + + fwnode = device_get_named_child_node(dev, sw_str); + if (!fwnode) { + dev_err(dev, "Err:cannot find %s\n", sw_str); + return -EINVAL; + } + + ret = role_sw_phytium_register_switch(ctx, dev, fwnode); + if (ret) { + dev_err(dev, "failed register switch"); + return ret; + } + + fwnode = device_get_named_child_node(dev, almode_str); + if (!fwnode) { + dev_err(dev, "no typec mux exist"); + ret = -ENODEV; + goto unregister_switch; + } + ret = role_sw_phytium_register_mux(ctx, dev, fwnode); + if (ret) { + dev_err(dev, "failed register mode switch"); + ret = -ENODEV; + goto unregister_switch; + } + + return 0; + +unregister_switch: + role_sw_phytium_unregister_switch(ctx); + + return ret; +} + +static int role_sw_phytium_typec_port_probe(struct role_sw_phytium_data *ctx, + struct device *dev) +{ + struct typec_capability *cap = &ctx->typec.caps; + struct role_sw_phytium_typec_params *typecp = &ctx->typec; + struct fwnode_handle *fwnode; + const char *buf; + int ret, i; + char conc_str[32] = {0}; + + if (has_acpi_companion(dev)) + strscpy(conc_str, "CONC", sizeof(conc_str)); + else + strscpy(conc_str, "connector", sizeof(conc_str)); + + fwnode = device_get_named_child_node(dev, conc_str); + if (!fwnode) + return -EINVAL; + + ret = fwnode_property_read_string(fwnode, "power-role", &buf); + if (ret) { + dev_err(dev, "power-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_port_power_role(buf); + if (ret < 0) + return ret; + cap->type = ret; + + ret = fwnode_property_read_string(fwnode, "data-role", &buf); + if (ret) { + dev_err(dev, "data-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_port_data_role(buf); + if (ret < 0) + return ret; + cap->data = ret; + + ret = fwnode_property_read_string(fwnode, "try-power-role", &buf); + if (ret) { + 
dev_err(dev, "try-power-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_power_role(buf); + if (ret < 0) + return ret; + cap->prefer_role = ret; + + /* Get source pdos */ + ret = fwnode_property_count_u32(fwnode, "source-pdos"); + if (ret > 0) { + typecp->src_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS); + ret = fwnode_property_read_u32_array(fwnode, "source-pdos", + typecp->src_pdo, + typecp->src_pdo_nr); + if (ret < 0) { + dev_err(dev, "source cap validate failed: %d\n", ret); + return -EINVAL; + } + + typecp->caps_flags |= RS_PHYTIUM_HAS_SOURCE_CAP; + } + + ret = fwnode_property_count_u32(fwnode, "sink-pdos"); + if (ret > 0) { + typecp->sink_pdo_nr = min_t(u8, ret, PDO_MAX_OBJECTS); + ret = fwnode_property_read_u32_array(fwnode, "sink-pdos", + typecp->sink_pdo, + typecp->sink_pdo_nr); + if (ret < 0) { + dev_err(dev, "sink cap validate failed: %d\n", ret); + return -EINVAL; + } + + for (i = 0; i < typecp->sink_pdo_nr; i++) { + ret = 0; + switch (pdo_type(typecp->sink_pdo[i])) { + case PDO_TYPE_FIXED: + ret = pdo_fixed_voltage(typecp->sink_pdo[i]); + break; + case PDO_TYPE_BATT: + case PDO_TYPE_VAR: + ret = pdo_max_voltage(typecp->sink_pdo[i]); + break; + case PDO_TYPE_APDO: + default: + ret = 0; + break; + } + + /* 100mv per unit */ + typecp->sink_voltage = max(5000, ret) / 100; + } + + typecp->caps_flags |= RS_PHYTIUM_HAS_SINK_CAP; + } + + if (!fwnode_property_read_u32(fwnode, "op-sink-microwatt", &ret)) { + typecp->sink_watt = ret / 500000; /* 500mw per unit */ + typecp->caps_flags |= RS_PHYTIUM_HAS_SINK_WATT; + } + + cap->fwnode = fwnode; + + ctx->typec.role_sw = usb_role_switch_get(dev); + if (IS_ERR(ctx->typec.role_sw)) { + dev_err(dev, "USB role switch not found.\n"); + ctx->typec.role_sw = NULL; + } + + ctx->typec.port = typec_register_port(dev, cap); + if (IS_ERR(ctx->typec.port)) { + ret = PTR_ERR(ctx->typec.port); + ctx->typec.port = NULL; + dev_err(dev, "Failed to register type c port %d\n", ret); + return ret; + } + + 
typec_port_register_altmodes(ctx->typec.port, NULL, ctx, + ctx->typec.port_amode, + RS_PHYTIUM_MAX_ALTMODE); + return 0; +} + +static int role_sw_phytium_typec_check_connection(struct role_sw_phytium_data *ctx) +{ + int ret; + + ret = role_sw_phytium_reg_read(ctx->spi_client, RS_PHYTIUM_FW_VER); + if (ret < 0) + return 0; /* No device attached in typec port */ + + /* Clear interrupt and alert status */ + ret = role_sw_phytium_reg_write(ctx->spi_client, RS_PHYTIUM_INT_STS, 0); + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_ALERT_0, 0xFF); + ret |= role_sw_phytium_reg_write(ctx->tcpc_client, RS_PHYTIUM_ALERT_1, 0xFF); + if (ret) + return ret; + + ret = role_sw_phytium_cc_status_detect(ctx); + ret |= role_sw_phytium_power_role_detect(ctx); + ret |= role_sw_phytium_data_role_detect(ctx); + ret |= role_sw_phytium_set_mux(ctx, RS_PHYTIUM_SELECT_PIN_ASSIGMENT_C); + if (ret) + return ret; + + ret = role_sw_phytium_send_msg(ctx, RS_PHYTIUM_TYPE_GET_DP_ALT_ENTER, NULL, 0); + ret |= role_sw_phytium_send_msg(ctx, RS_PHYTIUM_TYPE_GET_DP_DISCOVER_MODES_INFO, NULL, 0); + + return ret; +} + +static int __maybe_unused role_sw_phytium_runtime_pm_suspend(struct device *dev) +{ + struct role_sw_phytium_data *ctx = dev_get_drvdata(dev); + + mutex_lock(&ctx->lock); + + role_sw_phytium_partner_unregister_altmode(ctx); + + if (ctx->typec.partner) + role_sw_phytium_unregister_partner(ctx); + + mutex_unlock(&ctx->lock); + + return 0; +} + +static int __maybe_unused role_sw_phytium_runtime_pm_resume(struct device *dev) +{ + struct role_sw_phytium_data *ctx = dev_get_drvdata(dev); + + mutex_lock(&ctx->lock); + /* Detect PD connection */ + if (role_sw_phytium_typec_check_connection(ctx)) + dev_err(dev, "check connection"); + + mutex_unlock(&ctx->lock); + + return 0; +} + +static const struct dev_pm_ops role_sw_phytium_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(role_sw_phytium_runtime_pm_suspend, + 
role_sw_phytium_runtime_pm_resume, NULL) +}; + +static void role_sw_phytium_get_gpio_irq_acpi(struct role_sw_phytium_data *ctx) +{ + struct device *dev = &ctx->tcpc_client->dev; + + if (!ctx->adev) { + dev_err(dev, "Err: no valid fwnode\n"); + return; + } + + ctx->intp_irq = acpi_dev_gpio_irq_get(ctx->adev, 0); + if (ctx->intp_irq < 0) + dev_err(dev, "failed to get GPIO IRQ\n"); +} + +static void role_sw_phytium_get_gpio_irq(struct role_sw_phytium_data *ctx) +{ + struct device *dev = &ctx->tcpc_client->dev; + + ctx->intp_gpiod = devm_gpiod_get_optional(dev, "interrupt", GPIOD_IN); + if (IS_ERR_OR_NULL(ctx->intp_gpiod)) { + dev_err(dev, "no interrupt gpio property\n"); + return; + } + + ctx->intp_irq = gpiod_to_irq(ctx->intp_gpiod); + if (ctx->intp_irq < 0) + dev_err(dev, "failed to get GPIO IRQ\n"); +} + +static enum power_supply_usb_type role_sw_phytium_psy_usb_types[] = { + POWER_SUPPLY_USB_TYPE_C, + POWER_SUPPLY_USB_TYPE_PD, + POWER_SUPPLY_USB_TYPE_PD_PPS, +}; + +static enum power_supply_property role_sw_phytium_psy_props[] = { + POWER_SUPPLY_PROP_USB_TYPE, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_MAX, + POWER_SUPPLY_PROP_CURRENT_NOW, +}; + +static int role_sw_phytium_psy_set_prop(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct role_sw_phytium_data *ctx = power_supply_get_drvdata(psy); + int ret = 0; + + if (psp == POWER_SUPPLY_PROP_ONLINE) + ctx->psy_online = val->intval; + else + ret = -EINVAL; + + power_supply_changed(ctx->psy); + return ret; +} + +static int role_sw_phytium_psy_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + return psp == POWER_SUPPLY_PROP_ONLINE; +} + +static int role_sw_phytium_psy_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct role_sw_phytium_data *ctx = 
power_supply_get_drvdata(psy); + int ret = 0; + + if (!ctx) { + pr_err("ctx is null.\n"); + return -EINVAL; + } + switch (psp) { + case POWER_SUPPLY_PROP_USB_TYPE: + val->intval = ctx->usb_type; + break; + case POWER_SUPPLY_PROP_ONLINE: + val->intval = ctx->psy_online; + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + val->intval = (ctx->psy_online) ? + ctx->typec.request_voltage * 1000 : 0; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + case POWER_SUPPLY_PROP_CURRENT_MAX: + val->intval = (ctx->psy_online) ? + ctx->typec.request_current * 1000 : 0; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int role_sw_phytium_psy_register(struct role_sw_phytium_data *ctx) +{ + struct power_supply_desc *psy_desc = &ctx->psy_desc; + struct power_supply_config psy_cfg = {}; + char *psy_name; + + psy_name = devm_kasprintf(ctx->dev, GFP_KERNEL, "anx7411-source-psy-%s", + dev_name(ctx->dev)); + if (!psy_name) + return -ENOMEM; + + psy_desc->name = psy_name; + psy_desc->type = POWER_SUPPLY_TYPE_USB; + psy_desc->usb_types = role_sw_phytium_psy_usb_types; + psy_desc->num_usb_types = ARRAY_SIZE(role_sw_phytium_psy_usb_types); + psy_desc->properties = role_sw_phytium_psy_props; + psy_desc->num_properties = ARRAY_SIZE(role_sw_phytium_psy_props); + + psy_desc->get_property = role_sw_phytium_psy_get_prop; + psy_desc->set_property = role_sw_phytium_psy_set_prop; + psy_desc->property_is_writeable = role_sw_phytium_psy_prop_writeable; + psy_cfg.drv_data = ctx; + ctx->usb_type = POWER_SUPPLY_USB_TYPE_C; + ctx->psy = devm_power_supply_register(ctx->dev, psy_desc, &psy_cfg); + + if (IS_ERR(ctx->psy)) + dev_warn(ctx->dev, "unable to register psy\n"); + + return PTR_ERR_OR_ZERO(ctx->psy); +} + +static int role_sw_phytium_i2c_probe(struct i2c_client *client) +{ + struct role_sw_phytium_data *plat; + struct device *dev = &client->dev; + int ret; + + dev_info(dev, "probe start\n"); + if 
(!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) + return -ENODEV; + + plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->tcpc_client = client; + i2c_set_clientdata(client, plat); + + mutex_init(&plat->lock); + + ret = role_sw_phytium_register_i2c_dummy_clients(plat, client); + if (ret) { + dev_err(dev, "fail to reserve I2C bus\n"); + return ret; + } + + ret = role_sw_phytium_typec_switch_probe(plat, dev); + if (ret) { + dev_err(dev, "fail to probe typec switch\n"); + goto free_i2c_dummy; + } + + ret = role_sw_phytium_typec_port_probe(plat, dev); + if (ret) { + dev_err(dev, "fail to probe typec property.\n"); + ret = -ENODEV; + goto free_typec_switch; + } + + plat->intp_irq = client->irq; + plat->adev = ACPI_COMPANION(dev); + if (!client->irq) { + if (has_acpi_companion(dev)) + role_sw_phytium_get_gpio_irq_acpi(plat); + else + role_sw_phytium_get_gpio_irq(plat); + } + + if (!plat->intp_irq) { + dev_err(dev, "fail to get interrupt IRQ\n"); + ret = -EINVAL; + goto free_typec_port; + } + + plat->dev = dev; + plat->psy_online = RS_PHYTIUM_PSY_OFFLINE; + ret = role_sw_phytium_psy_register(plat); + if (ret) { + dev_err(dev, "register psy\n"); + goto free_typec_port; + } + + INIT_WORK(&plat->work, role_sw_phytium_work_func); + plat->workqueue = alloc_workqueue("rs_phytium_work", + WQ_FREEZABLE | + WQ_MEM_RECLAIM, + 1); + if (!plat->workqueue) { + dev_err(dev, "fail to create work queue\n"); + ret = -ENOMEM; + goto free_typec_port; + } + + ret = devm_request_threaded_irq(dev, plat->intp_irq, + NULL, role_sw_phytium_intr_isr, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "rs-phytium-intp", plat); + if (ret) { + dev_err(dev, "fail to request irq\n"); + goto free_wq; + } + + if (role_sw_phytium_typec_check_connection(plat)) + dev_err(dev, "check status\n"); + + pm_runtime_enable(dev); + + dev_info(dev, "probe ok\n"); + return 0; + +free_wq: + destroy_workqueue(plat->workqueue); + +free_typec_port: + 
typec_unregister_port(plat->typec.port); + role_sw_phytium_port_unregister_altmodes(plat->typec.port_amode); + +free_typec_switch: + role_sw_phytium_unregister_switch(plat); + role_sw_phytium_unregister_mux(plat); + +free_i2c_dummy: + i2c_unregister_device(plat->spi_client); + + return ret; +} + +static void role_sw_phytium_i2c_remove(struct i2c_client *client) +{ + struct role_sw_phytium_data *plat = i2c_get_clientdata(client); + + role_sw_phytium_partner_unregister_altmode(plat); + role_sw_phytium_unregister_partner(plat); + + if (plat->workqueue) + destroy_workqueue(plat->workqueue); + + if (plat->spi_client) + i2c_unregister_device(plat->spi_client); + + if (plat->typec.role_sw) + usb_role_switch_put(plat->typec.role_sw); + + role_sw_phytium_unregister_mux(plat); + + role_sw_phytium_unregister_switch(plat); + + if (plat->typec.port) + typec_unregister_port(plat->typec.port); + + role_sw_phytium_port_unregister_altmodes(plat->typec.port_amode); +} + +static const struct i2c_device_id role_sw_phytium_id[] = { + {"role-sw", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, role_sw_phytium_id); + +static const struct of_device_id role_sw_phytium_match_table[] = { + {.compatible = "phytium,role-sw",}, + {}, +}; +static const struct acpi_device_id role_sw_phytium_acpi_match[] = { + { "PHYT8011", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, role_sw_phytium_acpi_match); +static struct i2c_driver role_sw_phytium_driver = { + .driver = { + .name = "phytium_role_sw", + .of_match_table = role_sw_phytium_match_table, + .acpi_match_table = ACPI_PTR(role_sw_phytium_acpi_match), + .pm = &role_sw_phytium_pm_ops, + }, + .probe = role_sw_phytium_i2c_probe, + .remove = role_sw_phytium_i2c_remove, + + .id_table = role_sw_phytium_id, +}; + +module_i2c_driver(role_sw_phytium_driver); + +MODULE_DESCRIPTION("Phytium USB Type-C PD driver"); +MODULE_AUTHOR("Wu Jinyong "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(RS_PHYTIUM_DRV_VERSION); diff --git a/include/asm-generic/vmlinux.lds.h
b/include/asm-generic/vmlinux.lds.h index 63029bc7c9dd01973bc663e5d7c3a618b2d12dd3..1ab8f45671e0c084dc5aebfd4b26ad6f1b13ec18 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -76,7 +76,9 @@ * alignment. */ #ifdef RO_EXCEPTION_TABLE_ALIGN -#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN) +#define RO_EXCEPTION_TABLE \ + EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN) \ + MC_EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN) #else #define RO_EXCEPTION_TABLE #endif @@ -637,6 +639,21 @@ BOUNDED_SECTION_BY(__ex_table, ___ex_table) \ } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* + * Machine Check Exception table + */ +#define MC_EXCEPTION_TABLE(align) \ + . = ALIGN(align); \ + __mc_ex_table : AT(ADDR(__mc_ex_table) - LOAD_OFFSET) { \ + __start___mc_ex_table = .; \ + KEEP(*(__mc_ex_table)) \ + __stop___mc_ex_table = .; \ + } +#else +#define MC_EXCEPTION_TABLE(align) +#endif + /* * .BTF */ diff --git a/include/linux/device.h b/include/linux/device.h index 3627b26b243e61b1809160c12740d8200412ee4d..2c5e240eb71a65bfe739b2afa3468b7ccea8d344 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -655,6 +655,8 @@ struct device_physical_location { * @dma_io_tlb_pools: List of transient swiotlb memory pools. * @dma_io_tlb_lock: Protects changes to the list of active pools. * @dma_uses_io_tlb: %true if device has used the software IO TLB. + * @dma_p_io_tlb_mem: Phytium Software IO TLB allocator. Not for driver use. + * @dma_uses_p_io_tlb: %true if device has used the Phytium software IO TLB. * @archdata: For arch-specific additions. * @of_node: Associated device tree node. * @fwnode: Associated device node supplied by platform firmware. 
@@ -761,6 +763,11 @@ struct device { #ifdef CONFIG_SWIOTLB struct io_tlb_mem *dma_io_tlb_mem; #endif +#ifdef CONFIG_PSWIOTLB + struct p_io_tlb_mem *dma_p_io_tlb_mem; + bool dma_uses_p_io_tlb; + bool can_use_pswiotlb; +#endif #ifdef CONFIG_SWIOTLB_DYNAMIC struct list_head dma_io_tlb_pools; spinlock_t dma_io_tlb_lock; @@ -774,6 +781,9 @@ struct device { #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ +#ifdef CONFIG_PSWIOTLB + int local_node; /* NUMA node this device is really belong to */ +#endif #endif dev_t devt; /* dev_t, creates the sysfs "dev" */ u32 id; /* device instance */ diff --git a/include/linux/extable.h b/include/linux/extable.h index 4ab9e78f313b7983865a5f6588ecfcb721fcc188..e608f8a8df4e1c04dad6d496d21a1911b470f3e5 100644 --- a/include/linux/extable.h +++ b/include/linux/extable.h @@ -19,18 +19,41 @@ void trim_init_extable(struct module *m); /* Given an address, look for it in the exception tables */ const struct exception_table_entry *search_exception_tables(unsigned long add); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +const struct exception_table_entry *search_mc_exception_tables(unsigned long add); +#else +static inline const struct exception_table_entry * +search_mc_exception_tables(unsigned long add) +{ + return NULL; +} +#endif const struct exception_table_entry * search_kernel_exception_table(unsigned long addr); #ifdef CONFIG_MODULES /* For extable.c to search modules' exception tables. 
*/ const struct exception_table_entry *search_module_extables(unsigned long addr); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +const struct exception_table_entry *search_module_mc_extables(unsigned long addr); +#else +static inline const struct exception_table_entry * +search_module_mc_extables(unsigned long addr) +{ + return NULL; +} +#endif #else static inline const struct exception_table_entry * search_module_extables(unsigned long addr) { return NULL; } +static inline const struct exception_table_entry * +search_module_mc_extables(unsigned long addr) +{ + return NULL; +} #endif /*CONFIG_MODULES*/ #ifdef CONFIG_BPF_JIT diff --git a/include/linux/kabi.h b/include/linux/kabi.h new file mode 100644 index 0000000000000000000000000000000000000000..fe3213c0f5760d4b043be300cf2f9c8c96bd8a10 --- /dev/null +++ b/include/linux/kabi.h @@ -0,0 +1,509 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * kabi.h - openEuler kABI abstraction header + * + * Copyright (c) 2014 Don Zickus + * Copyright (c) 2015-2018 Jiri Benc + * Copyright (c) 2015 Sabrina Dubroca, Hannes Frederic Sowa + * Copyright (c) 2016-2018 Prarit Bhargava + * Copyright (c) 2017 Paolo Abeni, Larry Woodman + * Copyright (c) 2021 Xie XiuQi + * + * This file is released under the GPLv2. + * See the file COPYING for more details. + * + * These kabi macros hide the changes from the kabi checker and from the + * process that computes the exported symbols' checksums. + * They have 2 variants: one (defined under __GENKSYMS__) used when + * generating the checksums, and the other used when building the kernel's + * binaries. + * + * The use of these macros does not guarantee that the usage and modification + * of code is correct. As with all openEuler only changes, an engineer must + * explain why the use of the macro is valid in the patch containing the + * changes. + * + * The macro helpers are derived from RHEL "include/linux/rh_kabi.h" + * Mostly debrand from RHEL. 
+ */ + +#ifndef _LINUX_KABI_H +#define _LINUX_KABI_H + +#include +#include + +/* + * NOTE + * Unless indicated otherwise, don't use ';' after these macros as it + * messes up the kABI checker by changing what the resulting token string + * looks like. Instead let the macros add the ';' so it can be properly + * hidden from the kABI checker (mainly for KABI_EXTEND, but applied to + * most macros for uniformity). + * + * KABI_CONST + * Adds a new const modifier to a function parameter preserving the old + * checksum. + * + * KABI_ADD_MODIFIER + * Adds a new modifier to a function parameter or a typedef, preserving + * the old checksum. Useful e.g. for adding rcu annotations or changing + * int to unsigned. Beware that this may change the semantics; if you're + * sure this is safe, always explain why binary compatibility with 3rd + * party modules is retained. + * + * KABI_DEPRECATE + * Marks the element as deprecated and make it unusable by modules while + * keeping a hole in its place to preserve binary compatibility. + * + * # define KABI_BROKEN_INSERT_ENUM(_new) _new, + * # define KABI_BROKEN_REMOVE_ENUM(_orig) + * KABI_DEPRECATE_FN + * Marks the function pointer as deprecated and make it unusable by modules + * while keeping a hole in its place to preserve binary compatibility. + * + * KABI_EXTEND + * Adds a new field to a struct. This must always be added to the end of + * the struct. Before using this macro, make sure this is actually safe + * to do - there is a number of conditions under which it is *not* safe. + * In particular (but not limited to), this macro cannot be used: + * - if the struct in question is embedded in another struct, or + * - if the struct is allocated by drivers either statically or + * dynamically, or + * - if the struct is allocated together with driver data (an example of + * such behavior is struct net_device or struct request). 
+ * + * KABI_EXTEND_WITH_SIZE + * Adds a new element (usually a struct) to a struct and reserves extra + * space for the new element. The provided 'size' is the total space to + * be added in longs (i.e. it's 8 * 'size' bytes), including the size of + * the added element. It is automatically checked that the new element + * does not overflow the reserved space, now nor in the future. However, + * no attempt is done to check the content of the added element (struct) + * for kABI conformance - kABI checking inside the added element is + * effectively switched off. + * For any struct being added by KABI_EXTEND_WITH_SIZE, it is + * recommended its content to be documented as not covered by kABI + * guarantee. + * + * KABI_FILL_HOLE + * Fills a hole in a struct. + * + * Warning: only use if a hole exists for _all_ arches. Use pahole to verify. + * + * KABI_RENAME + * Renames an element without changing its type. This macro can be used in + * bitfields, for example. + * + * NOTE: does not include the final ';' + * + * KABI_REPLACE + * Replaces the _orig field by the _new field. The size of the occupied + * space is preserved, it's fine if the _new field is smaller than the + * _orig field. If a _new field is larger or has a different alignment, + * compilation will abort. + * + * + * KABI_HIDE_INCLUDE + * Hides the given include file from kABI checksum computations. This is + * used when a newly added #include makes a previously opaque struct + * visible. + * + * Example usage: + * #include KABI_HIDE_INCLUDE() + * + * KABI_FAKE_INCLUDE + * Pretends inclusion of the given file for kABI checksum computations. + * This is used when upstream removed a particular #include but that made + * some structures opaque that were previously visible and is causing kABI + * checker failures. + * + * Example usage: + * #include KABI_FAKE_INCLUDE() + * + * KABI_RESERVE + * Adds a reserved field to a struct. 
This is done prior to kABI freeze + * for structs that cannot be expanded later using KABI_EXTEND (for + * example because they are embedded in another struct or because they are + * allocated by drivers or because they use unusual memory layout). The + * size of the reserved field is 'unsigned long' and is assumed to be + * 8 bytes. + * + * The argument is a number unique for the given struct; usually, multiple + * KABI_RESERVE macros are added to a struct with numbers starting from + * one. + * + * Example usage: + * struct foo { + * int a; + * KABI_RESERVE(1) + * KABI_RESERVE(2) + * }; + * + * KABI_USE + * Simple wrappers to replace standard openEuler reserved elements. + * + * KABI_AUX_EMBED + * KABI_AUX_PTR + * Adds an extension of a struct in the form of "auxiliary structure". + * This is done prior to kABI freeze for structs that cannot be expanded + * later using KABI_EXTEND. See also KABI_RESERVED, these two + * approaches can (and often are) combined. + * + * To use this for 'struct foo' (the "base structure"), define a new + * structure called 'struct foo_resvd'; this new struct is called "auxiliary + * structure". Then add KABI_AUX_EMBED or KABI_AUX_PTR to the end + * of the base structure. The argument is the name of the base structure, + * without the 'struct' keyword. + * + * KABI_AUX_PTR stores a pointer to the aux structure in the base + * struct. The lifecycle of the aux struct needs to be properly taken + * care of. + * + * KABI_AUX_EMBED embeds the aux struct into the base struct. This + * cannot be used when the base struct is itself embedded into another + * struct, allocated in an array, etc. + * + * Both approaches (ptr and embed) work correctly even when the aux struct + * is allocated by modules. To ensure this, the code responsible for + * allocation/assignment of the aux struct has to properly set the size of + * the aux struct; see the KABI_AUX_SET_SIZE and KABI_AUX_INIT_SIZE + * macros. 
+ * + * New fields can be later added to the auxiliary structure, always to its + * end. Note the auxiliary structure cannot be shrunk in size later (i.e., + * fields cannot be removed, only deprecated). Any code accessing fields + * from the aux struct must guard the access using the KABI_AUX macro. + * The access itself is then done via a '_resvd' field in the base struct. + * + * The auxiliary structure is not guaranteed for access by modules unless + * explicitly commented as such in the declaration of the aux struct + * itself or some of its elements. + * + * Example: + * + * struct foo_resvd { + * int newly_added; + * }; + * + * struct foo { + * bool big_hammer; + * KABI_AUX_PTR(foo) + * }; + * + * void use(struct foo *f) + * { + * if (KABI_AUX(f, foo, newly_added)) + * f->_resvd->newly_added = 123; + * else + * // the field 'newly_added' is not present in the passed + * // struct, fall back to old behavior + * f->big_hammer = true; + * } + * + * static struct foo_resvd my_foo_resvd { + * .newly_added = 0; + * } + * + * static struct foo my_foo = { + * .big_hammer = false, + * ._resvd = &my_foo_resvd, + * KABI_AUX_INIT_SIZE(foo) + * }; + * + * KABI_USE_AUX_PTR + * Creates an auxiliary structure post kABI freeze. This works by using + * two reserved fields (thus there has to be two reserved fields still + * available) and converting them to KABI_AUX_PTR. + * + * Example: + * + * struct foo_resvd { + * }; + * + * struct foo { + * int a; + * KABI_RESERVE(1) + * KABI_USE_AUX_PTR(2, 3, foo) + * }; + * + * KABI_AUX_SET_SIZE + * KABI_AUX_INIT_SIZE + * Calculates and stores the size of the auxiliary structure. + * + * KABI_AUX_SET_SIZE is for dynamically allocated base structs, + * KABI_AUX_INIT_SIZE is for statically allocated case structs. + * + * These macros must be called from the allocation (KABI_AUX_SET_SIZE) + * or declaration (KABI_AUX_INIT_SIZE) site, regardless of whether + * that happens in the kernel or in a module. 
Without calling one of + * these macros, the aux struct will appear to have no fields to the + * kernel. + * + * Note: since KABI_AUX_SET_SIZE is intended to be invoked outside of + * a struct definition, it does not add the semicolon and must be + * terminated by semicolon by the caller. + * + * KABI_AUX + * Verifies that the given field exists in the given auxiliary structure. + * This MUST be called prior to accessing that field; failing to do that + * may lead to invalid memory access. + * + * The first argument is a pointer to the base struct, the second argument + * is the name of the base struct (without the 'struct' keyword), the + * third argument is the field name. + * + * This macro works for structs extended by either of KABI_AUX_EMBED, + * KABI_AUX_PTR and KABI_USE_AUX_PTR. + * + * KABI_FORCE_CHANGE + * Force change of the symbol checksum. The argument of the macro is a + * version for cases we need to do this more than once. + * + * This macro does the opposite: it changes the symbol checksum without + * actually changing anything about the exported symbol. It is useful for + * symbols that are not whitelisted, we're changing them in an + * incompatible way and want to prevent 3rd party modules to silently + * corrupt memory. Instead, by changing the symbol checksum, such modules + * won't be loaded by the kernel. This macro should only be used as a + * last resort when all other KABI workarounds have failed. + * + * KABI_EXCLUDE + * !!! WARNING: DANGEROUS, DO NOT USE unless you are aware of all the !!! + * !!! implications. This should be used ONLY EXCEPTIONALLY and only !!! + * !!! under specific circumstances. Very likely, this macro does not !!! + * !!! do what you expect it to do. Note that any usage of this macro !!! + * !!! MUST be paired with a KABI_FORCE_CHANGE annotation of !!! + * !!! a suitable symbol (or an equivalent safeguard) and the commit !!! + * !!! log MUST explain why the chosen solution is appropriate. !!! 
+ * + * Exclude the element from checksum generation. Any such element is + * considered not to be part of the kABI whitelist and may be changed at + * will. Note however that it's the responsibility of the developer + * changing the element to ensure 3rd party drivers using this element + * won't panic, for example by not allowing them to be loaded. That can + * be achieved by changing another, non-whitelisted symbol they use, + * either by nature of the change or by using KABI_FORCE_CHANGE. + * + * Also note that any change to the element must preserve its size. Change + * of the size is not allowed and would constitute a silent kABI breakage. + * Beware that the KABI_EXCLUDE macro does not do any size checks. + * + * KABI_BROKEN_INSERT + * KABI_BROKEN_REMOVE + * Insert a field to the middle of a struct / delete a field from a struct. + * Note that this breaks kABI! It can be done only when it's certain that + * no 3rd party driver can validly reach into the struct. A typical + * example is a struct that is: both (a) referenced only through a long + * chain of pointers from another struct that is part of a whitelisted + * symbol and (b) kernel internal only, it should have never been visible + * to genksyms in the first place. + * + * Another example are structs that are explicitly exempt from kABI + * guarantee but we did not have enough foresight to use KABI_EXCLUDE. + * In this case, the warning for KABI_EXCLUDE applies. + * + * A detailed explanation of correctness of every KABI_BROKEN_* macro + * use is especially important. + * + * KABI_BROKEN_INSERT_BLOCK + * KABI_BROKEN_REMOVE_BLOCK + * A version of KABI_BROKEN_INSERT / REMOVE that allows multiple fields + * to be inserted or removed together. All fields need to be terminated + * by ';' inside(!) the macro parameter. The macro itself must not be + * terminated by ';'. + * + * KABI_BROKEN_REPLACE + * Replace a field by a different one without doing any checking. 
This + * allows replacing a field by another with a different size. Similarly + * to other KABI_BROKEN macros, use of this indicates a kABI breakage. + * + * KABI_BROKEN_INSERT_ENUM + * KABI_BROKEN_REMOVE_ENUM + * Insert a field to the middle of an enumeration type / delete a field from + * an enumeration type. Note that this can break kABI especially if the + * number of enum fields is used in an array within a structure. It can be + * done only when it is certain that no 3rd party driver will use the + * enumeration type or a structure that embeds an array with size determined + * by an enumeration type. + * + * KABI_EXTEND_ENUM + * Adds a new field to an enumeration type. This must always be added to + * the end of the enum. Before using this macro, make sure this is actually + * safe to do. + */ +#ifdef __GENKSYMS__ + +# define KABI_CONST +# define KABI_ADD_MODIFIER(_new) +# define KABI_EXTEND(_new) +# define KABI_FILL_HOLE(_new) +# define KABI_FORCE_CHANGE(ver) __attribute__((kabi_change ## ver)) +# define KABI_RENAME(_orig, _new) _orig +# define KABI_HIDE_INCLUDE(_file) +# define KABI_FAKE_INCLUDE(_file) _file +# define KABI_BROKEN_INSERT(_new) +# define KABI_BROKEN_REMOVE(_orig) _orig; +# define KABI_BROKEN_INSERT_BLOCK(_new) +# define KABI_BROKEN_REMOVE_BLOCK(_orig) _orig +# define KABI_BROKEN_REPLACE(_orig, _new) _orig; +# define KABI_BROKEN_INSERT_ENUM(_new) +# define KABI_BROKEN_REMOVE_ENUM(_orig) _orig, +# define KABI_EXTEND_ENUM(_new) + +# define _KABI_DEPRECATE(_type, _orig) _type _orig +# define _KABI_DEPRECATE_FN(_type, _orig, _args...) _type (*_orig)(_args) +# define _KABI_REPLACE(_orig, _new) _orig +# define _KABI_EXCLUDE(_elem) + +#else + +# define KABI_ALIGN_WARNING ". Disable CONFIG_KABI_SIZE_ALIGN_CHECKS if debugging." 
+ +# define KABI_CONST const +# define KABI_ADD_MODIFIER(_new) _new +# define KABI_EXTEND(_new) _new; +# define KABI_FILL_HOLE(_new) _new; +# define KABI_FORCE_CHANGE(ver) +# define KABI_RENAME(_orig, _new) _new +# define KABI_HIDE_INCLUDE(_file) _file +# define KABI_FAKE_INCLUDE(_file) +# define KABI_BROKEN_INSERT(_new) _new; +# define KABI_BROKEN_REMOVE(_orig) +# define KABI_BROKEN_INSERT_BLOCK(_new) _new +# define KABI_BROKEN_REMOVE_BLOCK(_orig) +# define KABI_BROKEN_REPLACE(_orig, _new) _new; +# define KABI_BROKEN_INSERT_ENUM(_new) _new, +# define KABI_BROKEN_REMOVE_ENUM(_orig) +# define KABI_EXTEND_ENUM(_new) _new, + +#if IS_BUILTIN(CONFIG_KABI_SIZE_ALIGN_CHECKS) +# define __KABI_CHECK_SIZE_ALIGN(_orig, _new) \ + union { \ + _Static_assert(sizeof(struct{_new;}) <= sizeof(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_new) " is larger than " __stringify(_orig) KABI_ALIGN_WARNING); \ + _Static_assert(__alignof__(struct{_new;}) <= __alignof__(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_orig) " is not aligned the same as " __stringify(_new) KABI_ALIGN_WARNING); \ + } +# define __KABI_CHECK_SIZE(_item, _size) \ + _Static_assert(sizeof(struct{_item;}) <= _size, \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_item) " is larger than the reserved size (" __stringify(_size) " bytes)" KABI_ALIGN_WARNING) +#else +# define __KABI_CHECK_SIZE_ALIGN(_orig, _new) +# define __KABI_CHECK_SIZE(_item, _size) +#endif + +#define KABI_UNIQUE_ID __PASTE(kabi_hidden_, __LINE__) + +# define _KABI_DEPRECATE(_type, _orig) _type kabi_reserved_##_orig +# define _KABI_DEPRECATE_FN(_type, _orig, _args...) 
\ + _type (* kabi_reserved_##_orig)(_args) +#ifdef CONFIG_KABI_RESERVE +# define _KABI_REPLACE(_orig, _new) \ + union { \ + _new; \ + struct { \ + _orig; \ + } KABI_UNIQUE_ID; \ + __KABI_CHECK_SIZE_ALIGN(_orig, _new); \ + } +#else +# define _KABI_REPLACE(_orig, _new) KABI_BROKEN_REPLACE(_orig, _new) +#endif + +# define _KABI_EXCLUDE(_elem) _elem + +#endif /* __GENKSYMS__ */ + +/* semicolon added wrappers for the KABI_REPLACE macros */ +# define KABI_DEPRECATE(_type, _orig) _KABI_DEPRECATE(_type, _orig); +# define KABI_DEPRECATE_FN(_type, _orig, _args...) \ + _KABI_DEPRECATE_FN(_type, _orig, _args); +# define KABI_REPLACE(_orig, _new) _KABI_REPLACE(_orig, _new); +/* + * Macro for breaking up a random element into two smaller chunks using an + * anonymous struct inside an anonymous union. + */ +# define KABI_REPLACE2(orig, _new1, _new2) KABI_REPLACE(orig, struct{ _new1; _new2;}) + +/* + * We tried to standardize on openEuler reserved names. These wrappers + * leverage those common names making it easier to read and find in the + * code. + */ +#ifdef CONFIG_KABI_RESERVE +# define _KABI_RESERVE(n) u64 kabi_reserved##n +#else +# define _KABI_RESERVE(n) +#endif +# define KABI_RESERVE(n) _KABI_RESERVE(n); +/* + * Simple wrappers to replace standard openEuler reserved elements. + */ +# define KABI_USE(n, _new) KABI_REPLACE(_KABI_RESERVE(n), _new) +/* + * Macros for breaking up a reserved element into two smaller chunks using + * an anonymous struct inside an anonymous union. 
+ */ +# define KABI_USE2(n, _new1, _new2) KABI_REPLACE(_KABI_RESERVE(n), struct{ _new1; _new2; }) + +#define KABI_EXCLUDE(_elem) _KABI_EXCLUDE(_elem); + +#define KABI_EXTEND_WITH_SIZE(_new, _size) \ + KABI_EXTEND(union { \ + _new; \ + unsigned long KABI_UNIQUE_ID[_size]; \ + __KABI_CHECK_SIZE(_new, 8 * (_size)); \ + }) + +#define _KABI_AUX_PTR(_struct) \ + size_t _struct##_size_resvd; \ + _KABI_EXCLUDE(struct _struct##_resvd *_resvd) +#define KABI_AUX_PTR(_struct) \ + _KABI_AUX_PTR(_struct); + +#define _KABI_AUX_EMBED(_struct) \ + size_t _struct##_size_resvd; \ + _KABI_EXCLUDE(struct _struct##_resvd _resvd) +#define KABI_AUX_EMBED(_struct) \ + _KABI_AUX_EMBED(_struct); + +#define KABI_USE_AUX_PTR(n1, n2, _struct) \ + KABI_USE(n1, n2, \ + struct { KABI_AUX_PTR(_struct) }) + +/* + * KABI_AUX_SET_SIZE calculates and sets the size of the extended struct and + * stores it in the size_resvd field for structs that are dynamically allocated. + * This macro MUST be called when expanding a base struct with + * KABI_SIZE_AND_EXTEND, and it MUST be called from the allocation site + * regardless of being allocated in the kernel or a module. + * Note: since this macro is intended to be invoked outside of a struct, + * a semicolon is necessary at the end of the line where it is invoked. + */ +#define KABI_AUX_SET_SIZE(_name, _struct) ({ \ + (_name)->_struct##_size_resvd = sizeof(struct _struct##_resvd); \ +}) + +/* + * KABI_AUX_INIT_SIZE calculates and sets the size of the extended struct and + * stores it in the size_resvd field for structs that are statically allocated. + * This macro MUST be called when expanding a base struct with + * KABI_SIZE_AND_EXTEND, and it MUST be called from the declaration site + * regardless of being allocated in the kernel or a module. + */ +#define KABI_AUX_INIT_SIZE(_struct) \ + ._struct##_size_resvd = sizeof(struct _struct##_resvd), + +/* + * KABI_AUX verifies allocated memory exists. 
This MUST be called to + * verify that memory in the _resvd struct is valid, and can be called + * regardless if KABI_SIZE_AND_EXTEND or KABI_SIZE_AND_EXTEND_PTR is + * used. + */ +#define KABI_AUX(_ptr, _struct, _field) ({ \ + size_t __off = offsetof(struct _struct##_resvd, _field); \ + (_ptr)->_struct##_size_resvd > __off ? true : false; \ +}) + +#endif /* _LINUX_KABI_H */ diff --git a/include/linux/module.h b/include/linux/module.h index a98e188cf37b8182b8654b3be069f93ce1d97174..4a81a189f2da2a2b7f21207a5dc7011c719e2ff7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -32,6 +32,8 @@ #include #include +#include + #define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN struct modversion_info { @@ -457,6 +459,11 @@ struct module { /* Startup function. */ int (*init)(void); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + /* there is 8-byte hole on all platforms */ + KABI_FILL_HOLE(unsigned int num_mc_exentries) +#endif + struct module_memory mem[MOD_MEM_NUM_TYPES] __module_memory_align; /* Arch-specific module values */ @@ -580,6 +587,9 @@ struct module { struct error_injection_entry *ei_funcs; unsigned int num_ei_funcs; #endif +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + KABI_USE(1, struct exception_table_entry *mc_extable) +#endif #ifdef CONFIG_DYNAMIC_DEBUG_CORE struct _ddebug_info dyndbg_info; #endif diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index b4fa92a6e44b824661aa618b35deb57d925136ed..6ed9b09be3f103a1eda9e541454e1f52c68e2343 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -232,6 +232,7 @@ struct map_info { unsigned long map_priv_1; unsigned long map_priv_2; struct device_node *device_node; + struct fwnode_handle *fwnode_handle; void *fldrv_priv; struct mtd_chip_driver *fldrv; }; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index a77f3a7d21d12f0d83f59957460391f255a45abc..efabe7785cfa7ff9dce8bcfbb0f47cb934367fcb 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -191,6 +191,13 @@ 
enum pageflags { /* At least one page in this folio has the hwpoison flag set */ PG_has_hwpoisoned = PG_error, PG_large_rmappable = PG_workingset, /* anon or file-backed */ + +#ifdef CONFIG_PSWIOTLB + /* check if pswiotlb is sync already */ + PG_pswiotlbsync = __NR_PAGEFLAGS + 1, + /* check if the page is used for pswiotlb */ + PG_pswiotlb, +#endif }; #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) diff --git a/include/linux/phy.h b/include/linux/phy.h index 5aa30ee99810479488a0e75a517ce2f68183cee6..cf833187d1ada4b88488329892053c356f46fa68 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -642,6 +642,8 @@ struct phy_device { u32 phy_id; + u32 force_mode; + struct phy_c45_device_ids c45_ids; unsigned is_c45:1; unsigned is_internal:1; diff --git a/include/linux/phytium_npu_dma_buf_heap.h b/include/linux/phytium_npu_dma_buf_heap.h new file mode 100644 index 0000000000000000000000000000000000000000..e24e6d9961330c0f4463e0042365e0385e495c22 --- /dev/null +++ b/include/linux/phytium_npu_dma_buf_heap.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL */ +/* + * Copyright (c) 2023, Phytium Corporation. 
+ */ +#ifndef __PHYTIUM_NPU_DMA_BUF_HEAP_H__ +#define __PHYTIUM_NPU_DMA_BUF_HEAP_H__ +#include +#include + +struct npu_unified_heap_buffer { + struct dma_heap *heap; + struct list_head attachments; + struct mutex lock; + struct sg_table sgt; + unsigned long req_len; + unsigned int page_size; + int vmap_cnt; + int buffer_status; /* buffer is ready for inference */ + void *vaddr; +}; + +#endif diff --git a/include/linux/pswiotlb.h b/include/linux/pswiotlb.h new file mode 100644 index 0000000000000000000000000000000000000000..26b2a9d7f5a1c6fda8fa594278e8330b18f4b67c --- /dev/null +++ b/include/linux/pswiotlb.h @@ -0,0 +1,366 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PSWIOTLB_H +#define __LINUX_PSWIOTLB_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct device; +struct page; +struct scatterlist; +extern bool pswiotlb_force_disable; +struct p_io_tlb_pool; + +#define SOC_ID_PS23064 0x8 +#define SOC_ID_PS24080 0x6 +#define MIDR_PS 0x700F8620 +#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) +#define PSWIOTLB_VERBOSE (1 << 0) /* verbose initialization */ +#define PSWIOTLB_FORCEOFF (1 << 1) /* force phytium bounce buffering off*/ +#define PSWIOTLB_ANY (1 << 2) /* allow any memory for the buffer */ +#define PSWIOTLB_FREE_THRESHOLD 30 +static bool is_ps_socs; + +/* + * Maximum allowable number of contiguous slabs to map, + * must be a power of 2. What is the appropriate value ? + * The complexity of {map,unmap}_single is linearly dependent on this value. + */ +#define P_IO_TLB_SEGSIZE 1024 + +/* + * log of the size of each Phytium IO TLB slab. The number of slabs is command line + * controllable. 
+ */ +#define P_IO_TLB_SHIFT 11 +#define P_IO_TLB_SIZE (1 << P_IO_TLB_SHIFT) + +/* default to 256MB */ +#define P_IO_TLB_DEFAULT_SIZE (256UL<<20) +#define P_IO_TLB_INC_THR (64UL<<20) +#define P_IO_TLB_EXT_WATERMARK (80) + +/* passthroughlist which incompatible with pswiotlb temporarily */ +#define BL_PCI_VENDOR_ID_NVIDIA 0x10de +#define BL_PCI_VENDOR_ID_ILUVATAR 0x1E3E +#define BL_PCI_VENDOR_ID_METAX 0x9999 + +unsigned long pswiotlb_size_or_default(void); +void __init pswiotlb_init_remap(bool addressing_limit, int nid, unsigned int flags, + int (*remap)(void *tlb, unsigned long nslabs)); + +phys_addr_t pswiotlb_tbl_map_single(struct device *hwdev, int nid, phys_addr_t phys, + size_t mapping_size, size_t alloc_size, unsigned int alloc_align_mask, + enum dma_data_direction dir, + unsigned long attrs); + +extern void pswiotlb_tbl_unmap_single(struct device *hwdev, + int nid, + phys_addr_t tlb_addr, + size_t offset, + size_t mapping_size, + enum dma_data_direction dir, + unsigned long attrs, + struct p_io_tlb_pool *pool); + +void pswiotlb_sync_single_for_device(struct device *dev, int nid, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir, struct p_io_tlb_pool *pool); +void pswiotlb_sync_single_for_cpu(struct device *dev, int nid, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir, struct p_io_tlb_pool *pool); +dma_addr_t pswiotlb_map(struct device *dev, int nid, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void pswiotlb_store_local_node(struct pci_dev *dev, struct pci_bus *bus); +void iommu_dma_unmap_sg_pswiotlb(struct device *dev, struct scatterlist *sg, unsigned long iova, + size_t mapped, int nents, enum dma_data_direction dir, unsigned long attrs); +#ifdef CONFIG_PSWIOTLB +struct pswiotlb_passthroughlist { + struct list_head node; + unsigned short vendor; + unsigned short device; + bool from_grub; +}; + +struct pswiotlb_bypass_rules { + unsigned short vendor_id; + bool dma_is_sg; + enum 
dma_data_direction dir; +}; + +/** + * struct p_io_tlb_pool - Phytium IO TLB memory pool descriptor + * @start: The start address of the pswiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @end: The end address of the pswiotlb memory pool. Used to do a quick + * range check to see if the memory was in fact allocated by this + * API. + * @nslabs: The number of Phytium IO TLB blocks (in groups of 64) between @start and + * @end. For default pswiotlb, this is command line adjustable via + * setup_io_tlb_npages. + * @used: The number of used Phytium IO TLB block. + * @list: The free list describing the number of free entries available + * from each index. + * @index: The index to start searching in the next round. + * @orig_addr: The original address corresponding to a mapped entry. + * @alloc_size: Size of the allocated buffer. + * @lock: The lock to protect the above data structures in the map and + * unmap calls. + * @vaddr: The vaddr of the pswiotlb memory pool. The pswiotlb memory pool + * may be remapped in the memory encrypted case and store virtual + * address for bounce buffer operation. + * @nslabs: The number of Phytium IO TLB slots between @start and @end. For the + * default pswiotlb, this can be adjusted with a boot parameter, + * see setup_io_tlb_npages(). + * @late_alloc: %true if allocated using the page allocator. + * @nareas: Number of areas in the pool. + * @area_nslabs: Number of slots in each area. + * @areas: Array of memory area descriptors. + * @slots: Array of slot descriptors. + * @node: Member of the Phytium IO TLB memory pool list. + * @rcu: RCU head for pswiotlb_dyn_free(). + * @transient: %true if transient memory pool. + * @busy_flag: %true if the pool is used by devices. + * @free_cnt: Counters every time the pool is free when checked by monitor. + * @free_th: Free threshold determine when to free the pool to memory. 
+ * @busy_record: Bitmap to record the busy status of the areas in the pool. + * @node_min_addr: Minimum physical address of the numa node. + * @node_max_addr: Maximum physical address of the numa node. + * @numa_node_id: Numa node id the pool belong to. + */ +struct p_io_tlb_pool { + phys_addr_t start; + phys_addr_t end; + void *vaddr; + unsigned long nslabs; + bool late_alloc; + unsigned int nareas; + unsigned int area_nslabs; + struct p_io_tlb_area *areas; + struct p_io_tlb_slot *slots; + struct list_head node; + struct rcu_head rcu; + bool transient; + bool busy_flag; + unsigned int free_cnt; + unsigned int free_th; + unsigned long *busy_record; + phys_addr_t node_min_addr; + phys_addr_t node_max_addr; + int numa_node_id; +}; + +/** + * struct p_io_tlb_mem - Phytium Software IO TLB allocator + * @defpool: Default (initial) Phytium IO TLB memory pool descriptor. + * @pool: Phytium IO TLB memory pool descriptor (if not dynamic). + * @nslabs: Total number of Phytium IO TLB slabs in all pools. + * @debugfs: The dentry to debugfs. + * @force_bounce: %true if pswiotlb bouncing is forced + * @for_alloc: %true if the pool is used for memory allocation + * @can_grow: %true if more pools can be allocated dynamically. + * @phys_limit: Maximum allowed physical address. + * @pool_addr: Array where all the pools stored. + * @capacity: Number of pools which could be allocated. + * @whole_size: Number of pools which stored in the pool array. + * @lock: Lock to synchronize changes to the list. + * @pools: List of Phytium IO TLB memory pool descriptors (if dynamic). + * @dyn_alloc: Dynamic Phytium IO TLB pool allocation work. + * @total_used: The total number of slots in the pool that are currently used + * across all areas. Used only for calculating used_hiwater in + * debugfs. + * @used_hiwater: The high water mark for total_used. Used only for reporting + * in debugfs. + * @node_min_addr: Minimum physical address of the numa node. 
+ * @node_max_addr: Maximum physical address of the numa node. + * @numa_node_id: Numa node id the mem belongs to. + */ +struct p_io_tlb_mem { + struct p_io_tlb_pool defpool; + unsigned long nslabs; + struct dentry *debugfs; + bool force_bounce; + bool for_alloc; + bool can_grow; + u64 phys_limit; + struct p_io_tlb_pool *pool_addr[64*1024/8]; + int capacity; + int whole_size; + spinlock_t lock; + struct list_head pools; + struct work_struct dyn_alloc; +#ifdef CONFIG_DEBUG_FS + atomic_long_t total_used; + atomic_long_t used_hiwater; +#endif + phys_addr_t node_min_addr; + phys_addr_t node_max_addr; + unsigned long node_total_mem; + int numa_node_id; +}; + +extern struct p_io_tlb_mem p_io_tlb_default_mem[MAX_NUMNODES]; + +struct p_io_tlb_pool *pswiotlb_find_pool(struct device *dev, int nid, phys_addr_t paddr); + +static inline bool is_phytium_ps_socs(void) +{ + unsigned int soc_id; + unsigned int midr; + + if (likely(is_ps_socs)) + return true; + + soc_id = read_sysreg_s(SYS_AIDR_EL1); + midr = read_cpuid_id(); + if ((soc_id == SOC_ID_PS23064 || soc_id == SOC_ID_PS24080) + && midr == MIDR_PS) { + is_ps_socs = true; + return true; + } else + return false; +} + +static inline bool is_pswiotlb_buffer(struct device *dev, int nid, phys_addr_t paddr, + struct p_io_tlb_pool **pool) +{ + struct p_io_tlb_mem *mem = &dev->dma_p_io_tlb_mem[nid]; + struct page *page; + + if (!paddr || (paddr == DMA_MAPPING_ERROR)) + return false; + + page = pfn_to_page(PFN_DOWN(paddr)); + + if (test_bit(PG_pswiotlb, &page->flags) == false) + return false; + + if (!mem) + return false; + + /* + * All PSWIOTLB buffer addresses must have been returned by + * pswiotlb_tbl_map_single() and passed to a device driver. + * If a PSWIOTLB address is checked on another CPU, then it was + * presumably loaded by the device driver from an unspecified private + * data structure. Make sure that this load is ordered before reading + * dev->dma_uses_p_io_tlb here and mem->pools in pswiotlb_find_pool(). 
+ * + * This barrier pairs with smp_mb() in pswiotlb_find_slots(). + */ + smp_rmb(); + + *pool = pswiotlb_find_pool(dev, nid, paddr); + if (READ_ONCE(dev->dma_uses_p_io_tlb) && *pool) + return true; + + return false; +} + +static inline bool dma_is_in_local_node(struct device *dev, int nid, dma_addr_t addr, size_t size) +{ + dma_addr_t end = addr + size - 1; + struct p_io_tlb_mem *mem = &p_io_tlb_default_mem[nid]; + + if (addr >= mem->node_min_addr && end <= mem->node_max_addr) + return true; + + return false; +} + +void pswiotlb_init(bool addressing_limited, unsigned int flags); +void pswiotlb_dev_init(struct device *dev); +size_t pswiotlb_max_mapping_size(struct device *dev); +bool is_pswiotlb_allocated(struct device *dev); +bool is_pswiotlb_active(struct device *dev); +void __init pswiotlb_adjust_size(unsigned long size); +phys_addr_t default_pswiotlb_base(struct device *dev); +phys_addr_t default_pswiotlb_limit(struct device *dev); +bool pswiotlb_is_dev_in_passthroughlist(struct pci_dev *dev); + +extern const struct pswiotlb_bypass_rules bypass_rules_list[]; +static inline bool pswiotlb_bypass_is_needed(struct device *dev, int nelems, + enum dma_data_direction dir) +{ + struct pci_dev *pdev = to_pci_dev(dev); + bool dma_is_sg = nelems ? 
true : false; + const struct pswiotlb_bypass_rules *list = bypass_rules_list; + + while (list->vendor_id) { + if ((pdev->vendor == list->vendor_id) && + (dma_is_sg == list->dma_is_sg) && + (dir == list->dir)) + return true; + list++; + } + + return false; +} + +#else +static inline void pswiotlb_init(bool addressing_limited, unsigned int flags) +{ +} + +static inline void pswiotlb_dev_init(struct device *dev) +{ +} +static inline bool is_pswiotlb_buffer(struct device *dev, int nid, phys_addr_t paddr, + struct p_io_tlb_pool **pool) +{ + return false; +} +static inline bool dma_is_in_local_node(struct device *dev, int nid, dma_addr_t addr, size_t size) +{ + return false; +} +static inline size_t pswiotlb_max_mapping_size(struct device *dev) +{ + return SIZE_MAX; +} + +static inline bool is_pswiotlb_allocated(struct device *dev) +{ + return false; +} +static inline bool is_pswiotlb_active(struct device *dev) +{ + return false; +} + +static inline void pswiotlb_adjust_size(unsigned long size) +{ +} + +static inline phys_addr_t default_pswiotlb_base(struct device *dev) +{ + return 0; +} + +static inline phys_addr_t default_pswiotlb_limit(struct device *dev) +{ + return 0; +} + +static inline bool pswiotlb_is_dev_in_passthroughlist(struct pci_dev *dev) +{ + return false; +} + +static inline bool pswiotlb_bypass_is_needed(struct device *dev, int nelems, + enum dma_data_direction dir) +{ + return true; +} +#endif /* CONFIG_PSWIOTLB */ + +extern void pswiotlb_print_info(int); +extern bool pswiotlb_dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size); + +#endif /* __LINUX_PSWIOTLB_H */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 3064314f48329cac729b9b4e29fc62072ec2df3b..550287c929906a402b03c65ee81479c2650799c8 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -205,6 +205,15 @@ copy_mc_to_kernel(void *dst, const void *src, size_t cnt) } #endif +#ifndef copy_mc_to_user +static inline unsigned long __must_check 
+copy_mc_to_user(void *dst, const void *src, size_t cnt) +{ + check_object_size(src, cnt, true); + return raw_copy_to_user(dst, src, cnt); +} +#endif + static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; diff --git a/include/trace/events/pswiotlb.h b/include/trace/events/pswiotlb.h new file mode 100644 index 0000000000000000000000000000000000000000..ed26c41a4046200216eae0ba71712fb99027b0a4 --- /dev/null +++ b/include/trace/events/pswiotlb.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pswiotlb + +#if !defined(_TRACE_PSWIOTLB_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PSWIOTLB_H + +#include + +TRACE_EVENT(pswiotlb_bounced, + + TP_PROTO(struct device *dev, + dma_addr_t dev_addr, + size_t size), + + TP_ARGS(dev, dev_addr, size), + + TP_STRUCT__entry( + __string(dev_name, dev_name(dev)) + __field(u64, dma_mask) + __field(dma_addr_t, dev_addr) + __field(size_t, size) + __field(bool, force) + ), + + TP_fast_assign( + __assign_str(dev_name, dev_name(dev)); + __entry->dma_mask = (dev->dma_mask ? *dev->dma_mask : 0); + __entry->dev_addr = dev_addr; + __entry->size = size; + ), + + TP_printk("dev_name: %s dma_mask=%llx dev_addr=%llx size=%zu %s", + __get_str(dev_name), + __entry->dma_mask, + (unsigned long long)__entry->dev_addr, + __entry->size, + __entry->force ? "NORMAL" : "FORCEOFF") +); + +#endif /* _TRACE_PSWIOTLB_H */ + +/* This part must be outside protection */ +#include diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index f488997b0717126b40b59e91a7d0cd1d8b4f94bf..f13515fc1384c6e583e4a8236c4866bdd4bf2557 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -270,3 +270,5 @@ config DMA_MAP_BENCHMARK performance of dma_(un)map_page. 
See tools/testing/selftests/dma/dma_map_benchmark.c + +source "kernel/dma/phytium/Kconfig" diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile index 21926e46ef4fb27fd378dd5741cee81b60b267da..c7c3cb4499e9c70de4b1fcb0132865035f8ccf38 100644 --- a/kernel/dma/Makefile +++ b/kernel/dma/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_SWIOTLB) += swiotlb.o obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o obj-$(CONFIG_MMU) += remap.o obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o +obj-$(CONFIG_PSWIOTLB) += phytium/ diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index f005c66f378c32e9bae2d84b0ca80d1c126f0c15..e119995ea37527a9fc2af5795acbe2548b373fb0 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -52,6 +52,10 @@ #include #include +#ifdef CONFIG_PSWIOTLB +#include "./phytium/pswiotlb-dma.h" +#endif + #ifdef CONFIG_CMA_SIZE_MBYTES #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES #else @@ -356,6 +360,10 @@ static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) */ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) { +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) + return NULL; +#endif #ifdef CONFIG_DMA_NUMA_CMA int nid = dev_to_node(dev); #endif @@ -408,6 +416,13 @@ void dma_free_contiguous(struct device *dev, struct page *page, size_t size) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) { + __free_pages(page, get_order(size)); + + return; + } +#endif /* if dev has its own cma, free page from there */ if (dev->cma_area) { if (cma_release(dev->cma_area, page, count)) diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index f1d9f01b283d7d74670f05711f1784f9aded73e3..2e392fc6c22ca0d75cd4d06b837fbe3465586658 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -16,6 +16,9 @@ #include #include "debug.h" #include "direct.h" +#ifdef CONFIG_PSWIOTLB +#include "./phytium/pswiotlb-dma.h" +#endif #if 
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ @@ -143,7 +146,6 @@ static inline bool dma_map_direct(struct device *dev, { return dma_go_direct(dev, *dev->dma_mask, ops); } - dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -156,6 +158,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, if (WARN_ON_ONCE(!dev->dma_mask)) return DMA_MAPPING_ERROR; +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev) && + !pswiotlb_bypass_is_needed(dev, 0, dir)) { + addr = pswiotlb_dma_map_page_distribute(dev, page, offset, size, dir, attrs); + return addr; + } +#endif if (dma_map_direct(dev, ops) || arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size)) addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); @@ -167,13 +176,18 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, return addr; } EXPORT_SYMBOL(dma_map_page_attrs); - void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) { + pswiotlb_dma_unmap_page_attrs_distribute(dev, addr, size, dir, attrs); + return; + } +#endif if (dma_map_direct(dev, ops) || arch_dma_unmap_page_direct(dev, addr + size)) dma_direct_unmap_page(dev, addr, size, dir, attrs); @@ -182,7 +196,6 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, debug_dma_unmap_page(dev, addr, size, dir); } EXPORT_SYMBOL(dma_unmap_page_attrs); - static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { @@ -193,7 +206,13 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, if 
(WARN_ON_ONCE(!dev->dma_mask)) return 0; - +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev) && + !pswiotlb_bypass_is_needed(dev, nents, dir)) { + ents = pswiotlb_dma_map_sg_attrs_distribute(dev, sg, nents, dir, attrs); + return ents; + } +#endif if (dma_map_direct(dev, ops) || arch_dma_map_sg_direct(dev, sg, nents)) ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); @@ -279,7 +298,6 @@ int dma_map_sgtable(struct device *dev, struct sg_table *sgt, return 0; } EXPORT_SYMBOL_GPL(dma_map_sgtable); - void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) @@ -288,6 +306,12 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) { + pswiotlb_dma_unmap_sg_attrs_distribute(dev, sg, nents, dir, attrs); + return; + } +#endif if (dma_map_direct(dev, ops) || arch_dma_unmap_sg_direct(dev, sg, nents)) dma_direct_unmap_sg(dev, sg, nents, dir, attrs); @@ -328,13 +352,18 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, debug_dma_unmap_resource(dev, addr, size, dir); } EXPORT_SYMBOL(dma_unmap_resource); - void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) { + pswiotlb_dma_sync_single_for_cpu_distribute(dev, addr, size, dir); + return; + } +#endif if (dma_map_direct(dev, ops)) dma_direct_sync_single_for_cpu(dev, addr, size, dir); else if (ops->sync_single_for_cpu) @@ -342,13 +371,18 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, debug_dma_sync_single_for_cpu(dev, addr, size, dir); } EXPORT_SYMBOL(dma_sync_single_for_cpu); - void dma_sync_single_for_device(struct 
device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) { + pswiotlb_dma_sync_single_for_device_distribute(dev, addr, size, dir); + return; + } +#endif if (dma_map_direct(dev, ops)) dma_direct_sync_single_for_device(dev, addr, size, dir); else if (ops->sync_single_for_device) @@ -356,13 +390,18 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, debug_dma_sync_single_for_device(dev, addr, size, dir); } EXPORT_SYMBOL(dma_sync_single_for_device); - void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) { + pswiotlb_dma_sync_sg_for_cpu_distribute(dev, sg, nelems, dir); + return; + } +#endif if (dma_map_direct(dev, ops)) dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); else if (ops->sync_sg_for_cpu) @@ -370,13 +409,18 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); } EXPORT_SYMBOL(dma_sync_sg_for_cpu); - void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); +#ifdef CONFIG_PSWIOTLB + if (check_if_pswiotlb_is_applicable(dev)) { + pswiotlb_dma_sync_sg_for_device_distribute(dev, sg, nelems, dir); + return; + } +#endif if (dma_map_direct(dev, ops)) dma_direct_sync_sg_for_device(dev, sg, nelems, dir); else if (ops->sync_sg_for_device) @@ -510,6 +554,9 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, if (WARN_ON_ONCE(flag & __GFP_COMP)) return NULL; +#ifdef CONFIG_PSWIOTLB + check_if_pswiotlb_is_applicable(dev); +#endif if 
(dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) return cpu_addr; diff --git a/kernel/dma/phytium/Kconfig b/kernel/dma/phytium/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..8553a65027ee3baaecd5d289325b9e756c57b22a --- /dev/null +++ b/kernel/dma/phytium/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config PSWIOTLB + bool "Phytium software IO TLB" + select NEED_DMA_MAP_STATE + depends on ARCH_PHYTIUM && NUMA + help + This enables phytium software IO TLB. You can disable phytium software + IO TLB using "pswiotlb=forceoff" on the kernel command line if you do + not need it when PSWIOTLB is Y. diff --git a/kernel/dma/phytium/Makefile b/kernel/dma/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f94ea59e950f535ccbaf6653361aefa368faeae0 --- /dev/null +++ b/kernel/dma/phytium/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_PSWIOTLB) += pswiotlb.o +obj-$(CONFIG_PSWIOTLB) += pswiotlb-mapping.o +obj-$(CONFIG_PSWIOTLB) += pswiotlb-direct.o +obj-$(CONFIG_PSWIOTLB) += pswiotlb-iommu.o diff --git a/kernel/dma/phytium/pswiotlb-direct.c b/kernel/dma/phytium/pswiotlb-direct.c new file mode 100644 index 0000000000000000000000000000000000000000..f5e1b62c67c9d0f91fc05f2667d2ab1cbdc4fd66 --- /dev/null +++ b/kernel/dma/phytium/pswiotlb-direct.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA operations based on Phytium software IO tlb that + * map physical memory directly without using an IOMMU. + * + * Copyright (c) 2024, Phytium Technology Co., Ltd. 
+ */ +#include /* for max_pfn */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pswiotlb-dma.h" + +/* + * The following functions are ported from + * ./drivers/dma/direct.c + * static inline dma_addr_t phys_to_dma_direct(struct device *dev, + * phys_addr_t phys); + */ + +static inline dma_addr_t phys_to_dma_direct(struct device *dev, + phys_addr_t phys) +{ + if (force_dma_unencrypted(dev)) + return phys_to_dma_unencrypted(dev, phys); + return phys_to_dma(dev, phys); +} + +bool pswiotlb_dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) +{ + dma_addr_t dma_addr = phys_to_dma_direct(dev, phys); + + if (dma_addr == DMA_MAPPING_ERROR) + return false; + return dma_addr + size - 1 <= + min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); +} + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_PSWIOTLB) +void pswiotlb_dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + for_each_sg(sgl, sg, nents, i) { + phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); + + if (unlikely(is_swiotlb_buffer(dev, paddr))) + swiotlb_sync_single_for_device(dev, paddr, sg->length, + dir); + + if (is_pswiotlb_active(dev) && + unlikely(is_pswiotlb_buffer(dev, nid, paddr, &pool))) + pswiotlb_sync_single_for_device(dev, nid, paddr, + sg->length, dir, pool); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(paddr, sg->length, + dir); + } +} +#endif + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ + defined(CONFIG_PSWIOTLB) +void pswiotlb_dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + 
for_each_sg(sgl, sg, nents, i) { + phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(paddr, sg->length, dir); + + if (unlikely(is_swiotlb_buffer(dev, paddr))) + swiotlb_sync_single_for_cpu(dev, paddr, sg->length, + dir); + + if (is_pswiotlb_active(dev) && + unlikely(is_pswiotlb_buffer(dev, nid, paddr, &pool))) + pswiotlb_sync_single_for_cpu(dev, nid, paddr, + sg->length, dir, pool); + + if (dir == DMA_FROM_DEVICE) + arch_dma_mark_clean(paddr, sg->length); + } + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu_all(); +} + +/* + * Unmaps segments, except for ones marked as pci_p2pdma which do not + * require any further action as they contain a bus address. + */ +void pswiotlb_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + pswiotlb_dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir, + attrs); +} +#endif + +int pswiotlb_dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i, ret; + + for_each_sg(sgl, sg, nents, i) { + sg->dma_address = pswiotlb_dma_direct_map_page(dev, sg_page(sg), + sg->offset, sg->length, dir, attrs); + if (sg->dma_address == DMA_MAPPING_ERROR) { + ret = -EIO; + goto out_unmap; + } + sg_dma_len(sg) = sg->length; + } + + return nents; + +out_unmap: + pswiotlb_dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + return ret; +} diff --git a/kernel/dma/phytium/pswiotlb-dma.h b/kernel/dma/phytium/pswiotlb-dma.h new file mode 100644 index 0000000000000000000000000000000000000000..0f159a3891749f38fbf8ce43bb2a69f5b9de05aa --- /dev/null +++ b/kernel/dma/phytium/pswiotlb-dma.h @@ -0,0 +1,334 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DMA operations based on Phytium software IO tlb that 
+ * map physical memory. + * + * Copyright (c) 2024, Phytium Technology Co., Ltd. + */ +#ifndef _KERNEL_PSWIOTLB_DMA_DIRECT_H +#define _KERNEL_PSWIOTLB_DMA_DIRECT_H + +#include +#include +#include + +extern bool pswiotlb_force_disable; +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_PSWIOTLB) +void pswiotlb_dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +#else +static inline void pswiotlb_dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ + defined(CONFIG_PSWIOTLB) +void pswiotlb_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs); +void pswiotlb_dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +#else +static inline void pswiotlb_dma_direct_unmap_sg(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} +static inline void pswiotlb_dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +#ifdef CONFIG_PSWIOTLB +int pswiotlb_dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs); +dma_addr_t pswiotlb_dma_map_page_distribute(struct device *dev, struct page *page, + size_t offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +void pswiotlb_dma_unmap_page_attrs_distribute(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +int pswiotlb_dma_map_sg_attrs_distribute(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); +void 
pswiotlb_dma_unmap_sg_attrs_distribute(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); +void pswiotlb_dma_sync_single_for_cpu_distribute(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir); +void pswiotlb_dma_sync_single_for_device_distribute(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir); +void pswiotlb_dma_sync_sg_for_cpu_distribute(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); +void pswiotlb_dma_sync_sg_for_device_distribute(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); +dma_addr_t pswiotlb_iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +void pswiotlb_iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, unsigned long attrs); +int pswiotlb_iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); +void pswiotlb_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); +void pswiotlb_iommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); +void pswiotlb_iommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); +void pswiotlb_iommu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, enum dma_data_direction dir); +void pswiotlb_iommu_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, enum dma_data_direction dir); + +static inline bool check_if_pswiotlb_is_applicable(struct device *dev) +{ + if (dev && dev->can_use_pswiotlb && is_phytium_ps_socs() + && !pswiotlb_force_disable) { + if 
(dev->numa_node == NUMA_NO_NODE || + dev->numa_node != dev->local_node) + dev->numa_node = dev->local_node; + + if (dev_is_pci(dev) && (dev->numa_node != NUMA_NO_NODE)) + return true; + } + + return false; +} + +static inline void pswiotlb_dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + phys_addr_t paddr = dma_to_phys(dev, addr); + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + if (unlikely(is_swiotlb_buffer(dev, paddr))) + swiotlb_sync_single_for_device(dev, paddr, size, dir); + + if (is_pswiotlb_active(dev)) { + if (unlikely(is_pswiotlb_buffer(dev, nid, paddr, &pool))) + pswiotlb_sync_single_for_device(dev, nid, paddr, size, dir, pool); + } + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(paddr, size, dir); +} + +static inline void pswiotlb_dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + phys_addr_t paddr = dma_to_phys(dev, addr); + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + if (!dev_is_dma_coherent(dev)) { + arch_sync_dma_for_cpu(paddr, size, dir); + arch_sync_dma_for_cpu_all(); + } + + if (unlikely(is_swiotlb_buffer(dev, paddr))) + swiotlb_sync_single_for_cpu(dev, paddr, size, dir); + + if (is_pswiotlb_active(dev)) { + if (unlikely(is_pswiotlb_buffer(dev, nid, paddr, &pool))) + pswiotlb_sync_single_for_cpu(dev, nid, paddr, size, dir, pool); + } + + if (dir == DMA_FROM_DEVICE) + arch_dma_mark_clean(paddr, size); +} + +static inline dma_addr_t pswiotlb_dma_direct_map_page(struct device *dev, + struct page *page, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys = page_to_phys(page) + offset; + dma_addr_t dma_addr = phys_to_dma(dev, phys); + int nid = dev->numa_node; + + if (is_swiotlb_force_bounce(dev)) + return swiotlb_map(dev, phys, size, dir, attrs); + + if (unlikely(!dma_capable(dev, dma_addr, size, true)) || + 
dma_kmalloc_needs_bounce(dev, size, dir)) { + if (is_swiotlb_active(dev)) + return swiotlb_map(dev, phys, size, dir, attrs); + + dev_WARN_ONCE(dev, 1, + "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", + &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); + return DMA_MAPPING_ERROR; + } + + /* check whether dma addr is in local node */ + if (is_pswiotlb_active(dev)) { + if (dir != DMA_TO_DEVICE) { + if (unlikely(!dma_is_in_local_node(dev, nid, dma_addr, size))) { + dma_addr = pswiotlb_map(dev, nid, phys, size, dir, attrs); + if (dma_addr == DMA_MAPPING_ERROR) { + dma_addr = phys_to_dma(dev, phys); + dev_warn_once(dev, + "Failed to allocate memory from pswiotlb, fall back to non-local dma\n"); + } else + return dma_addr; + } + } + } + + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(phys, size, dir); + return dma_addr; +} + +static inline void pswiotlb_dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys = dma_to_phys(dev, addr); + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + !dev_is_dma_coherent(dev)) { + arch_sync_dma_for_cpu(phys, size, dir); + arch_sync_dma_for_cpu_all(); + } + + if (unlikely(is_swiotlb_buffer(dev, phys))) + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); + + if (is_pswiotlb_active(dev)) { + if (unlikely(is_pswiotlb_buffer(dev, nid, phys, &pool))) + pswiotlb_tbl_unmap_single(dev, nid, phys, 0, size, dir, attrs, pool); + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && (dir == DMA_FROM_DEVICE)) + arch_dma_mark_clean(phys, size); + } +} +#else +static inline int pswiotlb_dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + return 0; +} + +static inline dma_addr_t pswiotlb_dma_map_page_distribute(struct device *dev, + struct page *page, size_t offset, size_t 
size, enum dma_data_direction dir, + unsigned long attrs) +{ + return 0; +} + +static inline void pswiotlb_dma_unmap_page_attrs_distribute(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ +} + +static inline int pswiotlb_dma_map_sg_attrs_distribute(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + return 0; +} + +static inline void pswiotlb_dma_unmap_sg_attrs_distribute(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} + +static inline void pswiotlb_dma_sync_single_for_cpu_distribute(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} + +static inline void pswiotlb_dma_sync_single_for_device_distribute(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} + +static inline void pswiotlb_dma_sync_sg_for_cpu_distribute(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ +} + +static inline void pswiotlb_dma_sync_sg_for_device_distribute(struct device *dev, + struct scatterlist *sg, int nelems, enum dma_data_direction dir) +{ +} + +static inline dma_addr_t pswiotlb_iommu_dma_map_page(struct device *dev, + struct page *page, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + return 0; +} + +static inline void pswiotlb_iommu_dma_unmap_page(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ +} + +static inline int pswiotlb_iommu_dma_map_sg(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + return 0; +} + +static inline void pswiotlb_iommu_dma_unmap_sg(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} + +static inline void 
pswiotlb_iommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +{ +} + +static inline void pswiotlb_iommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +{ +} + +static inline void pswiotlb_iommu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, enum dma_data_direction dir) +{ +} + +static inline void pswiotlb_iommu_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, enum dma_data_direction dir) +{ +} + +static inline bool check_if_pswiotlb_is_applicable(struct device *dev) +{ + return false; +} + +static inline void pswiotlb_dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} + +static inline void pswiotlb_dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} + +static inline dma_addr_t pswiotlb_dma_direct_map_page(struct device *dev, + struct page *page, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + return 0; +} + +static inline void pswiotlb_dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ +} +#endif /* CONFIG_PSWIOTLB*/ +#endif /* _KERNEL_PSWIOTLB_DMA_DIRECT_H */ diff --git a/kernel/dma/phytium/pswiotlb-iommu.c b/kernel/dma/phytium/pswiotlb-iommu.c new file mode 100644 index 0000000000000000000000000000000000000000..bc8954afff899351c203d8db93ada23e352f49d6 --- /dev/null +++ b/kernel/dma/phytium/pswiotlb-iommu.c @@ -0,0 +1,1219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA operations based on Phytium software IO tlb that + * map physical memory indirectly with an IOMMU. + * + * Copyright (c) 2024, Phytium Technology Co., Ltd. 
+ */ + +#define pr_fmt(fmt) "pswiotlb iommu: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + +#include "pswiotlb-dma.h" + +enum iommu_dma_cookie_type { + IOMMU_DMA_IOVA_COOKIE, + IOMMU_DMA_MSI_COOKIE, +}; + +struct iommu_dma_cookie { + enum iommu_dma_cookie_type type; + union { + /* Full allocator for IOMMU_DMA_IOVA_COOKIE */ + struct { + struct iova_domain iovad; + + struct iova_fq __percpu *fq; /* Flush queue */ + /* Number of TLB flushes that have been started */ + atomic64_t fq_flush_start_cnt; + /* Number of TLB flushes that have been finished */ + atomic64_t fq_flush_finish_cnt; + /* Timer to regularily empty the flush queues */ + struct timer_list fq_timer; + /* 1 when timer is active, 0 when not */ + atomic_t fq_timer_on; + }; + /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */ + dma_addr_t msi_iova; + }; + struct list_head msi_page_list; + + /* Domain for flush queue callback; NULL if flush queue not in use */ + struct iommu_domain *fq_domain; + struct mutex mutex; +}; + +static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled); + +/* Number of entries per flush queue */ +#define IOVA_FQ_SIZE 256 + +/* Timeout (in ms) after which entries are flushed from the queue */ +#define IOVA_FQ_TIMEOUT 10 + +/* Flush queue entry for deferred flushing */ +struct iova_fq_entry { + unsigned long iova_pfn; + unsigned long pages; + struct list_head freelist; + u64 counter; /* Flush counter when this entry was added */ +}; + +/* Per-CPU flush queue structure */ +struct iova_fq { + struct iova_fq_entry entries[IOVA_FQ_SIZE]; + unsigned int head, tail; + spinlock_t 
lock; +}; + +#define fq_ring_for_each(i, fq) \ + for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE) + +/* + * The following functions are ported from + * ./drivers/iommu/dma-iommu.c + * ./drivers/iommu/iommu.c + * static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, + * phys_addr_t paddr, size_t size, size_t *count); + * static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, + * phys_addr_t paddr, size_t size, int prot, + * gfp_t gfp, size_t *mapped); + * static int __iommu_map(struct iommu_domain *domain, unsigned long iova, + * phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + * static bool dev_is_untrusted(struct device *dev); + * static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, + * unsigned long attrs); + * static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, + * size_t size, u64 dma_limit, struct device *dev); + * static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, + * dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather); + * static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, + * size_t size); + * static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, + * size_t size, int prot, u64 dma_mask); + * static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, + * dma_addr_t dma_addr); + * static void __invalidate_sg(struct scatterlist *sg, int nents); + */ + +static inline bool fq_full(struct iova_fq *fq) +{ + assert_spin_locked(&fq->lock); + return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head); +} + +static inline unsigned int fq_ring_add(struct iova_fq *fq) +{ + unsigned int idx = fq->tail; + + assert_spin_locked(&fq->lock); + + fq->tail = (idx + 1) % IOVA_FQ_SIZE; + + return idx; +} + +static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq) +{ + u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt); + unsigned int idx; + + 
assert_spin_locked(&fq->lock); + + fq_ring_for_each(idx, fq) { + + if (fq->entries[idx].counter >= counter) + break; + + put_pages_list(&fq->entries[idx].freelist); + free_iova_fast(&cookie->iovad, + fq->entries[idx].iova_pfn, + fq->entries[idx].pages); + + fq->head = (fq->head + 1) % IOVA_FQ_SIZE; + } +} + +static void fq_flush_iotlb(struct iommu_dma_cookie *cookie) +{ + atomic64_inc(&cookie->fq_flush_start_cnt); + cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain); + atomic64_inc(&cookie->fq_flush_finish_cnt); +} + +static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, size_t *count) +{ + unsigned int pgsize_idx, pgsize_idx_next; + unsigned long pgsizes; + size_t offset, pgsize, pgsize_next; + unsigned long addr_merge = paddr | iova; + + /* Page sizes supported by the hardware and small enough for @size */ + pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); + + /* Constrain the page sizes further based on the maximum alignment */ + if (likely(addr_merge)) + pgsizes &= GENMASK(__ffs(addr_merge), 0); + + /* Make sure we have at least one suitable page size */ + BUG_ON(!pgsizes); + + /* Pick the biggest page size remaining */ + pgsize_idx = __fls(pgsizes); + pgsize = BIT(pgsize_idx); + if (!count) + return pgsize; + + /* Find the next biggest support page size, if it exists */ + pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); + if (!pgsizes) + goto out_set_count; + + pgsize_idx_next = __ffs(pgsizes); + pgsize_next = BIT(pgsize_idx_next); + + /* + * There's no point trying a bigger page size unless the virtual + * and physical addresses are similarly offset within the larger page. + */ + if ((iova ^ paddr) & (pgsize_next - 1)) + goto out_set_count; + + /* Calculate the offset to the next page size alignment boundary */ + offset = pgsize_next - (addr_merge & (pgsize_next - 1)); + + /* + * If size is big enough to accommodate the larger page, reduce + * the number of smaller pages. 
+ */ + if (offset + pgsize_next <= size) + size = offset; + +out_set_count: + *count = size >> pgsize_idx; + return pgsize; +} + +static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, + gfp_t gfp, size_t *mapped) +{ + const struct iommu_domain_ops *ops = domain->ops; + size_t pgsize, count; + int ret; + + pgsize = iommu_pgsize(domain, iova, paddr, size, &count); + + pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n", + iova, &paddr, pgsize, count); + + if (ops->map_pages) { + ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, + gfp, mapped); + } else { + ret = ops->map(domain, iova, paddr, pgsize, prot, gfp); + *mapped = ret ? 0 : pgsize; + } + + return ret; +} + +static int __iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +{ + const struct iommu_domain_ops *ops = domain->ops; + unsigned long orig_iova = iova; + unsigned int min_pagesz; + size_t orig_size = size; + phys_addr_t orig_paddr = paddr; + int ret = 0; + + if (unlikely(!(ops->map || ops->map_pages) || + domain->pgsize_bitmap == 0UL)) + return -ENODEV; + + if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) + return -EINVAL; + + /* find out the minimum page size supported */ + min_pagesz = 1 << __ffs(domain->pgsize_bitmap); + + /* + * both the virtual address and the physical one, as well as + * the size of the mapping, must be aligned (at least) to the + * size of the smallest page supported by the hardware + */ + if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { + pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n", + iova, &paddr, size, min_pagesz); + return -EINVAL; + } + + pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); + + while (size) { + size_t mapped = 0; + + ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp, + &mapped); + /* + * Some pages may have been mapped, even if an error occurred, + 
* so we should account for those so they can be unmapped. + */ + size -= mapped; + + if (ret) + break; + + iova += mapped; + paddr += mapped; + } + + /* unroll mapping in case something went wrong */ + if (ret) + iommu_unmap(domain, orig_iova, orig_size - size); + else + trace_map(orig_iova, orig_paddr, orig_size); + + return ret; +} + +static ssize_t __iommu_map_sg_dma(struct device *dev, struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, unsigned int nents, + int prot, gfp_t gfp, enum dma_data_direction dir, unsigned long attrs) +{ + const struct iommu_domain_ops *ops = domain->ops; + size_t mapped = 0; + int ret; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + size_t aligned_size; + int nid = dev->numa_node; + struct scatterlist *sg_orig = sg; + struct scatterlist *s; + int i; + + might_sleep_if(gfpflags_allow_blocking(gfp)); + + /* Discourage passing strange GFP flags */ + if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 | + __GFP_HIGHMEM))) + return -EINVAL; + + for_each_sg(sg, s, nents, i) { + phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; + + /* check whether dma addr is in local node */ + if (dir != DMA_TO_DEVICE) { + aligned_size = s->length; + if ((!dma_is_in_local_node(dev, nid, phys, + aligned_size)) && (pswiotlb_force_disable != true)) { + aligned_size = iova_align(iovad, s->length); + phys = pswiotlb_tbl_map_single(dev, nid, + phys, s->length, aligned_size, iova_mask(iovad), dir, attrs); + if (phys == DMA_MAPPING_ERROR) { + phys = page_to_phys(sg_page(s)) + s->offset; + dev_warn_once(dev, + "Failed to allocate memory from pswiotlb, fall back to non-local dma\n"); + } + } + } + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(phys, s->length, dir); + + ret = __iommu_map(domain, iova + mapped, phys, + s->length, prot, gfp); + if (ret) + goto out_err; + + mapped += s->length; + } + + if (ops->iotlb_sync_map) 
+ ops->iotlb_sync_map(domain, iova, mapped); + return mapped; + +out_err: + /* undo mappings already done */ + iommu_dma_unmap_sg_pswiotlb(dev, sg_orig, iova, + mapped, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + iommu_unmap(domain, iova, mapped); + + return ret; +} + +static ssize_t pswiotlb_iommu_map_sg_atomic_dma(struct device *dev, + struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot, + enum dma_data_direction dir, unsigned long attrs) +{ + return __iommu_map_sg_dma(dev, domain, iova, sg, nents, prot, GFP_ATOMIC, dir, attrs); +} + +static bool dev_is_untrusted(struct device *dev) +{ + return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; +} + +static bool dev_use_swiotlb(struct device *dev, size_t size, + enum dma_data_direction dir) +{ + return IS_ENABLED(CONFIG_SWIOTLB) && + (dev_is_untrusted(dev) || + dma_kmalloc_needs_bounce(dev, size, dir)); +} + +/** + * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API + * page flags. + * @dir: Direction of DMA transfer + * @coherent: Is the DMA master cache-coherent? + * @attrs: DMA attributes for the mapping + * + * Return: corresponding IOMMU API page protection flags + */ +static int dma_info_to_prot(enum dma_data_direction dir, bool coherent, + unsigned long attrs) +{ + int prot = coherent ? 
IOMMU_CACHE : 0; + + if (attrs & DMA_ATTR_PRIVILEGED) + prot |= IOMMU_PRIV; + + switch (dir) { + case DMA_BIDIRECTIONAL: + return prot | IOMMU_READ | IOMMU_WRITE; + case DMA_TO_DEVICE: + return prot | IOMMU_READ; + case DMA_FROM_DEVICE: + return prot | IOMMU_WRITE; + default: + return 0; + } +} + +static void queue_iova(struct iommu_dma_cookie *cookie, + unsigned long pfn, unsigned long pages, + struct list_head *freelist) +{ + struct iova_fq *fq; + unsigned long flags; + unsigned int idx; + + /* + * Order against the IOMMU driver's pagetable update from unmapping + * @pte, to guarantee that fq_flush_iotlb() observes that if called + * from a different CPU before we release the lock below. Full barrier + * so it also pairs with iommu_dma_init_fq() to avoid seeing partially + * written fq state here. + */ + smp_mb(); + + fq = raw_cpu_ptr(cookie->fq); + spin_lock_irqsave(&fq->lock, flags); + + /* + * First remove all entries from the flush queue that have already been + * flushed out on another CPU. This makes the fq_full() check below less + * likely to be true. + */ + fq_ring_free(cookie, fq); + + if (fq_full(fq)) { + fq_flush_iotlb(cookie); + fq_ring_free(cookie, fq); + } + + idx = fq_ring_add(fq); + + fq->entries[idx].iova_pfn = pfn; + fq->entries[idx].pages = pages; + fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt); + list_splice(freelist, &fq->entries[idx].freelist); + + spin_unlock_irqrestore(&fq->lock, flags); + + /* Avoid false sharing as much as possible. 
*/ + if (!atomic_read(&cookie->fq_timer_on) && + !atomic_xchg(&cookie->fq_timer_on, 1)) + mod_timer(&cookie->fq_timer, + jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT)); +} + +static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, + size_t size, u64 dma_limit, struct device *dev) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + unsigned long shift, iova_len, iova; + + if (cookie->type == IOMMU_DMA_MSI_COOKIE) { + cookie->msi_iova += size; + return cookie->msi_iova - size; + } + + shift = iova_shift(iovad); + iova_len = size >> shift; + + dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); + + if (domain->geometry.force_aperture) + dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); + + /* + * Try to use all the 32-bit PCI addresses first. The original SAC vs. + * DAC reasoning loses relevance with PCIe, but enough hardware and + * firmware bugs are still lurking out there that it's safest not to + * venture into the 64-bit space until necessary. + * + * If your device goes wrong after seeing the notice then likely either + * its driver is not setting DMA masks accurately, the hardware has + * some inherent bug in handling >32-bit addresses, or not all the + * expected address bits are wired up between the device and the IOMMU. 
+ */ + if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) { + iova = alloc_iova_fast(iovad, iova_len, + DMA_BIT_MASK(32) >> shift, false); + if (iova) + goto done; + + dev->iommu->pci_32bit_workaround = false; + dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit)); + } + + iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true); +done: + return (dma_addr_t)iova << shift; +} + +static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, + dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather) +{ + struct iova_domain *iovad = &cookie->iovad; + + /* The MSI case is only ever cleaning up its most recent allocation */ + if (cookie->type == IOMMU_DMA_MSI_COOKIE) + cookie->msi_iova -= size; + else if (gather && gather->queued) + queue_iova(cookie, iova_pfn(iovad, iova), + size >> iova_shift(iovad), + &gather->freelist); + else + free_iova_fast(iovad, iova_pfn(iovad, iova), + size >> iova_shift(iovad)); +} + +static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, + size_t size) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + size_t iova_off = iova_offset(iovad, dma_addr); + struct iommu_iotlb_gather iotlb_gather; + size_t unmapped; + + dma_addr -= iova_off; + size = iova_align(iovad, size + iova_off); + iommu_iotlb_gather_init(&iotlb_gather); + iotlb_gather.queued = READ_ONCE(cookie->fq_domain); + + unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather); + WARN_ON(unmapped != size); + + if (!iotlb_gather.queued) + iommu_iotlb_sync(domain, &iotlb_gather); + iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather); +} + +static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, + size_t size, int prot, u64 dma_mask) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain 
*iovad = &cookie->iovad; + size_t iova_off = iova_offset(iovad, phys); + dma_addr_t iova; + + if (static_branch_unlikely(&iommu_deferred_attach_enabled) && + iommu_deferred_attach(dev, domain)) + return DMA_MAPPING_ERROR; + + size = iova_align(iovad, size + iova_off); + + iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev); + if (!iova) + return DMA_MAPPING_ERROR; + + if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) { + iommu_dma_free_iova(cookie, iova, size, NULL); + return DMA_MAPPING_ERROR; + } + return iova + iova_off; +} + +void pswiotlb_iommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +{ + phys_addr_t phys; + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + if (is_pswiotlb_active(dev)) { + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(phys, size, dir); + + if (is_pswiotlb_buffer(dev, nid, phys, &pool)) + pswiotlb_sync_single_for_cpu(dev, nid, phys, size, dir, pool); + + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) + return; + + if (is_swiotlb_buffer(dev, phys)) + swiotlb_sync_single_for_cpu(dev, phys, size, dir); + } else { + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) + return; + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(phys, size, dir); + if (is_swiotlb_buffer(dev, phys)) + swiotlb_sync_single_for_cpu(dev, phys, size, dir); + } +} + +void pswiotlb_iommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +{ + phys_addr_t phys; + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + if (is_pswiotlb_active(dev)) { + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); + if (is_pswiotlb_buffer(dev, nid, phys, &pool)) + pswiotlb_sync_single_for_device(dev, nid, phys, size, dir, 
pool); + + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) + return; + } else { + if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) + return; + + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); + } + + if (is_swiotlb_buffer(dev, phys)) + swiotlb_sync_single_for_device(dev, phys, size, dir); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(phys, size, dir); +} + +void pswiotlb_iommu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + int nid = dev->numa_node; + dma_addr_t start_orig; + phys_addr_t phys; + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + struct p_io_tlb_pool *pool; + + if (is_pswiotlb_active(dev)) { + start_orig = sg_dma_address(sgl); + for_each_sg(sgl, sg, nelems, i) { + if (dir != DMA_TO_DEVICE) { + unsigned int s_iova_off = iova_offset(iovad, sg->offset); + + if (i > 0) + start_orig += s_iova_off; + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), start_orig); + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(phys, sg->length, dir); + + if (is_pswiotlb_buffer(dev, nid, phys, &pool)) + pswiotlb_sync_single_for_cpu(dev, nid, phys, + sg->length, dir, pool); + start_orig -= s_iova_off; + start_orig += iova_align(iovad, sg->length + s_iova_off); + } else { + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); + } + } + } else { + if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev)) + return; + + for_each_sg(sgl, sg, nelems, i) { + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); + + if (is_swiotlb_buffer(dev, sg_phys(sg))) + swiotlb_sync_single_for_cpu(dev, sg_phys(sg), + sg->length, dir); + } + } +} + +void pswiotlb_iommu_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int 
nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + if (is_pswiotlb_active(dev)) { + for_each_sg(sgl, sg, nelems, i) { + if (is_pswiotlb_buffer(dev, nid, sg_phys(sg), &pool)) + pswiotlb_sync_single_for_device(dev, nid, sg_phys(sg), + sg->length, dir, pool); + if (dev_is_dma_coherent(dev) && !sg_dma_is_swiotlb(sgl)) + continue; + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); + } + } else { + if (dev_is_dma_coherent(dev) && !sg_dma_is_swiotlb(sgl)) + return; + + for_each_sg(sgl, sg, nelems, i) { + if (is_swiotlb_buffer(dev, sg_phys(sg))) + swiotlb_sync_single_for_device(dev, sg_phys(sg), + sg->length, dir); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); + } + } +} + +dma_addr_t pswiotlb_iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + phys_addr_t phys = page_to_phys(page) + offset; + bool coherent = dev_is_dma_coherent(dev); + + int prot = dma_info_to_prot(dir, coherent, attrs); + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + size_t aligned_size = size; + dma_addr_t iova, dma_mask = dma_get_mask(dev); + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + /* + * If both the physical buffer start address and size are + * page aligned, we don't need to use a bounce page. 
+ */ + if (dev_use_swiotlb(dev, size, dir) && + iova_offset(iovad, phys | size)) { + void *padding_start; + size_t padding_size; + + if (!is_swiotlb_active(dev)) { + dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n"); + return DMA_MAPPING_ERROR; + } + + aligned_size = iova_align(iovad, size); + phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size, + iova_mask(iovad), dir, attrs); + + if (phys == DMA_MAPPING_ERROR) + return DMA_MAPPING_ERROR; + + /* Cleanup the padding area. */ + padding_start = phys_to_virt(phys); + padding_size = aligned_size; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) { + padding_start += size; + padding_size -= size; + } + + memset(padding_start, 0, padding_size); + } + + /* check whether dma addr is in local node */ + if (is_pswiotlb_active(dev)) { + if (dir != DMA_TO_DEVICE) { + if (unlikely(!dma_is_in_local_node(dev, nid, phys, aligned_size))) { + aligned_size = iova_align(iovad, size); + phys = pswiotlb_tbl_map_single(dev, nid, phys, size, + aligned_size, iova_mask(iovad), + dir, attrs); + if (phys == DMA_MAPPING_ERROR) { + phys = page_to_phys(page) + offset; + dev_warn_once(dev, + "Failed to allocate memory from pswiotlb, fall back to non-local dma\n"); + } + } + } + } + + if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(phys, size, dir); + + iova = __iommu_dma_map(dev, phys, size, prot, dma_mask); + if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys)) + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); + if (iova == DMA_MAPPING_ERROR && is_pswiotlb_buffer(dev, nid, phys, &pool)) + pswiotlb_tbl_unmap_single(dev, nid, phys, 0, size, dir, attrs, pool); + return iova; +} + +void pswiotlb_iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + phys_addr_t phys; 
+ int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + phys = iommu_iova_to_phys(domain, dma_handle); + if (WARN_ON(!phys)) + return; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(phys, size, dir); + + __iommu_dma_unmap(dev, dma_handle, size); + + if (unlikely(is_swiotlb_buffer(dev, phys))) + swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); + + if (is_pswiotlb_active(dev) && + is_pswiotlb_buffer(dev, nid, phys, &pool)) + pswiotlb_tbl_unmap_single(dev, nid, phys, 0, size, dir, attrs, pool); +} + +static void iommu_dma_unmap_page_sg(struct device *dev, dma_addr_t dma_handle, + size_t offset, size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + phys_addr_t phys; + int nid = dev->numa_node; + struct p_io_tlb_pool *pool; + + phys = iommu_iova_to_phys(domain, dma_handle); + + if (WARN_ON(!phys)) + return; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(phys, size, dir); + + if (is_pswiotlb_buffer(dev, nid, phys, &pool)) + pswiotlb_tbl_unmap_single(dev, nid, phys, offset, size, dir, attrs, pool); +} + +/* + * Prepare a successfully-mapped scatterlist to give back to the caller. + * + * At this point the segments are already laid out by pswiotlb_iommu_dma_map_sg() to + * avoid individually crossing any boundaries, so we merely need to check a + * segment's start address to avoid concatenating across one. 
+ */ +static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, + dma_addr_t dma_addr) +{ + struct scatterlist *s, *cur = sg; + unsigned long seg_mask = dma_get_seg_boundary(dev); + unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev); + int i, count = 0; + + for_each_sg(sg, s, nents, i) { + /* Restore this segment's original unaligned fields first */ + dma_addr_t s_dma_addr = sg_dma_address(s); + unsigned int s_iova_off = sg_dma_address(s); + unsigned int s_length = sg_dma_len(s); + unsigned int s_iova_len = s->length; + + sg_dma_address(s) = DMA_MAPPING_ERROR; + sg_dma_len(s) = 0; + + if (sg_dma_is_bus_address(s)) { + if (i > 0) + cur = sg_next(cur); + + sg_dma_unmark_bus_address(s); + sg_dma_address(cur) = s_dma_addr; + sg_dma_len(cur) = s_length; + sg_dma_mark_bus_address(cur); + count++; + cur_len = 0; + continue; + } + + s->offset += s_iova_off; + s->length = s_length; + + /* + * Now fill in the real DMA data. If... + * - there is a valid output segment to append to + * - and this segment starts on an IOVA page boundary + * - but doesn't fall at a segment boundary + * - and wouldn't make the resulting output segment too long + */ + if (cur_len && !s_iova_off && (dma_addr & seg_mask) && + (max_len - cur_len >= s_length)) { + /* ...then concatenate it with the previous one */ + cur_len += s_length; + } else { + /* Otherwise start the next output segment */ + if (i > 0) + cur = sg_next(cur); + cur_len = s_length; + count++; + + sg_dma_address(cur) = dma_addr + s_iova_off; + } + + sg_dma_len(cur) = cur_len; + dma_addr += s_iova_len; + + if (s_length + s_iova_off < s_iova_len) + cur_len = 0; + } + return count; +} + +/* + * If mapping failed, then just restore the original list, + * but making sure the DMA fields are invalidated. 
+ */ +static void __invalidate_sg(struct scatterlist *sg, int nents) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + if (sg_dma_is_bus_address(s)) { + sg_dma_unmark_bus_address(s); + } else { + if (sg_dma_address(s) != DMA_MAPPING_ERROR) + s->offset += sg_dma_address(s); + if (sg_dma_len(s)) + s->length = sg_dma_len(s); + } + sg_dma_address(s) = DMA_MAPPING_ERROR; + sg_dma_len(s) = 0; + } +} + +static void iommu_dma_unmap_sg_pswiotlb_pagesize(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) + pswiotlb_iommu_dma_unmap_page(dev, sg_dma_address(s), + sg_dma_len(s), dir, attrs); +} + +void iommu_dma_unmap_sg_pswiotlb(struct device *dev, struct scatterlist *sg, + unsigned long iova_start, size_t mapped, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + dma_addr_t start, start_orig; + struct scatterlist *s; + struct scatterlist *sg_orig = sg; + int i; + + start = iova_start; + start_orig = start; + for_each_sg(sg_orig, s, nents, i) { + if (!mapped || (start_orig > (start + mapped))) + break; + if (s->length == 0) + break; + iommu_dma_unmap_page_sg(dev, start_orig, 0, + s->length, dir, attrs); + start_orig += s->length; + } +} + +static int iommu_dma_map_sg_pswiotlb_pagesize(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *s; + int i; + + sg_dma_mark_swiotlb(sg); + + for_each_sg(sg, s, nents, i) { + sg_dma_address(s) = pswiotlb_iommu_dma_map_page(dev, sg_page(s), + s->offset, s->length, dir, attrs); + if (sg_dma_address(s) == DMA_MAPPING_ERROR) + goto out_unmap; + sg_dma_len(s) = s->length; + } + + return nents; + +out_unmap: + iommu_dma_unmap_sg_pswiotlb_pagesize(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + return -EIO; +} + +/* + * The DMA API client is passing in a scatterlist which could describe + * any old 
buffer layout, but the IOMMU API requires everything to be + * aligned to IOMMU pages. Hence the need for this complicated bit of + * impedance-matching, to be able to hand off a suitably-aligned list, + * but still preserve the original offsets and sizes for the caller. + */ +int pswiotlb_iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + struct scatterlist *s, *prev = NULL; + int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs); + struct pci_p2pdma_map_state p2pdma_state = {}; + enum pci_p2pdma_map_type map; + dma_addr_t iova; + size_t iova_len = 0; + unsigned long mask = dma_get_seg_boundary(dev); + ssize_t ret; + int i; + + if (static_branch_unlikely(&iommu_deferred_attach_enabled)) { + ret = iommu_deferred_attach(dev, domain); + goto out; + } + + if (dir != DMA_TO_DEVICE && is_pswiotlb_active(dev) + && ((nents == 1) && (sg->length < PAGE_SIZE))) + return iommu_dma_map_sg_pswiotlb_pagesize(dev, sg, nents, dir, attrs); + + if ((dir == DMA_TO_DEVICE) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + pswiotlb_iommu_dma_sync_sg_for_device(dev, sg, nents, dir); + + /* + * Work out how much IOVA space we need, and align the segments to + * IOVA granules for the IOMMU driver to handle. With some clever + * trickery we can modify the list in-place, but reversibly, by + * stashing the unaligned parts in the as-yet-unused DMA fields. 
+ */ + for_each_sg(sg, s, nents, i) { + size_t s_iova_off = iova_offset(iovad, s->offset); + size_t s_length = s->length; + size_t pad_len = (mask - iova_len + 1) & mask; + + if (is_pci_p2pdma_page(sg_page(s))) { + map = pci_p2pdma_map_segment(&p2pdma_state, dev, s); + switch (map) { + case PCI_P2PDMA_MAP_BUS_ADDR: + /* + * iommu_map_sg() will skip this segment as + * it is marked as a bus address, + * __finalise_sg() will copy the dma address + * into the output segment. + */ + continue; + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + /* + * Mapping through host bridge should be + * mapped with regular IOVAs, thus we + * do nothing here and continue below. + */ + break; + default: + ret = -EREMOTEIO; + goto out_restore_sg; + } + } + + sg_dma_address(s) = s_iova_off; + sg_dma_len(s) = s_length; + s->offset -= s_iova_off; + s_length = iova_align(iovad, s_length + s_iova_off); + s->length = s_length; + + /* + * Due to the alignment of our single IOVA allocation, we can + * depend on these assumptions about the segment boundary mask: + * - If mask size >= IOVA size, then the IOVA range cannot + * possibly fall across a boundary, so we don't care. + * - If mask size < IOVA size, then the IOVA range must start + * exactly on a boundary, therefore we can lay things out + * based purely on segment lengths without needing to know + * the actual addresses beforehand. + * - The mask must be a power of 2, so pad_len == 0 if + * iova_len == 0, thus we cannot dereference prev the first + * time through here (i.e. before it has a meaningful value). 
+ */ + if (pad_len && pad_len < s_length - 1) { + prev->length += pad_len; + iova_len += pad_len; + } + + iova_len += s_length; + prev = s; + } + + if (!iova_len) + return __finalise_sg(dev, sg, nents, 0); + + iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); + if (!iova) { + ret = -ENOMEM; + goto out_restore_sg; + } + + /* + * We'll leave any physical concatenation to the IOMMU driver's + * implementation - it knows better than we do. + */ + if (dir != DMA_TO_DEVICE && is_pswiotlb_active(dev)) + ret = pswiotlb_iommu_map_sg_atomic_dma(dev, domain, + iova, sg, nents, prot, dir, attrs); + else + ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC); + + if (ret < 0 || ret < iova_len) + goto out_free_iova; + + return __finalise_sg(dev, sg, nents, iova); + +out_free_iova: + iommu_dma_free_iova(cookie, iova, iova_len, NULL); +out_restore_sg: + __invalidate_sg(sg, nents); +out: + if (ret != -ENOMEM && ret != -EREMOTEIO) + return -EINVAL; + return ret; +} + +void pswiotlb_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + dma_addr_t start, end = 0, start_orig; + struct scatterlist *tmp, *s; + struct scatterlist *sg_orig = sg; + int i; + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + + if ((dir != DMA_TO_DEVICE) && ((nents == 1) && (sg->length < PAGE_SIZE))) { + iommu_dma_unmap_sg_pswiotlb_pagesize(dev, sg, nents, dir, attrs); + return; + } + + if ((dir == DMA_TO_DEVICE) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + pswiotlb_iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir); + + /* + * The scatterlist segments are mapped into a single + * contiguous IOVA allocation, the start and end points + * just have to be determined. 
+ */ + for_each_sg(sg, tmp, nents, i) { + if (sg_dma_is_bus_address(tmp)) { + sg_dma_unmark_bus_address(tmp); + continue; + } + + if (sg_dma_len(tmp) == 0) + break; + + start = sg_dma_address(tmp); + break; + } + + if (is_pswiotlb_active(dev)) { + /* check whether dma addr is in local node */ + start_orig = start; + if (dir != DMA_TO_DEVICE) { + for_each_sg(sg_orig, s, nents, i) { + unsigned int s_iova_off = iova_offset(iovad, s->offset); + + if (i > 0) + start_orig += s_iova_off; + iommu_dma_unmap_page_sg(dev, start_orig, + s_iova_off, s->length, + dir, attrs); + start_orig -= s_iova_off; + start_orig += iova_align(iovad, s->length + s_iova_off); + } + } + } + + nents -= i; + for_each_sg(tmp, tmp, nents, i) { + if (sg_dma_is_bus_address(tmp)) { + sg_dma_unmark_bus_address(tmp); + continue; + } + + if (sg_dma_len(tmp) == 0) + break; + + end = sg_dma_address(tmp) + sg_dma_len(tmp); + } + + if (end) + __iommu_dma_unmap(dev, start, end - start); +} diff --git a/kernel/dma/phytium/pswiotlb-mapping.c b/kernel/dma/phytium/pswiotlb-mapping.c new file mode 100644 index 0000000000000000000000000000000000000000..65674b7bdeab7e3fcf53517ef86d08c164858492 --- /dev/null +++ b/kernel/dma/phytium/pswiotlb-mapping.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Auxiliary DMA operations used by arch-independent dma-mapping + * routines when Phytium software IO tlb is required. + * + * Copyright (c) 2024, Phytium Technology Co., Ltd. 
+ */ +#include /* for max_pfn */ +#include +#include +#include +#include +#include +#include +#include +#include "../debug.h" +#include "../direct.h" +#include "pswiotlb-dma.h" + +/* + * The following functions are ported from + * ./drivers/dma/mapping.c + * static bool dma_go_direct(struct device *dev, dma_addr_t mask, + * const struct dma_map_ops *ops); + * static inline bool dma_map_direct(struct device *dev, + * const struct dma_map_ops *ops); + */ + +static bool dma_go_direct(struct device *dev, dma_addr_t mask, + const struct dma_map_ops *ops) +{ + if (likely(!ops)) + return true; +#ifdef CONFIG_DMA_OPS_BYPASS + if (dev->dma_ops_bypass) + return min_not_zero(mask, dev->bus_dma_limit) >= + dma_direct_get_required_mask(dev); +#endif + return false; +} + +static inline bool dma_map_direct(struct device *dev, + const struct dma_map_ops *ops) +{ + return dma_go_direct(dev, *dev->dma_mask, ops); +} +dma_addr_t pswiotlb_dma_map_page_distribute(struct device *dev, struct page *page, + size_t offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + if (dma_map_direct(dev, ops) || + arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size)) + addr = pswiotlb_dma_direct_map_page(dev, page, offset, size, dir, attrs); + else + addr = pswiotlb_iommu_dma_map_page(dev, page, offset, size, dir, attrs); + debug_dma_map_page(dev, page, offset, size, dir, addr, attrs); + + return addr; +} + +void pswiotlb_dma_unmap_page_attrs_distribute(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_map_direct(dev, ops) || + arch_dma_unmap_page_direct(dev, addr + size)) + pswiotlb_dma_direct_unmap_page(dev, addr, size, dir, attrs); + else if (ops->unmap_page) + pswiotlb_iommu_dma_unmap_page(dev, addr, size, dir, attrs); + debug_dma_unmap_page(dev, addr, size, dir); +} + +int 
pswiotlb_dma_map_sg_attrs_distribute(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + int ents; + + if (dma_map_direct(dev, ops) || + arch_dma_map_sg_direct(dev, sg, nents)) + ents = pswiotlb_dma_direct_map_sg(dev, sg, nents, dir, attrs); + else + ents = pswiotlb_iommu_dma_map_sg(dev, sg, nents, dir, attrs); + + if (ents > 0) + debug_dma_map_sg(dev, sg, nents, ents, dir, attrs); + else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM && + ents != -EIO)) + return -EIO; + + return ents; +} + +void pswiotlb_dma_unmap_sg_attrs_distribute(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_map_direct(dev, ops) || + arch_dma_unmap_sg_direct(dev, sg, nents)) + pswiotlb_dma_direct_unmap_sg(dev, sg, nents, dir, attrs); + else if (ops->unmap_sg) + pswiotlb_iommu_dma_unmap_sg(dev, sg, nents, dir, attrs); +} + +void pswiotlb_dma_sync_single_for_cpu_distribute(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_map_direct(dev, ops)) + pswiotlb_dma_direct_sync_single_for_cpu(dev, addr, size, dir); + else if (ops->sync_single_for_cpu) + pswiotlb_iommu_dma_sync_single_for_cpu(dev, addr, size, dir); + debug_dma_sync_single_for_cpu(dev, addr, size, dir); +} + +void pswiotlb_dma_sync_single_for_device_distribute(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_map_direct(dev, ops)) + pswiotlb_dma_direct_sync_single_for_device(dev, addr, size, dir); + else if (ops->sync_single_for_device) + pswiotlb_iommu_dma_sync_single_for_device(dev, addr, size, dir); + debug_dma_sync_single_for_device(dev, addr, size, dir); +} + +void 
pswiotlb_dma_sync_sg_for_cpu_distribute(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_map_direct(dev, ops)) + pswiotlb_dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); + else if (ops->sync_sg_for_cpu) + pswiotlb_iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir); + debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); +} + +void pswiotlb_dma_sync_sg_for_device_distribute(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_map_direct(dev, ops)) + pswiotlb_dma_direct_sync_sg_for_device(dev, sg, nelems, dir); + else if (ops->sync_sg_for_device) + pswiotlb_iommu_dma_sync_sg_for_device(dev, sg, nelems, dir); + debug_dma_sync_sg_for_device(dev, sg, nelems, dir); +} diff --git a/kernel/dma/phytium/pswiotlb.c b/kernel/dma/phytium/pswiotlb.c new file mode 100644 index 0000000000000000000000000000000000000000..1cd2d1a1d20a2df2878f1a3a197c6f9152d0ef79 --- /dev/null +++ b/kernel/dma/phytium/pswiotlb.c @@ -0,0 +1,1752 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Phytium software IO tlb to improve DMA performance. + * + * Copyright (c) 2024, Phytium Technology Co., Ltd. + */ + +#define pr_fmt(fmt) "Phytium software IO TLB: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DEBUG_FS +#include +#endif +#ifdef CONFIG_DMA_RESTRICTED_POOL +#include +#include +#include +#include +#include +#endif + +#include + +#define CREATE_TRACE_POINTS +#include + +#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - P_IO_TLB_SHIFT)) + +/* + * Minimum Phytium IO TLB size to bother booting with. 
If we can't + * allocate a contiguous 1MB, we're probably in trouble anyway. + */ +#define P_IO_TLB_MIN_SLABS ((1<<20) >> P_IO_TLB_SHIFT) +#define PSWIOTLB_VERSION "1.0.0" +#define INVALID_PHYS_ADDR (~(phys_addr_t)0) + +int pswiotlb_node_num; +bool pswiotlb_mtimer_alive; + +/** + * struct p_io_tlb_slot - Phytium IO TLB slot descriptor + * @orig_addr: The original address corresponding to a mapped entry. + * @alloc_size: Size of the allocated buffer. + * @list: The free list describing the number of free entries available + * from each index. + */ +struct p_io_tlb_slot { + phys_addr_t orig_addr; + size_t alloc_size; + unsigned int list; +}; + +bool pswiotlb_force_disable; + +static struct page *alloc_dma_pages(int nid, gfp_t gfp, size_t bytes); + +struct p_io_tlb_mem p_io_tlb_default_mem[MAX_NUMNODES]; +static struct timer_list service_timer; + +static unsigned long default_npslabs = P_IO_TLB_DEFAULT_SIZE >> P_IO_TLB_SHIFT; +static unsigned long dynamic_inc_thr_npslabs = P_IO_TLB_INC_THR >> P_IO_TLB_SHIFT; +static unsigned long default_npareas; + +LIST_HEAD(passthroughlist); +static spinlock_t passthroughlist_lock; +static struct pswiotlb_passthroughlist passthroughlist_entry[1024]; +static struct dentry *passthroughlist_debugfs; +static struct dentry *pswiotlb_debugfs; +/** + * struct p_io_tlb_area - Phytium IO TLB memory area descriptor + * + * This is a single area with a single lock. + * + * @used: The number of used Phytium IO TLB block. + * @index: The slot index to start searching in this area for next round. + * @lock: The lock to protect the above data structures in the map and + * unmap calls. 
+ */ +struct p_io_tlb_area { + unsigned long used; + unsigned int index; + spinlock_t lock; +}; + +static struct pswiotlb_passthroughlist_entry { + unsigned short vendor; + unsigned short device; +} ps_passthroughlist[] = { + {BL_PCI_VENDOR_ID_NVIDIA, 0xFFFF}, + {BL_PCI_VENDOR_ID_ILUVATAR, 0xFFFF}, + {BL_PCI_VENDOR_ID_METAX, 0xFFFF}, + {} +}; + +/* + * Round up number of slabs to the next power of 2. The last area is going + * be smaller than the rest if default_npslabs is not power of two. + * The number of slot in an area should be a multiple of P_IO_TLB_SEGSIZE, + * otherwise a segment may span two or more areas. It conflicts with free + * contiguous slots tracking: free slots are treated contiguous no matter + * whether they cross an area boundary. + * + * Return true if default_npslabs is rounded up. + */ +static bool round_up_default_npslabs(void) +{ + if (!default_npareas) + return false; + + if (default_npslabs < P_IO_TLB_SEGSIZE * default_npareas) + default_npslabs = P_IO_TLB_SEGSIZE * default_npareas; + else if (is_power_of_2(default_npslabs)) + return false; + default_npslabs = roundup_pow_of_two(default_npslabs); + return true; +} + +/** + * pswiotlb_adjust_nareas() - adjust the number of areas and slots + * @nareas: Desired number of areas. Zero is treated as 1. + * + * Adjust the default number of areas in a memory pool. + * The default size of the memory pool may also change to meet minimum area + * size requirements. + */ +static void pswiotlb_adjust_nareas(unsigned int nareas) +{ + if (!nareas) + nareas = 1; + else if (!is_power_of_2(nareas)) + nareas = roundup_pow_of_two(nareas); + + default_npareas = nareas; + + pr_info("area num %d.\n", nareas); + if (round_up_default_npslabs()) + pr_info("PSWIOTLB bounce buffer size roundup to %luMB", + (default_npslabs << P_IO_TLB_SHIFT) >> 20); +} + +/** + * limit_nareas() - get the maximum number of areas for a given memory pool size + * @nareas: Desired number of areas. 
+ * @nslots: Total number of slots in the memory pool. + * + * Limit the number of areas to the maximum possible number of areas in + * a memory pool of the given size. + * + * Return: Maximum possible number of areas. + */ +static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots) +{ + if (nslots < nareas * P_IO_TLB_SEGSIZE) + return nslots / P_IO_TLB_SEGSIZE; + return nareas; +} + +static int __init +setup_p_io_tlb_npages(char *str) +{ + if (isdigit(*str)) { + /* avoid tail segment of size < P_IO_TLB_SEGSIZE */ + default_npslabs = + ALIGN(simple_strtoul(str, &str, 0), P_IO_TLB_SEGSIZE); + } + if (*str == ',') + ++str; + if (isdigit(*str)) + pswiotlb_adjust_nareas(simple_strtoul(str, &str, 0)); + if (*str == ',') + ++str; + if (!strcmp(str, "forceoff")) + pswiotlb_force_disable = true; + + return 0; +} +early_param("pswiotlb", setup_p_io_tlb_npages); + +static int __init +setup_pswiotlb_passthroughlist(char *str) +{ + char tmp_str[5] = {'\0'}; + unsigned long flags; + int i, j, k; + int ret; + + for (i = 0, j = 0, k = 0; i < strlen(str) + 1; i++) { + if (*(str + i) != ',' && *(str + i) != '\0') { + /* bound the copy so an over-long token cannot overflow tmp_str */ + if (j < sizeof(tmp_str) - 1) + tmp_str[j++] = *(str + i); + } else { + /* terminate at j so stale bytes from a longer previous token are not parsed */ + tmp_str[j] = '\0'; + j = 0; + + ret = kstrtou16(tmp_str, 16, &passthroughlist_entry[k].vendor); + if (ret) + return ret; + + passthroughlist_entry[k].from_grub = true; + + spin_lock_irqsave(&passthroughlist_lock, flags); + list_add_rcu(&passthroughlist_entry[k].node, &passthroughlist); + spin_unlock_irqrestore(&passthroughlist_lock, flags); + + k++; + if (k >= ARRAY_SIZE(passthroughlist_entry)) + break; + } + } + + return 0; +} +early_param("pswiotlb_passthroughlist", setup_pswiotlb_passthroughlist); + +unsigned long pswiotlb_size_or_default(void) +{ + return default_npslabs << P_IO_TLB_SHIFT; +} + +void __init pswiotlb_adjust_size(unsigned long size) +{ + if (default_npslabs != P_IO_TLB_DEFAULT_SIZE >> P_IO_TLB_SHIFT) + return; + size = ALIGN(size, P_IO_TLB_SIZE); + default_npslabs = ALIGN(size >> P_IO_TLB_SHIFT, P_IO_TLB_SEGSIZE); + if (round_up_default_npslabs()) + size =
default_npslabs << P_IO_TLB_SHIFT; + pr_info("PSWIOTLB bounce buffer size adjusted to %luMB", size >> 20); +} + +void pswiotlb_print_info(int nid) +{ + struct p_io_tlb_pool *mem = &p_io_tlb_default_mem[nid].defpool; + + if (!mem->nslabs) { + pr_warn("No local mem of numa node %d\n", nid); + return; + } + + pr_info("numa %d mapped [mem %pa-%pa] (%luMB)\n", nid, &mem->start, &mem->end, + (mem->nslabs << P_IO_TLB_SHIFT) >> 20); +} + +static inline unsigned long io_tlb_offset(unsigned long val) +{ + return val & (P_IO_TLB_SEGSIZE - 1); +} + +static inline unsigned long nr_slots(u64 val) +{ + return DIV_ROUND_UP(val, P_IO_TLB_SIZE); +} + +static void pswiotlb_record_mem_range(struct p_io_tlb_mem *mem) +{ + unsigned long start_pfn, end_pfn; + unsigned long min_pfn = (~(phys_addr_t)0 >> PAGE_SHIFT), max_pfn = 0; + int i, nid; + unsigned long total_pfn = 0; + + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { + pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, + (u64)start_pfn << PAGE_SHIFT, + ((u64)end_pfn << PAGE_SHIFT) - 1); + if (nid == mem->numa_node_id) { + if (min_pfn > start_pfn) + min_pfn = start_pfn; + if (max_pfn < end_pfn) + max_pfn = end_pfn; + total_pfn += end_pfn - start_pfn + 1; + } + } + + mem->node_min_addr = (u64)min_pfn << PAGE_SHIFT; + mem->node_max_addr = ((u64)max_pfn << PAGE_SHIFT) - 1; + mem->node_total_mem = (u64)total_pfn << PAGE_SHIFT; +} + +static void pswiotlb_init_io_tlb_pool(struct p_io_tlb_pool *mem, int nid, phys_addr_t start, + unsigned long npslabs, bool late_alloc, unsigned int nareas) +{ + void *vaddr = phys_to_virt(start); + unsigned long bytes = npslabs << P_IO_TLB_SHIFT, i; + + mem->nslabs = npslabs; + mem->start = start; + mem->end = mem->start + bytes; + mem->late_alloc = late_alloc; + mem->numa_node_id = nid; + mem->nareas = nareas; + mem->area_nslabs = npslabs / mem->nareas; + mem->free_th = PSWIOTLB_FREE_THRESHOLD; + + for (i = 0; i < mem->nareas; i++) { + spin_lock_init(&mem->areas[i].lock); + 
mem->areas[i].index = 0; + mem->areas[i].used = 0; + } + + for (i = 0; i < mem->nslabs; i++) { + mem->slots[i].list = P_IO_TLB_SEGSIZE - io_tlb_offset(i); + mem->slots[i].orig_addr = INVALID_PHYS_ADDR; + mem->slots[i].alloc_size = 0; + } + memset(vaddr, 0, bytes); + mem->vaddr = vaddr; +} + +/** + * add_mem_pool() - add a memory pool to the allocator + * @mem: Phytium software IO TLB allocator. + * @pool: Memory pool to be added. + */ +static void add_mem_pool(struct p_io_tlb_mem *mem, struct p_io_tlb_pool *pool) +{ + unsigned long flags; + + spin_lock_irqsave(&mem->lock, flags); + if (mem->capacity != mem->whole_size) { + mem->pool_addr[mem->whole_size] = mem->pool_addr[mem->capacity]; + mem->pool_addr[mem->capacity] = pool; + } else { + mem->pool_addr[mem->capacity] = pool; + } + /* prevent any other writes prior to this time */ + smp_wmb(); + mem->capacity++; + mem->whole_size++; + mem->nslabs += pool->nslabs; + spin_unlock_irqrestore(&mem->lock, flags); +} + +static void __init *pswiotlb_memblock_alloc(unsigned long npslabs, + int nid, unsigned int flags, + int (*remap)(void *tlb, unsigned long npslabs)) +{ + size_t bytes = PAGE_ALIGN(npslabs << P_IO_TLB_SHIFT); + void *tlb; + + tlb = memblock_alloc_node(bytes, PAGE_SIZE, nid); + + if (!tlb) { + pr_warn("%s: Failed to allocate %zu bytes tlb structure\n", + __func__, bytes); + return NULL; + } + + if (remap && remap(tlb, npslabs) < 0) { + memblock_free(tlb, PAGE_ALIGN(bytes)); + pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes); + return NULL; + } + + return tlb; +} + +static void check_if_pswiotlb_in_local_node(struct p_io_tlb_mem *mem, + struct p_io_tlb_pool *pool) +{ + if ((pool->start < mem->node_min_addr) || + pool->end > mem->node_max_addr) { + mem->nslabs = 0; + pool->nslabs = 0; + } +} + +/* + * Statically reserve bounce buffer space and initialize bounce buffer data + * structures for the Phytium software IO TLB used to implement the DMA API. 
+ */ +void __init pswiotlb_init_remap(bool addressing_limit, int nid, unsigned int flags, + int (*remap)(void *tlb, unsigned long npslabs)) +{ + struct p_io_tlb_pool *mem = &p_io_tlb_default_mem[nid].defpool; + unsigned long npslabs; + unsigned int nareas; + size_t alloc_size; + void *tlb; + + if (!addressing_limit) + return; + if (pswiotlb_force_disable) + return; + + if (!remap) + p_io_tlb_default_mem[nid].can_grow = true; + p_io_tlb_default_mem[nid].phys_limit = virt_to_phys(high_memory - 1); + + if (!default_npareas) + pswiotlb_adjust_nareas(num_possible_cpus()); + + npslabs = default_npslabs; + nareas = limit_nareas(default_npareas, npslabs); + while ((tlb = pswiotlb_memblock_alloc(npslabs, nid, flags, remap)) == NULL) { + if (npslabs <= P_IO_TLB_MIN_SLABS) + return; + npslabs = ALIGN(npslabs >> 1, P_IO_TLB_SEGSIZE); + nareas = limit_nareas(nareas, npslabs); + } + + if (default_npslabs != npslabs) { + pr_info("PSWIOTLB bounce buffer size adjusted %lu -> %lu slabs", + default_npslabs, npslabs); + default_npslabs = npslabs; + } + + alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), npslabs)); + mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); + if (!mem->slots) { + pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n", + __func__, alloc_size, PAGE_SIZE); + return; + } + + mem->areas = memblock_alloc(array_size(sizeof(struct p_io_tlb_area), + nareas), SMP_CACHE_BYTES); + if (!mem->areas) { + pr_warn("%s: Failed to allocate mem->areas.\n", __func__); + return; + } + + pswiotlb_init_io_tlb_pool(mem, nid, __pa(tlb), npslabs, false, nareas); + add_mem_pool(&p_io_tlb_default_mem[nid], mem); + check_if_pswiotlb_in_local_node(&p_io_tlb_default_mem[nid], mem); + + if (flags & PSWIOTLB_VERBOSE) + pswiotlb_print_info(nid); +} +/** + * pswiotlb_free_tlb() - free a dynamically allocated Phytium IO TLB buffer + * @vaddr: Virtual address of the buffer. + * @bytes: Size of the buffer. 
+ */ +static void pswiotlb_free_tlb(void *vaddr, size_t bytes) +{ + if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) && + dma_free_from_pool(NULL, vaddr, bytes)) + return; + + /* Intentional leak if pages cannot be encrypted again. */ + if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes))) + __free_pages(virt_to_page(vaddr), get_order(bytes)); +} +/** + * pswiotlb_alloc_tlb() - allocate a dynamic Phytium IO TLB buffer + * @dev: Device for which a memory pool is allocated. + * @bytes: Size of the buffer. + * @phys_limit: Maximum allowed physical address of the buffer. + * @gfp: GFP flags for the allocation. + * + * Return: Allocated pages, or %NULL on allocation failure. + */ +static struct page *pswiotlb_alloc_tlb(struct device *dev, int nid, size_t bytes, + u64 phys_limit, gfp_t gfp) +{ + struct page *page; + + /* + * Allocate from the atomic pools if memory is encrypted and + * the allocation is atomic, because decrypting may block. + */ + if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) { + void *vaddr; + + if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)) + return NULL; + + return dma_alloc_from_pool(dev, bytes, &vaddr, gfp, + pswiotlb_dma_coherent_ok); + } + + gfp &= ~GFP_ZONEMASK; + if (phys_limit <= DMA_BIT_MASK(zone_dma_bits)) + gfp |= __GFP_DMA; + else if (phys_limit <= DMA_BIT_MASK(32)) + gfp |= __GFP_DMA32; + + while ((page = alloc_dma_pages(nid, gfp, bytes)) && + page_to_phys(page) + bytes - 1 > phys_limit) { + /* allocated, but too high */ + __free_pages(page, get_order(bytes)); + + if (IS_ENABLED(CONFIG_ZONE_DMA32) && + phys_limit < DMA_BIT_MASK(64) && + !(gfp & (__GFP_DMA32 | __GFP_DMA))) + gfp |= __GFP_DMA32; + else if (IS_ENABLED(CONFIG_ZONE_DMA) && + !(gfp & __GFP_DMA)) + gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA; + else + return NULL; + } + + return page; +} +/** + * pswiotlb_alloc_pool() - allocate a new Phytium IO TLB memory pool + * @dev: Device for which a memory pool is allocated. + * @minslabs: Minimum number of slabs. 
+ * @nslabs: Desired (maximum) number of slabs. + * @nareas: Number of areas. + * @phys_limit: Maximum DMA buffer physical address. + * @gfp: GFP flags for the allocations. + * + * Allocate and initialize a new Phytium IO TLB memory pool. The actual number of + * slabs may be reduced if allocation of @nslabs fails. If even + * @minslabs cannot be allocated, this function fails. + * + * Return: New memory pool, or %NULL on allocation failure. + */ +static struct p_io_tlb_pool *pswiotlb_alloc_pool(struct device *dev, + int nid, unsigned long minslabs, unsigned long nslabs, + unsigned int nareas, u64 phys_limit, bool transient, gfp_t gfp) +{ + struct p_io_tlb_pool *pool; + unsigned int slot_order; + struct page *tlb; + size_t pool_size; + size_t tlb_size; + + if (nslabs > SLABS_PER_PAGE << MAX_ORDER) { + nslabs = SLABS_PER_PAGE << MAX_ORDER; + nareas = limit_nareas(nareas, nslabs); + } + + pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas); + pool = kzalloc(pool_size, gfp); + if (!pool) + goto error; + pool->areas = (void *)pool + sizeof(*pool); + + if (!transient) { + nslabs = ALIGN(nslabs >> 1, P_IO_TLB_SEGSIZE); + nareas = limit_nareas(nareas, nslabs); + } + tlb_size = nslabs << P_IO_TLB_SHIFT; + while (!(tlb = pswiotlb_alloc_tlb(dev, nid, tlb_size, phys_limit, gfp))) { + if (nslabs <= minslabs) + goto error_tlb; + nslabs = ALIGN(nslabs >> 1, P_IO_TLB_SEGSIZE); + nareas = limit_nareas(nareas, nslabs); + tlb_size = nslabs << P_IO_TLB_SHIFT; + } + if (page_to_nid(tlb) != nid) + goto error_slots; + + slot_order = get_order(array_size(sizeof(*pool->slots), nslabs)); + pool->slots = (struct p_io_tlb_slot *) + __get_free_pages(gfp, slot_order); + if (!pool->slots) + goto error_slots; + + pswiotlb_init_io_tlb_pool(pool, nid, page_to_phys(tlb), nslabs, true, nareas); + return pool; + +error_slots: + pswiotlb_free_tlb(page_address(tlb), tlb_size); +error_tlb: + kfree(pool); +error: + return NULL; +} +static void pswiotlb_prepare_release_pool(struct 
p_io_tlb_mem *mem, + struct p_io_tlb_pool *pool, int pool_idx) +{ + int capacity; + unsigned long flags; + + spin_lock_irqsave(&mem->lock, flags); + capacity = mem->capacity; + mem->pool_addr[pool_idx] = mem->pool_addr[capacity - 1]; + mem->pool_addr[capacity - 1] = pool; + mem->capacity--; + mem->nslabs -= pool->nslabs; + spin_unlock_irqrestore(&mem->lock, flags); +} +static void pswiotlb_release_pool(struct p_io_tlb_mem *mem, + struct p_io_tlb_pool *pool, int pool_idx) +{ + unsigned int bytes = pool->nslabs * P_IO_TLB_SIZE; + unsigned int order = get_order(bytes); + struct page *page_start; + size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs); + int pool_idx1; + unsigned long flags; + + spin_lock_irqsave(&mem->lock, flags); + pool_idx1 = mem->whole_size - 1; + mem->pool_addr[pool_idx] = mem->pool_addr[pool_idx1]; + mem->whole_size--; + spin_unlock_irqrestore(&mem->lock, flags); + + bitmap_free(pool->busy_record); + free_pages((unsigned long)pool->slots, get_order(slots_size)); + page_start = pfn_to_page(PFN_DOWN(pool->start)); + __free_pages(page_start, order); + kfree(pool); +} +static void pswiotlb_monitor_service(struct timer_list *timer) +{ + int i, j, pool_idx; + struct p_io_tlb_pool *pool; + struct p_io_tlb_mem *mem; + int capacity, whole_size; + + for (i = 0; i < pswiotlb_node_num; i++) { + mem = &p_io_tlb_default_mem[i]; + whole_size = mem->whole_size; + capacity = mem->capacity; + rcu_read_lock(); + for (pool_idx = 1; pool_idx < whole_size; pool_idx++) { + pool = mem->pool_addr[pool_idx]; + for (j = 0; j < DIV_ROUND_UP(pool->nareas, BITS_PER_LONG); j++) { + if (*(pool->busy_record + j) != 0) { + pool->busy_flag = true; + break; + } + pool->busy_flag = false; + } + if (!pool->busy_flag) + pool->free_cnt++; + else + pool->free_cnt = 0; + if (pool->free_cnt >= pool->free_th && pool_idx < capacity) { + pswiotlb_prepare_release_pool(mem, pool, pool_idx); + capacity--; + } + if (pool->free_cnt >= 2 * pool->free_th && !pool->busy_flag) { + 
pswiotlb_release_pool(mem, pool, pool_idx); + whole_size--; + } + } + rcu_read_unlock(); + } + + mod_timer(timer, jiffies + 2 * HZ); +} +static struct p_io_tlb_pool *pswiotlb_formal_alloc(struct device *dev, + struct p_io_tlb_mem *mem) +{ + struct p_io_tlb_pool *pool; + + pool = pswiotlb_alloc_pool(dev, mem->numa_node_id, + P_IO_TLB_MIN_SLABS, dynamic_inc_thr_npslabs, + dynamic_inc_thr_npslabs, mem->phys_limit, + 0, GFP_ATOMIC | GFP_NOWAIT | __GFP_NOWARN); + if (!pool) { + pr_warn_once("Failed to allocate new formal pool"); + return NULL; + } + + pool->busy_record = bitmap_zalloc(pool->nareas, GFP_ATOMIC); + if (!pool->busy_record) { + pr_warn_ratelimited("%s: Failed to allocate pool busy record.\n", __func__); + /* undo pswiotlb_alloc_pool() so the slots, TLB pages and pool are not leaked */ + free_pages((unsigned long)pool->slots, + get_order(array_size(sizeof(*pool->slots), pool->nslabs))); + pswiotlb_free_tlb(pool->vaddr, pool->end - pool->start); + kfree(pool); + return NULL; + } + + add_mem_pool(mem, pool); + + return pool; +} + +/** + * pswiotlb_dyn_free() - RCU callback to free a memory pool + * @rcu: RCU head in the corresponding struct p_io_tlb_pool. + */ +static void pswiotlb_dyn_free(struct rcu_head *rcu) +{ + struct p_io_tlb_pool *pool = container_of(rcu, struct p_io_tlb_pool, rcu); + size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs); + size_t tlb_size = pool->end - pool->start; + + free_pages((unsigned long)pool->slots, get_order(slots_size)); + pswiotlb_free_tlb(pool->vaddr, tlb_size); + kfree(pool); +} +static void pswiotlb_init_tlb_mem_dynamic(struct p_io_tlb_mem *mem, int nid) +{ + spin_lock_init(&mem->lock); + mem->capacity = 0; + mem->whole_size = 0; + mem->numa_node_id = nid; +} + +bool pswiotlb_is_dev_in_passthroughlist(struct pci_dev *dev) +{ + struct pswiotlb_passthroughlist *bl_entry; + + rcu_read_lock(); + list_for_each_entry_rcu(bl_entry, &passthroughlist, node) { + if (bl_entry->vendor == dev->vendor) { + rcu_read_unlock(); + goto out; + } + } + rcu_read_unlock(); + + return true; +out: + return false; +} + +static void pswiotlb_show_passthroughlist(void) +{ + struct pswiotlb_passthroughlist *bl_entry; + + pr_info("The following vendors devices belong to are incompatible with
pswiotlb temporarily:\n"); + rcu_read_lock(); + list_for_each_entry_rcu(bl_entry, &passthroughlist, node) + printk(KERN_CONT "0x%-06x", bl_entry->vendor); + rcu_read_unlock(); +} +static void __init pswiotlb_passthroughlist_init(void) +{ + int dev_num = 0; + int i; + size_t alloc_size; + struct pswiotlb_passthroughlist *passthroughlist_array; + + spin_lock_init(&passthroughlist_lock); + + for (i = 0; ps_passthroughlist[i].vendor != 0; i++) + dev_num++; + + alloc_size = PAGE_ALIGN(array_size(sizeof(struct pswiotlb_passthroughlist), dev_num)); + passthroughlist_array = memblock_alloc(alloc_size, PAGE_SIZE); + if (!passthroughlist_array) { + pr_warn("%s: Failed to allocate memory for passthroughlist\n", + __func__); + return; + } + + for (i = 0; i < dev_num; i++) { + passthroughlist_array[i].vendor = ps_passthroughlist[i].vendor; + passthroughlist_array[i].device = ps_passthroughlist[i].device; + + spin_lock(&passthroughlist_lock); + list_add_rcu(&passthroughlist_array[i].node, &passthroughlist); + spin_unlock(&passthroughlist_lock); + } + + pswiotlb_show_passthroughlist(); +} + +/* + * Statically reserve bounce buffer space and initialize bounce buffer data + * structures for the software IO TLB used to implement the DMA API. 
+ */ +void __init pswiotlb_init(bool addressing_limit, unsigned int flags) +{ + int i; + int nid; + unsigned long start_pfn, end_pfn; + + /* Get number of numa node*/ + for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid); + pswiotlb_node_num = nid + 1; + pr_info("Total number of numa nodes is %d\n", pswiotlb_node_num); + for (i = 0; i < pswiotlb_node_num; i++) { + struct p_io_tlb_mem *mem = &p_io_tlb_default_mem[i]; + + pswiotlb_init_tlb_mem_dynamic(mem, i); + pswiotlb_record_mem_range(mem); + pr_info(" node %3d memory range: [%#018Lx-%#018Lx], total memory: %ldMB\n", + i, mem->node_min_addr, mem->node_max_addr, + mem->node_total_mem >> 20); + } + /* Get P TLB memory according to numa node id */ + for (i = 0; i < pswiotlb_node_num; i++) + pswiotlb_init_remap(addressing_limit, i, flags, NULL); + + pswiotlb_passthroughlist_init(); +} + +/** + * alloc_dma_pages() - allocate pages to be used for DMA + * @gfp: GFP flags for the allocation. + * @bytes: Size of the buffer. + * + * Allocate pages from the buddy allocator. If successful, make the allocated + * pages decrypted that they can be used for DMA. + * + * Return: Decrypted pages, or %NULL on failure. + */ +static struct page *alloc_dma_pages(int nid, gfp_t gfp, size_t bytes) +{ + unsigned int order = get_order(bytes); + struct page *page; + void *vaddr; + + page = alloc_pages_node(nid, gfp, order); + if (!page) + return NULL; + + vaddr = page_address(page); + if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes))) + goto error; + return page; + +error: + __free_pages(page, order); + return NULL; +} + +/** + * pswiotlb_find_pool() - find the Phytium IO TLB pool for a physical address + * @dev: Device which has mapped the DMA buffer. + * @paddr: Physical address within the DMA buffer. + * + * Find the Phytium IO TLB memory pool descriptor which contains the given physical + * address, if any. + * + * Return: Memory pool which contains @paddr, or %NULL if none. 
+ */ +struct p_io_tlb_pool *pswiotlb_find_pool(struct device *dev, int nid, phys_addr_t paddr) +{ + struct p_io_tlb_mem *mem = &dev->dma_p_io_tlb_mem[nid]; + struct p_io_tlb_pool *pool; + int i; + int whole_size; + + /* prevent any other reads prior to this time */ + smp_rmb(); + whole_size = mem->whole_size; + rcu_read_lock(); + for (i = 0; i < whole_size; i++) { + pool = mem->pool_addr[i]; + if (paddr >= pool->start && paddr < pool->end) + goto out; + } + + pool = NULL; +out: + rcu_read_unlock(); + return pool; +} + +/** + * pswiotlb_dev_init() - initialize pswiotlb fields in &struct device + * @dev: Device to be initialized. + */ +void pswiotlb_dev_init(struct device *dev) +{ + dev->dma_uses_p_io_tlb = false; +} + +void pswiotlb_store_local_node(struct pci_dev *dev, struct pci_bus *bus) +{ + int nid; + struct p_io_tlb_pool *defpool; + struct p_io_tlb_mem *mem; + + dev->dev.local_node = pcibus_to_node(bus); + /* register pswiotlb resources */ + dev->dev.dma_p_io_tlb_mem = p_io_tlb_default_mem; + nid = dev->dev.local_node; + defpool = &dev->dev.dma_p_io_tlb_mem[nid].defpool; + mem = &dev->dev.dma_p_io_tlb_mem[nid]; + pci_info(dev, "numa node: %d, pswiotlb defpool range: [%#018Lx-%#018Lx]\n" + "local node range: [%#018Lx-%#018Lx]\n", nid, + defpool->start, defpool->end, mem->node_min_addr, mem->node_max_addr); +} +/* + * Return the offset into a pswiotlb slot required to keep the device happy. 
 */
static unsigned int pswiotlb_align_offset(struct device *dev, u64 addr)
{
        if (dma_get_min_align_mask(dev))
                return addr & dma_get_min_align_mask(dev) & (P_IO_TLB_SIZE - 1);
        else
                return addr & (P_IO_TLB_SIZE - 1);
}

/*
 * Bounce: copy the pswiotlb buffer from or back to the original dma location
 */
static void pswiotlb_bounce(struct device *dev, int nid, phys_addr_t tlb_addr, size_t size,
                enum dma_data_direction dir, struct p_io_tlb_pool *mem)
{
        int index = (tlb_addr - mem->start) >> P_IO_TLB_SHIFT;
        phys_addr_t orig_addr = mem->slots[index].orig_addr;
        size_t alloc_size = mem->slots[index].alloc_size;
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
        unsigned int tlb_offset, orig_addr_offset;

        /* Slot was never mapped (or already released); nothing to copy. */
        if (orig_addr == INVALID_PHYS_ADDR)
                return;

        tlb_offset = tlb_addr & (P_IO_TLB_SIZE - 1);
        orig_addr_offset = pswiotlb_align_offset(dev, orig_addr);
        if (tlb_offset < orig_addr_offset) {
                dev_WARN_ONCE(dev, 1,
                        "Access before mapping start detected. orig offset %u, requested offset %u.\n",
                        orig_addr_offset, tlb_offset);
                return;
        }

        tlb_offset -= orig_addr_offset;
        if (tlb_offset > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
                        alloc_size, size, tlb_offset);
                return;
        }

        orig_addr += tlb_offset;
        alloc_size -= tlb_offset;

        /* Clamp the copy to what was actually allocated for this slot. */
        if (size > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
                        alloc_size, size);
                size = alloc_size;
        }

        if (PageHighMem(pfn_to_page(pfn))) {
                unsigned int offset = orig_addr & ~PAGE_MASK;
                struct page *page;
                unsigned int sz = 0;
                unsigned long flags;

                dev_info(dev, "%s line=%d !!!!!!HighMem!!!!!! dir: %d, tlb_addr: %#018Lx, size: %#lx\n",
                        __func__, __LINE__, dir, tlb_addr, size);

                /*
                 * Highmem buffer: copy page by page with interrupts disabled
                 * around each chunk (the page may be mapped transiently).
                 */
                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        page = pfn_to_page(pfn);
                        if (dir == DMA_TO_DEVICE)
                                memcpy_from_page(vaddr, page, offset, sz);
                        else
                                memcpy_to_page(page, offset, vaddr, sz);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        vaddr += sz;
                        offset = 0;
                }
        } else if (dir == DMA_TO_DEVICE) {
                memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
                memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
}

/* Physical address of slot @idx in a pool starting at @start. */
static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
        return start + (idx << P_IO_TLB_SHIFT);
}

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
        return (boundary_mask >> P_IO_TLB_SHIFT) + 1;
}

/* Wrap a per-area slot index back to 0 once it passes the area size. */
static unsigned int wrap_area_index(struct p_io_tlb_pool *mem, unsigned int index)
{
        if (index >= mem->area_nslabs)
                return 0;
        return index;
}

/**
 * pswiotlb_area_find_slots() - search for slots in one Phytium IO TLB memory area
 * @dev: Device which maps the buffer.
 * @pool: Memory pool to be searched.
 * @area_index: Index of the Phytium IO TLB memory area to be searched.
 * @orig_addr: Original (non-bounced) Phytium IO buffer address.
 * @alloc_size: Total requested size of the bounce buffer,
 *              including initial alignment padding.
 * @alloc_align_mask: Required alignment of the allocated buffer.
 *
 * Find a suitable sequence of Phytium IO TLB entries for the request and allocate
 * a buffer from the given Phytium IO TLB memory area.
 * This function takes care of locking.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */
static int pswiotlb_area_find_slots(struct device *dev, int nid, struct p_io_tlb_pool *pool,
                int area_index, phys_addr_t orig_addr, size_t alloc_size,
                unsigned int alloc_align_mask)
{
        struct p_io_tlb_area *area = pool->areas + area_index;
        unsigned long boundary_mask = dma_get_seg_boundary(dev);
        dma_addr_t tbl_dma_addr =
                phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
        unsigned long max_slots = get_max_slots(boundary_mask);
        unsigned int iotlb_align_mask =
                dma_get_min_align_mask(dev) | alloc_align_mask;
        unsigned int nslots = nr_slots(alloc_size), stride;
        unsigned int offset = pswiotlb_align_offset(dev, orig_addr);
        unsigned int index, slots_checked, count = 0, i;
        unsigned long flags;
        unsigned int slot_base;
        unsigned int slot_index;

        WARN_ON(!nslots);
        WARN_ON(area_index >= pool->nareas);

        /*
         * For allocations of PAGE_SIZE or larger only look for page aligned
         * allocations.
         */
        if (alloc_size >= PAGE_SIZE)
                iotlb_align_mask |= ~PAGE_MASK;
        iotlb_align_mask &= ~(P_IO_TLB_SIZE - 1);

        /*
         * For mappings with an alignment requirement don't bother looping to
         * unaligned slots once we found an aligned one.
         */
        stride = (iotlb_align_mask >> P_IO_TLB_SHIFT) + 1;

        /*
         * Only trylock: a contended area is simply skipped and the caller
         * moves on to the next area/pool instead of spinning.
         */
        if (spin_trylock_irqsave(&area->lock, flags)) {
                if (unlikely(nslots > pool->area_nslabs - area->used))
                        goto not_found;

                slot_base = area_index * pool->area_nslabs;
                index = area->index;

                for (slots_checked = 0; slots_checked < pool->area_nslabs;) {
                        slot_index = slot_base + index;

                        /* Skip slots that cannot satisfy the alignment of @orig_addr. */
                        if (orig_addr &&
                            (slot_addr(tbl_dma_addr, slot_index) &
                             iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
                                index = wrap_area_index(pool, index + 1);
                                slots_checked++;
                                continue;
                        }

                        /* Candidate must not straddle a DMA segment boundary. */
                        if (!iommu_is_span_boundary(slot_index, nslots,
                                                    nr_slots(tbl_dma_addr),
                                                    max_slots)) {
                                if (pool->slots[slot_index].list >= nslots)
                                        goto found;
                        }
                        index = wrap_area_index(pool, index + stride);
                        slots_checked += stride;
                }
        } else {
                return -1;
        }

not_found:
        spin_unlock_irqrestore(&area->lock, flags);
        return -1;

found:
        /*
         * If we find a slot that indicates we have 'nslots' number of
         * contiguous buffers, we allocate the buffers from that slot onwards
         * and set the list of free entries to '0' indicating unavailable.
         */
        for (i = slot_index; i < slot_index + nslots; i++) {
                pool->slots[i].list = 0;
                pool->slots[i].alloc_size = alloc_size - (offset +
                        ((i - slot_index) << P_IO_TLB_SHIFT));
        }
        /* Renumber the free-run counters of the slots preceding the allocation. */
        for (i = slot_index - 1;
             io_tlb_offset(i) != P_IO_TLB_SEGSIZE - 1 &&
             pool->slots[i].list; i--)
                pool->slots[i].list = ++count;

        /*
         * Update the indices to avoid searching in the next round.
         */
        area->index = wrap_area_index(pool, index + nslots);
        area->used += nslots;
        spin_unlock_irqrestore(&area->lock, flags);

        return slot_index;
}

/**
 * pswiotlb_pool_find_slots() - search for slots in one memory pool
 * @dev: Device which maps the buffer.
 * @pool: Memory pool to be searched.
 * @orig_addr: Original (non-bounced) Phytium IO buffer address.
 * @alloc_size: Total requested size of the bounce buffer,
 *              including initial alignment padding.
+ * @alloc_align_mask: Required alignment of the allocated buffer. + * + * Search through one memory pool to find a sequence of slots that match the + * allocation constraints. + * + * Return: Index of the first allocated slot, or -1 on error. + */ +static int pswiotlb_pool_find_slots(struct device *dev, int nid, struct p_io_tlb_pool *pool, + phys_addr_t orig_addr, size_t alloc_size, + unsigned int alloc_align_mask) +{ + int start = raw_smp_processor_id() & (pool->nareas - 1); + int i = start, index; + + do { + index = pswiotlb_area_find_slots(dev, nid, pool, i, orig_addr, + alloc_size, alloc_align_mask); + if (index >= 0) { + if ((pool != &p_io_tlb_default_mem[nid].defpool) && + !pool->transient) { + bitmap_set(pool->busy_record, i, 1); + } + return index; + } + if (++i >= pool->nareas) + i = 0; + } while (i != start); + + return -1; +} + +/** + * pswiotlb_find_slots() - search for slots in the whole pswiotlb + * @dev: Device which maps the buffer. + * @orig_addr: Original (non-bounced) Phytium IO buffer address. + * @alloc_size: Total requested size of the bounce buffer, + * including initial alignment padding. + * @alloc_align_mask: Required alignment of the allocated buffer. + * @retpool: Used memory pool, updated on return. + * + * Search through the whole Phytium software IO TLB to find a sequence of slots that + * match the allocation constraints. + * + * Return: Index of the first allocated slot, or -1 on error. 
 */
static int pswiotlb_find_slots(struct device *dev, int nid, phys_addr_t orig_addr,
                size_t alloc_size, unsigned int alloc_align_mask,
                struct p_io_tlb_pool **retpool)
{
        struct p_io_tlb_mem *mem = &dev->dma_p_io_tlb_mem[nid];
        struct p_io_tlb_pool *pool;
        int index;
        int try_pool_idx;
        int i;
        int cpuid;
        int current_ratio;
        unsigned long pswiotlb_mem;
        unsigned long nslabs_per_pool = dynamic_inc_thr_npslabs;

        cpuid = raw_smp_processor_id();

        rcu_read_lock();
#ifndef CONFIG_ARM64_4K_PAGES
        /*
         * Up to 15 probes: first the default pool, then a CPU-indexed pool,
         * then randomly chosen pools to spread contention.
         */
        for (i = 0; i < 15; i++) {
                if (i == 0) {
                        pool = mem->pool_addr[0];
                        index = pswiotlb_pool_find_slots(dev, nid, pool, orig_addr,
                                        alloc_size, alloc_align_mask);
                } else if (i == 1 && mem->capacity > (cpuid + 1)) {
                        pool = mem->pool_addr[cpuid + 1];
                        index = pswiotlb_pool_find_slots(dev, nid, pool, orig_addr,
                                        alloc_size, alloc_align_mask);
                } else {
                        try_pool_idx = get_random_u32() % mem->capacity;
                        pool = mem->pool_addr[try_pool_idx];
                        index = pswiotlb_pool_find_slots(dev, nid, pool, orig_addr,
                                        alloc_size, alloc_align_mask);
                }

                if (index >= 0) {
                        rcu_read_unlock();
                        goto found;
                }
        }
#else
        /* 4K-page kernels: 15 probes of randomly chosen pools. */
        for (i = 0; i < 15; i++) {
                try_pool_idx = get_random_u32() % mem->capacity;
                pool = mem->pool_addr[try_pool_idx];
                index = pswiotlb_pool_find_slots(dev, nid, pool, orig_addr,
                                alloc_size, alloc_align_mask);

                if (index >= 0) {
                        rcu_read_unlock();
                        goto found;
                }
        }
#endif
        rcu_read_unlock();
        /*
         * All probes failed: consider growing the pswiotlb, but refuse once
         * the projected total would exceed the per-node watermark.
         */
        if (nslabs_per_pool > SLABS_PER_PAGE << MAX_ORDER)
                nslabs_per_pool = SLABS_PER_PAGE << MAX_ORDER;

        nslabs_per_pool = ALIGN(nslabs_per_pool >> 1, P_IO_TLB_SEGSIZE);
        pswiotlb_mem = P_IO_TLB_DEFAULT_SIZE +
                (nslabs_per_pool << P_IO_TLB_SHIFT) * (mem->whole_size - 1);
        current_ratio = (pswiotlb_mem * 100 + mem->node_total_mem / 2) / mem->node_total_mem;
        if (current_ratio >= P_IO_TLB_EXT_WATERMARK) {
                dev_warn_once(dev, "Total pswiotlb (%ld MB) exceeds the watermark (%d%%)\n"
                        "of memory (%ld MB) in node %d, pswiotlb expansion is prohibited.\n",
                        pswiotlb_mem >> 20, P_IO_TLB_EXT_WATERMARK,
                        mem->node_total_mem >> 20, nid);
                return -1;
        }

        if (!mem->can_grow)
                return -1;

        pool = pswiotlb_formal_alloc(dev, mem);
        if (!pool)
                return -1;

        /* retry */
        rcu_read_lock();
        index = pswiotlb_pool_find_slots(dev, nid, pool, orig_addr,
                        alloc_size, alloc_align_mask);
        rcu_read_unlock();

        if (index < 0) {
                pswiotlb_dyn_free(&pool->rcu);
                return -1;
        }

found:
        WRITE_ONCE(dev->dma_uses_p_io_tlb, true);

        /*
         * The general barrier orders reads and writes against a presumed store
         * of the PSWIOTLB buffer address by a device driver (to a driver private
         * data structure). It serves two purposes.
         *
         * First, the store to dev->dma_uses_p_io_tlb must be ordered before the
         * presumed store. This guarantees that the returned buffer address
         * cannot be passed to another CPU before updating dev->dma_uses_p_io_tlb.
         *
         * Second, the load from mem->pools must be ordered before the same
         * presumed store. This guarantees that the returned buffer address
         * cannot be observed by another CPU before an update of the RCU list
         * that was made by pswiotlb_dyn_alloc() on a third CPU (cf. multicopy
         * atomicity).
         *
         * See also the comment in is_pswiotlb_buffer().
         */
        smp_mb();

        *retpool = pool;
        return index;
}
#ifdef CONFIG_DEBUG_FS

/**
 * mem_used() - get number of used slots in an allocator
 * @mem: Phytium software IO TLB allocator.
 *
 * The result is accurate in this version of the function, because an atomic
 * counter is available if CONFIG_DEBUG_FS is set.
 *
 * Return: Number of used slots.
 */
static unsigned long mem_used(struct p_io_tlb_mem *mem)
{
        return atomic_long_read(&mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */

/**
 * mem_pool_used() - get number of used slots in a memory pool
 * @pool: Phytium software IO TLB memory pool.
 *
 * The result is not accurate, see mem_used().
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_pool_used(struct p_io_tlb_pool *pool)
{
        int i;
        unsigned long used = 0;

        for (i = 0; i < pool->nareas; i++)
                used += pool->areas[i].used;
        return used;
}

/**
 * mem_used() - get number of used slots in an allocator
 * @mem: Phytium software IO TLB allocator.
 *
 * The result is not accurate, because there is no locking of individual
 * areas.
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_used(struct p_io_tlb_mem *mem)
{
        struct p_io_tlb_pool *pool;
        unsigned long used = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &mem->pools, node)
                used += mem_pool_used(pool);
        rcu_read_unlock();

        return used;
}

#endif /* CONFIG_DEBUG_FS */

/*
 * pswiotlb_tbl_map_single() - allocate a bounce buffer and copy the original
 * buffer into it (unconditionally, see comment below).
 * Returns the physical bounce address, or DMA_MAPPING_ERROR on failure.
 */
phys_addr_t pswiotlb_tbl_map_single(struct device *dev, int nid, phys_addr_t orig_addr,
                size_t mapping_size, size_t alloc_size,
                unsigned int alloc_align_mask, enum dma_data_direction dir,
                unsigned long attrs)
{
        struct p_io_tlb_mem *mem = &dev->dma_p_io_tlb_mem[nid];
        unsigned int offset = pswiotlb_align_offset(dev, orig_addr);
        struct p_io_tlb_pool *pool;
        unsigned int i;
        unsigned long index;
        phys_addr_t tlb_addr;
        struct page *page;

        /* A single mapping may never exceed one pswiotlb segment. */
        if (alloc_size > (P_IO_TLB_SEGSIZE << P_IO_TLB_SHIFT)) {
                dev_warn_ratelimited(dev, "alloc size 0x%lx is larger than segment(0x%x) of pswiotlb\n",
                        alloc_size, P_IO_TLB_SEGSIZE << P_IO_TLB_SHIFT);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        if (!mem || !mem->nslabs) {
                dev_warn_ratelimited(dev,
                        "Can not allocate PSWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        if (mapping_size > alloc_size) {
                dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
                        mapping_size, alloc_size);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        index = pswiotlb_find_slots(dev, nid, orig_addr,
                        alloc_size + offset, alloc_align_mask, &pool);
        if (index == -1) {
                if (!(attrs & DMA_ATTR_NO_WARN))
                        dev_warn_once(dev,
                                "pswiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
                                alloc_size, mem->nslabs, mem_used(mem));
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory. Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(pool->start, index) + offset;
        page = pfn_to_page(PFN_DOWN(tlb_addr));
        set_bit(PG_pswiotlb, &page->flags);

        /*
         * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
         * to the tlb buffer, if we knew for sure the device will
         * overwrite the entire current content. But we don't. Thus
         * unconditional bounce may prevent leaking pswiotlb content (i.e.
         * kernel memory) to user-space.
         */
        pswiotlb_bounce(dev, nid, tlb_addr, mapping_size, DMA_TO_DEVICE, pool);
        return tlb_addr;
}

/*
 * pswiotlb_release_slots() - return a bounce allocation's slots to the pool's
 * free lists, merging with adjacent free runs.
 */
static void pswiotlb_release_slots(struct device *dev, int nid, phys_addr_t tlb_addr,
                struct p_io_tlb_pool *mem)
{
        unsigned long flags;
        unsigned int offset = pswiotlb_align_offset(dev, tlb_addr);
        int index = (tlb_addr - offset - mem->start) >> P_IO_TLB_SHIFT;
        int nslots = nr_slots(mem->slots[index].alloc_size + offset);
        int aindex = index / mem->area_nslabs;
        struct p_io_tlb_area *area = &mem->areas[aindex];
        int count, i;
        struct page *page = pfn_to_page(PFN_DOWN(tlb_addr));

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        WARN_ON(aindex >= mem->nareas);

        spin_lock_irqsave(&area->lock, flags);
        if (index + nslots < ALIGN(index + 1, P_IO_TLB_SEGSIZE))
                count = mem->slots[index + nslots].list;
        else
                count = 0;

        /*
         * Step 1: return the slots to the free list, merging the slots with
         * succeeding slots
         */
        for (i = index + nslots - 1; i >= index; i--) {
                mem->slots[i].list = ++count;
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        /*
         * Step 2: merge the returned slots with the preceding slots, if
         * available (non zero)
         */
        for (i = index - 1;
             io_tlb_offset(i) != P_IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
             i--)
                mem->slots[i].list = ++count;
        area->used -= nslots;
        /* Dynamically grown pools clear the area's busy bit when fully free. */
        if ((mem != &p_io_tlb_default_mem[nid].defpool) && (area->used == 0))
                bitmap_clear(mem->busy_record, aindex, 1);
        clear_bit(PG_pswiotlb, &page->flags);
        spin_unlock_irqrestore(&area->lock, flags);
}
/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void pswiotlb_tbl_unmap_single(struct device *dev, int nid, phys_addr_t tlb_addr,
                size_t offset, size_t mapping_size, enum dma_data_direction dir,
                unsigned long attrs, struct p_io_tlb_pool *pool)
{
        struct page *page = pfn_to_page(PFN_DOWN(tlb_addr));

        /*
         * First, sync the memory before unmapping the entry
         * (skipped if a sync_for_cpu already bounced this page,
         * as recorded by the PG_pswiotlbsync bit).
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
            (test_bit(PG_pswiotlbsync, &page->flags) == false))
                pswiotlb_bounce(dev, nid, tlb_addr, mapping_size, DMA_FROM_DEVICE, pool);

        tlb_addr -= offset;
        pswiotlb_release_slots(dev, nid, tlb_addr, pool);

        clear_bit(PG_pswiotlbsync, &page->flags);
}

/* Flush CPU-side writes into the bounce buffer before device access. */
void pswiotlb_sync_single_for_device(struct device *dev, int nid, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir, struct p_io_tlb_pool *pool)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                pswiotlb_bounce(dev, nid, tlb_addr, size, DMA_TO_DEVICE, pool);
        else
                WARN_ON(dir != DMA_FROM_DEVICE);
}

/*
 * Copy device-written data out of the bounce buffer for the CPU and mark the
 * page as already synced so the final unmap does not bounce it again.
 */
void pswiotlb_sync_single_for_cpu(struct device *dev, int nid, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir, struct p_io_tlb_pool *pool)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                struct page *page = pfn_to_page(PFN_DOWN(tlb_addr));

                pswiotlb_bounce(dev, nid, tlb_addr, size, DMA_FROM_DEVICE, pool);
                set_bit(PG_pswiotlbsync, &page->flags);
        } else
                WARN_ON(dir != DMA_TO_DEVICE);
}
/*
 * Create a pswiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t pswiotlb_map(struct device *dev, int nid, phys_addr_t paddr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t pswiotlb_addr;
        dma_addr_t dma_addr;

        trace_pswiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

        pswiotlb_addr = pswiotlb_tbl_map_single(dev, nid, paddr, size,
                        PAGE_ALIGN(size), PAGE_SIZE - 1, dir, attrs);
        if (pswiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        dma_addr = phys_to_dma_unencrypted(dev, pswiotlb_addr);

        /* Ensure memory coherency before the device reads the bounce buffer. */
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(pswiotlb_addr, size, dir);
        return dma_addr;
}

/* Devices matching these rules bypass the pswiotlb; zero-terminated. */
const struct pswiotlb_bypass_rules bypass_rules_list[] = {
        {PCI_VENDOR_ID_MELLANOX, true, DMA_BIDIRECTIONAL},
        {0, }
};

/* Largest contiguous mapping the pswiotlb can provide for @dev. */
size_t pswiotlb_max_mapping_size(struct device *dev)
{
        int min_align_mask = dma_get_min_align_mask(dev);
        int min_align = 0;

        /*
         * pswiotlb_find_slots() skips slots according to
         * min align mask. This affects max mapping size.
         * Take it into account here.
         */
        if (min_align_mask)
                min_align = roundup(min_align_mask, P_IO_TLB_SIZE);

        return ((size_t)P_IO_TLB_SIZE) * P_IO_TLB_SEGSIZE - min_align;
}

/**
 * is_pswiotlb_allocated() - check if the default Phytium software IO TLB is initialized
 */
bool is_pswiotlb_allocated(struct device *dev)
{
        int nid = dev->local_node;

        return p_io_tlb_default_mem[nid].nslabs;
}

/* True if the device's local-node allocator exists and has slabs. */
bool is_pswiotlb_active(struct device *dev)
{
        int nid = dev->local_node;
        struct p_io_tlb_mem *mem = &dev->dma_p_io_tlb_mem[nid];

        return mem && mem->nslabs;
}

/**
 * default_pswiotlb_base() - get the base address of the default PSWIOTLB
 *
 * Get the lowest physical address used by the default Phytium software IO TLB pool.
+ */ +phys_addr_t default_pswiotlb_base(struct device *dev) +{ + int nid = dev->local_node; + + p_io_tlb_default_mem[nid].can_grow = false; + + return p_io_tlb_default_mem[nid].defpool.start; +} + +/** + * default_pswiotlb_limit() - get the address limit of the default PSWIOTLB + * + * Get the highest physical address used by the default Phytium software IO TLB pool. + */ +phys_addr_t default_pswiotlb_limit(struct device *dev) +{ + int nid = dev->local_node; + + return p_io_tlb_default_mem[nid].phys_limit; +} +#ifdef CONFIG_DEBUG_FS + +static int p_io_tlb_used_get(void *data, u64 *val) +{ + struct p_io_tlb_mem *mem = data; + + *val = mem_used(mem); + return 0; +} + +static int p_io_tlb_hiwater_get(void *data, u64 *val) +{ + struct p_io_tlb_mem *mem = data; + + *val = atomic_long_read(&mem->used_hiwater); + return 0; +} + +static int p_io_tlb_hiwater_set(void *data, u64 val) +{ + struct p_io_tlb_mem *mem = data; + + /* Only allow setting to zero */ + if (val != 0) + return -EINVAL; + + atomic_long_set(&mem->used_hiwater, val); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_p_io_tlb_used, p_io_tlb_used_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_p_io_tlb_hiwater, p_io_tlb_hiwater_get, + p_io_tlb_hiwater_set, "%llu\n"); + +static void pswiotlb_create_debugfs_files(struct p_io_tlb_mem *mem, + int nid, const char *dirname) +{ + atomic_long_set(&mem->total_used, 0); + atomic_long_set(&mem->used_hiwater, 0); + + mem->debugfs = debugfs_create_dir(dirname, pswiotlb_debugfs); + if (!mem->nslabs) + return; + + debugfs_create_ulong("p_io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); + debugfs_create_file("p_io_tlb_used", 0400, mem->debugfs, mem, + &fops_p_io_tlb_used); + debugfs_create_file("p_io_tlb_used_hiwater", 0600, mem->debugfs, mem, + &fops_p_io_tlb_hiwater); +} + +static int passthroughlist_display_show(struct seq_file *m, void *v) +{ + struct pswiotlb_passthroughlist *bl_entry; + + rcu_read_lock(); + list_for_each_entry_rcu(bl_entry, &passthroughlist, 
node) { + seq_printf(m, "0x%04x\n", bl_entry->vendor); + } + rcu_read_unlock(); + + return 0; +} + +static int version_display_show(struct seq_file *m, void *v) +{ + seq_puts(m, "pswiotlb version "); + seq_printf(m, "%s\n", PSWIOTLB_VERSION); + + return 0; +} + +static int passthroughlist_add(void *data, u64 val) +{ + struct pswiotlb_passthroughlist *bl_entry; + unsigned long flags; + + bl_entry = kzalloc(sizeof(*bl_entry), GFP_ATOMIC); + if (!bl_entry) + return -ENOMEM; + + bl_entry->vendor = val; + bl_entry->from_grub = false; + + spin_lock_irqsave(&passthroughlist_lock, flags); + list_add_rcu(&bl_entry->node, &passthroughlist); + spin_unlock_irqrestore(&passthroughlist_lock, flags); + + return 0; +} + +static int passthroughlist_del(void *data, u64 val) +{ + struct pswiotlb_passthroughlist *bl_entry; + unsigned long flags; + + rcu_read_lock(); + list_for_each_entry_rcu(bl_entry, &passthroughlist, node) { + if (bl_entry->vendor == val) + goto found; + } + rcu_read_unlock(); + + return 0; +found: + rcu_read_unlock(); + spin_lock_irqsave(&passthroughlist_lock, flags); + list_del_rcu(&bl_entry->node); + spin_unlock_irqrestore(&passthroughlist_lock, flags); + + if (bl_entry->from_grub == false) + kfree(bl_entry); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(passthroughlist_display); +DEFINE_SHOW_ATTRIBUTE(version_display); +DEFINE_DEBUGFS_ATTRIBUTE(fops_passthroughlist_add, NULL, + passthroughlist_add, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_passthroughlist_del, NULL, + passthroughlist_del, "%llu\n"); + +static void pswiotlb_create_passthroughlist_debugfs_files(const char *dirname) +{ + passthroughlist_debugfs = debugfs_create_dir(dirname, pswiotlb_debugfs); + if (!passthroughlist_debugfs) + return; + + debugfs_create_file("show_devices", 0400, passthroughlist_debugfs, NULL, + &passthroughlist_display_fops); + debugfs_create_file("add_device", 0600, passthroughlist_debugfs, NULL, + &fops_passthroughlist_add); + debugfs_create_file("del_device", 0600, 
passthroughlist_debugfs, NULL, + &fops_passthroughlist_del); +} + +static void pswiotlb_create_pswiotlb_debugfs_files(const char *dirname) +{ + int i; + char name[20] = ""; + char passthroughlist_name[50] = ""; + + pswiotlb_debugfs = debugfs_create_dir(dirname, pswiotlb_debugfs); + if (!pswiotlb_debugfs) + return; + + debugfs_create_file("version", 0400, pswiotlb_debugfs, NULL, + &version_display_fops); + + for (i = 0; i < pswiotlb_node_num; i++) { + sprintf(name, "%s-%d", "pswiotlb", i); + pswiotlb_create_debugfs_files(&p_io_tlb_default_mem[i], i, name); + } + sprintf(passthroughlist_name, "%s", "pswiotlb-passthroughlist"); + pswiotlb_create_passthroughlist_debugfs_files(passthroughlist_name); +} + +static int __init pswiotlb_create_default_debugfs(void) +{ + char name[20] = ""; + + if (!pswiotlb_mtimer_alive && !pswiotlb_force_disable) { + pr_info("setup pswiotlb monitor timer service\n"); + timer_setup(&service_timer, pswiotlb_monitor_service, 0); + pswiotlb_mtimer_alive = true; + + /* check pswiotlb every 2 seconds*/ + mod_timer(&service_timer, jiffies + 2 * HZ); + } + + if (!pswiotlb_force_disable) { + sprintf(name, "%s", "pswiotlb"); + pswiotlb_create_pswiotlb_debugfs_files(name); + } + + return 0; +} + +late_initcall(pswiotlb_create_default_debugfs); + +#else /* !CONFIG_DEBUG_FS */ + +static inline void pswiotlb_create_debugfs_files(struct p_io_tlb_mem *mem, + const char *dirname) +{ +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/kernel/extable.c b/kernel/extable.c index 71f482581cab4c316ed2ddd2a4eedb6771b11c37..33b3dbc5485c5c5c44e253a8d57b2b3a5c6dda65 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -29,6 +29,11 @@ DEFINE_MUTEX(text_mutex); extern struct exception_table_entry __start___ex_table[]; extern struct exception_table_entry __stop___ex_table[]; +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +extern struct exception_table_entry __start___mc_ex_table[]; +extern struct exception_table_entry __stop___mc_ex_table[]; +#endif + /* Cleared by build time tools 
if the table is already sorted. */ u32 __initdata __visible main_extable_sort_needed = 1; @@ -40,6 +45,14 @@ void __init sort_main_extable(void) pr_notice("Sorting __ex_table...\n"); sort_extable(__start___ex_table, __stop___ex_table); } + +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + if (main_extable_sort_needed && + &__stop___mc_ex_table > &__start___mc_ex_table) { + pr_notice("Sorting __mc_ex_table...\n"); + sort_extable(__start___mc_ex_table, __stop___mc_ex_table); + } +#endif } /* Given an address, look for it in the kernel exception table */ @@ -63,6 +76,22 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) return e; } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* Given an address, look for it in the machine check exception table */ +const +struct exception_table_entry *search_mc_exception_tables(unsigned long addr) +{ + const struct exception_table_entry *e; + + e = search_extable(__start___mc_ex_table, + __stop___mc_ex_table - __start___mc_ex_table, addr); + if (!e) + e = search_module_mc_extables(addr); + + return e; +} +#endif + int notrace core_kernel_text(unsigned long addr) { if (is_kernel_text(addr)) diff --git a/kernel/module/main.c b/kernel/module/main.c index b00e31721a73e398c9bce71164139e92766b7c1c..f3b9a1aea32b96a77693d8f8506804d6284aeea6 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2204,6 +2204,11 @@ static int find_module_sections(struct module *mod, struct load_info *info) mod->extable = section_objs(info, "__ex_table", sizeof(*mod->extable), &mod->num_exentries); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + mod->mc_extable = section_objs(info, "__mc_ex_table", + sizeof(*mod->mc_extable), &mod->num_mc_exentries); +#endif + if (section_addr(info, "__obsparm")) pr_warn("%s: Ignoring obsolete parameters\n", mod->name); @@ -2438,6 +2443,10 @@ static int post_relocation(struct module *mod, const struct load_info *info) /* Sort exception table now relocations are done. 
*/ sort_extable(mod->extable, mod->extable + mod->num_exentries); +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE + sort_extable(mod->mc_extable, mod->mc_extable + mod->num_mc_exentries); +#endif + /* Copy relocated percpu area over. */ percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, info->sechdrs[info->index.pcpu].sh_size); @@ -3270,6 +3279,35 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) return e; } +#ifdef CONFIG_ARCH_HAS_MC_EXTABLE +/* Given an address, look for it in the module machine check safe exception tables. */ +const struct exception_table_entry *search_module_mc_extables(unsigned long addr) +{ + const struct exception_table_entry *e = NULL; + struct module *mod; + + preempt_disable(); + mod = __module_address(addr); + if (!mod) + goto out; + + if (!mod->num_mc_exentries) + goto out; + + e = search_extable(mod->mc_extable, + mod->num_mc_exentries, + addr); +out: + preempt_enable(); + + /* + * Now, if we found one, we are running inside it now, hence + * we cannot unload the module, hence no refcnt needed. + */ + return e; +} +#endif + /** * is_module_address() - is this address inside a module? * @addr: the address to check. 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 354a2d294f526ad6688168443913385eda101fa1..26052cfccb4c8d68e7c8e5c25f309b16914f642d 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2252,10 +2252,26 @@ static struct ctl_table vm_table[] = { { } }; +static struct ctl_table debug_table[] = { +#if defined(CONFIG_ARM64) && defined(CONFIG_ARCH_HAS_COPY_MC) + { + .procname = "machine_check_safe", + .data = &sysctl_machine_check_safe, + .maxlen = sizeof(sysctl_machine_check_safe), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif + { } +}; + int __init sysctl_init_bases(void) { register_sysctl_init("kernel", kern_table); register_sysctl_init("vm", vm_table); + register_sysctl_init("debug", debug_table); return 0; } diff --git a/scripts/sorttable.h b/scripts/sorttable.h index 7bd0184380d3b9f6dfb1a0dff566efd13f6b5a4f..b268cba2a7a22a8637cb6907c1bfd129266cd77b 100644 --- a/scripts/sorttable.h +++ b/scripts/sorttable.h @@ -294,6 +294,11 @@ static int do_sort(Elf_Ehdr *ehdr, unsigned int orc_num_entries = 0; #endif + Elf_Shdr *mc_extab_sec = NULL; + Elf_Rel *mc_relocs = NULL; + int mc_relocs_size = 0; + char *mc_extab_image = NULL; + shstrndx = r2(&ehdr->e_shstrndx); if (shstrndx == SHN_XINDEX) shstrndx = r(&shdr[0].sh_link); @@ -320,6 +325,18 @@ static int do_sort(Elf_Ehdr *ehdr, relocs = (void *)ehdr + _r(&s->sh_offset); relocs_size = _r(&s->sh_size); } + + if (!strcmp(secstrings + idx, "__mc_ex_table")) { + mc_extab_sec = s; + + if ((r(&s->sh_type) == SHT_REL || + r(&s->sh_type) == SHT_RELA) && + r(&s->sh_info) == i) { + mc_relocs = (void *)ehdr + _r(&s->sh_offset); + mc_relocs_size = _r(&s->sh_size); + } + } + if (r(&s->sh_type) == SHT_SYMTAB_SHNDX) symtab_shndx = (Elf32_Word *)((const char *)ehdr + _r(&s->sh_offset)); @@ -409,12 +426,18 @@ static int do_sort(Elf_Ehdr *ehdr, } extab_image = (void *)ehdr + _r(&extab_sec->sh_offset); + + if (mc_extab_sec) + mc_extab_image = (void *)ehdr + 
_r(&mc_extab_sec->sh_offset); + strtab = (const char *)ehdr + _r(&strtab_sec->sh_offset); symtab = (const Elf_Sym *)((const char *)ehdr + _r(&symtab_sec->sh_offset)); if (custom_sort) { custom_sort(extab_image, _r(&extab_sec->sh_size)); + if (mc_extab_image) + custom_sort(mc_extab_image, _r(&mc_extab_sec->sh_size)); } else { int num_entries = _r(&extab_sec->sh_size) / extable_ent_size; qsort(extab_image, num_entries, @@ -425,6 +448,9 @@ static int do_sort(Elf_Ehdr *ehdr, if (relocs) memset(relocs, 0, relocs_size); + if (mc_relocs) + memset(mc_relocs, 0, mc_relocs_size); + /* find the flag main_extable_sort_needed */ for (sym = (void *)ehdr + _r(&symtab_sec->sh_offset); sym < sym + _r(&symtab_sec->sh_size) / sizeof(Elf_Sym); diff --git a/sound/pci/hda/hda_phytium.c b/sound/pci/hda/hda_phytium.c index 18e5faac712f67aa5a6d338cf01903a8818a734c..443c78af3f1043b4dcd53efae111285cb95288ae 100644 --- a/sound/pci/hda/hda_phytium.c +++ b/sound/pci/hda/hda_phytium.c @@ -65,6 +65,11 @@ static int probe_only[SNDRV_CARDS]; static int jackpoll_ms[SNDRV_CARDS]; static int single_cmd = -1; static int enable_msi = -1; +#ifdef CONFIG_PM +static int power_save = 1; +#else +#define power_save 0 +#endif #ifdef CONFIG_SND_HDA_INPUT_BEEP static bool beep_mode[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS-1)] = CONFIG_SND_HDA_INPUT_BEEP_MODE}; @@ -97,7 +102,17 @@ module_param_array(beep_mode, bool, NULL, 0444); MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode (0=off, 1=on) (default=1)."); #endif -#define power_save 0 +#ifdef CONFIG_PM +static int param_set_xint(const char *val, const struct kernel_param *kp); +static const struct kernel_param_ops param_ops_xint = { + .set = param_set_xint, + .get = param_get_int, +}; +#define param_check_xint param_check_int + +module_param(power_save, xint, 0644); +MODULE_PARM_DESC(power_save, "Automatic power-saving timeout (in second, 0 = disable)."); +#endif /* CONFIG_PM */ static int align_buffer_size = -1; module_param(align_buffer_size, bint, 0644); @@ -415,6 +430,28 @@ static void azx_del_card_list(struct azx *chip) mutex_unlock(&card_list_lock); } +/* trigger power-save check at writing parameter */ +static int param_set_xint(const char *val, const struct kernel_param *kp) +{ + struct hda_ft *hda; + struct azx *chip; + int prev = power_save; + int ret = param_set_int(val, kp); + + if (ret || prev == power_save) + return ret; + + mutex_lock(&card_list_lock); + list_for_each_entry(hda, &card_list, list) { + chip = &hda->chip; + if (!hda->probe_continued || chip->disabled) + continue; + snd_hda_set_power_save(&chip->bus, power_save * 1000); + } + mutex_unlock(&card_list_lock); + return 0; +} + #else #define azx_add_card_list(chip) /* NOP */ #define azx_del_card_list(chip) /* NOP */ @@ -916,6 +953,51 @@ static const struct hda_controller_ops axi_hda_ops = { .link_power = azx_ft_link_power, }; +static ssize_t runtime_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip = card->private_data; + struct hdac_bus *bus = azx_bus(chip); + unsigned int cmd = 0x001f0500; + unsigned int res = -1; + char *status; + + dev_info(dev, "Inquire codec status!\n"); + mutex_lock(&bus->cmd_mutex); + snd_hdac_bus_send_cmd(bus, 
cmd); + snd_hdac_bus_get_response(bus, 0, &res); + mutex_unlock(&bus->cmd_mutex); + + switch (res & 0x3) { + case 0x0: + status = "D0"; + break; + case 0x1: + status = "D1"; + break; + case 0x2: + status = "D2"; + break; + case 0x3: + status = "D3"; + break; + } + + return sprintf(buf, "%s\n", status); +} + +static DEVICE_ATTR_RO(runtime_status); + +static struct attribute *runtime_status_attrs[] = { + &dev_attr_runtime_status.attr, + NULL, +}; + +static const struct attribute_group hda_ft_runtime_status_group = { + .attrs = runtime_status_attrs, +}; + static DECLARE_BITMAP(probed_devs, SNDRV_CARDS); static int hda_ft_probe(struct platform_device *pdev) @@ -967,7 +1049,6 @@ static int hda_ft_probe(struct platform_device *pdev) return err; } -#define codec_power_count(codec) codec->core.dev.power.usage_count.counter /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { [AZX_DRIVER_FT] = 4, @@ -980,7 +1061,6 @@ static int azx_probe_continue(struct azx *chip) int dev = chip->dev_index; int err; struct hdac_bus *bus = azx_bus(chip); - struct hda_codec *codec; hda->probe_continued = 1; @@ -1014,15 +1094,14 @@ static int azx_probe_continue(struct azx *chip) if (azx_has_pm_runtime(chip)) pm_runtime_put_noidle(hddev); - list_for_each_codec(codec, &chip->bus) { - if (codec_power_count(codec) > 0) { - pm_runtime_mark_last_busy(&codec->core.dev); - pm_runtime_put_autosuspend(&codec->core.dev); - } + if (sysfs_create_group(&hda->dev->kobj, &hda_ft_runtime_status_group)) { + dev_warn(hda->dev, "failed create sysfs\n"); + goto err_sysfs; } - return err; +err_sysfs: + sysfs_remove_group(&hda->dev->kobj, &hda_ft_runtime_status_group); out_free: if (bus->irq >= 0) { free_irq(bus->irq, (void *)chip); @@ -1043,6 +1122,7 @@ static int hda_ft_remove(struct platform_device *pdev) hda = container_of(chip, struct hda_ft, chip); cancel_work_sync(&hda->probe_work); clear_bit(chip->dev_index, probed_devs); + 
sysfs_remove_group(&hda->dev->kobj, &hda_ft_runtime_status_group); snd_card_free(card); return 0; diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 1de59b63abdd994dc04baa51401b3cbcdac77b86..6e16cc2ccebf4f54b17fe464373c640e7e89d089 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -1379,6 +1379,11 @@ config SND_SOC_PEB2466 To compile this driver as a module, choose M here: the module will be called snd-soc-peb2466. +config SND_SOC_PHYTIUM_CODEC_V2 + tristate "Phytium Codec V2 driver" + help + Select Y if you want to add Phytium codec V2 driver. + config SND_SOC_RK3328 tristate "Rockchip RK3328 audio CODEC" select REGMAP_MMIO diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 7dc577a7261c7f7086401ce47ede4bb283959a1e..a2f54c2ca7d444061284e430be0bafd62c534147 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -374,6 +374,8 @@ snd-soc-wsa881x-objs := wsa881x.o snd-soc-wsa883x-objs := wsa883x.o snd-soc-wsa884x-objs := wsa884x.o snd-soc-zl38060-objs := zl38060.o +snd-soc-phytium-codec-v2-objs := phytium-codec-v2.o + # Amp snd-soc-max9877-objs := max9877.o snd-soc-max98504-objs := max98504.o @@ -762,6 +764,7 @@ obj-$(CONFIG_SND_SOC_WSA881X) += snd-soc-wsa881x.o obj-$(CONFIG_SND_SOC_WSA883X) += snd-soc-wsa883x.o obj-$(CONFIG_SND_SOC_WSA884X) += snd-soc-wsa884x.o obj-$(CONFIG_SND_SOC_ZL38060) += snd-soc-zl38060.o +obj-$(CONFIG_SND_SOC_PHYTIUM_CODEC_V2) += snd-soc-phytium-codec-v2.o # Amp obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o diff --git a/sound/soc/codecs/phytium-codec-v2.c b/sound/soc/codecs/phytium-codec-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..f117e76ed3bd5cba41af6f7fb1650528b2dbef37 --- /dev/null +++ b/sound/soc/codecs/phytium-codec-v2.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium CODEC ALSA SoC Audio driver + * + * Copyright (C) 2024, Phytium Technology Co., Ltd. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "phytium-codec-v2.h" + +#define PHYT_CODEC_V2_VERSION "1.1.0" +#define PHYTIUM_RATES (SNDRV_PCM_RATE_192000 | \ + SNDRV_PCM_RATE_96000 | \ + SNDRV_PCM_RATE_88200 | \ + SNDRV_PCM_RATE_8000_48000) +#define PHYTIUM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S18_3LE | \ + SNDRV_PCM_FMTBIT_S20_3LE | \ + SNDRV_PCM_FMTBIT_S24_LE | \ + SNDRV_PCM_FMTBIT_S32_LE) + +static const struct snd_kcontrol_new phyt_snd_controls[] = { + SOC_SINGLE("PCM Volume", PHYTIUM_CODEC_PLAYBACKE_VOL, 0, 0xc0, 0), + SOC_SINGLE("Playback Volume1", PHYTIUM_CODEC_PLAYBACKE_OUT1_VOL, 0, 0x24, 0), + SOC_SINGLE("Playback Volume2", PHYTIUM_CODEC_PLAYBACKE_OUT2_VOL, 0, 0x24, 0), + + SOC_SINGLE("Capture Digital Volume", PHYTIUM_CODEC_CAPTURE_VOL, 0, 0xc0, 0), + SOC_SINGLE("Mic PGA Volume", PHYTIUM_CODEC_CAPTURE_IN1_VOL, 0, 8, 0), +}; + +/* + * DAPM Controls + */ + +/* Input Mux */ +static const char * const phyt_mux_sel[] = { + "Line 1", "Line 2"}; + +static const struct soc_enum phyt_mux_enum = + SOC_ENUM_SINGLE(PHYTIUM_CODEC_INMUX_SEL, 0, + ARRAY_SIZE(phyt_mux_sel), + phyt_mux_sel); + +static const struct snd_kcontrol_new phyt_pga_controls = + SOC_DAPM_ENUM("Route", phyt_mux_enum); + +/* dapm widgets */ +static const struct snd_soc_dapm_widget phyt_dapm_widgets[] = { + /* Input Signal */ + SND_SOC_DAPM_INPUT("INPUT1"), + SND_SOC_DAPM_INPUT("INPUT2"), + + /* Input Mux */ + SND_SOC_DAPM_MUX("Input Mux", PHYTIUM_CODEC_INMUX_ENABLE, 0, 0, &phyt_pga_controls), + + /* Input ADC */ + SND_SOC_DAPM_ADC("Input ADC", "Capture", PHYTIUM_CODEC_ADC_ENABLE, 0, 0), + + /* Output DAC */ + SND_SOC_DAPM_DAC("Output DAC", "Playback", PHYTIUM_CODEC_DAC_ENABLE, 0, 0), + + /* Output Signal */ + SND_SOC_DAPM_OUTPUT("OUTPUT1"), + SND_SOC_DAPM_OUTPUT("OUTPUT2"), +}; + 
+static const struct snd_soc_dapm_route phyt_dapm_routes[] = { + { "Input Mux", "Line 1", "INPUT1"}, + { "Input Mux", "Line 2", "INPUT2"}, + + { "Input ADC", NULL, "Input Mux"}, + + { "OUTPUT1", NULL, "Output DAC"}, + { "OUTPUT2", NULL, "Output DAC"}, +}; + +static void phyt_codec_show_status(uint8_t status) +{ + switch (status) { + case 0: + pr_err("success\n"); + break; + case 2: + pr_err("device busy\n"); + break; + case 3: + pr_err("read/write error\n"); + break; + case 4: + pr_err("no device\n"); + break; + default: + pr_err("unknown error: %d\n", status); + break; + } +} + +int phyt_codec_msg_set_cmd(struct phytium_codec *priv) +{ + struct phytcodec_cmd *ans_msg; + int timeout = 5000, ret = 0; + + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_AP2RV_INT_STATE, SEND_INTR); + + ans_msg = priv->sharemem_base; + + while (timeout && (ans_msg->complete == PHYTCODEC_COMPLETE_NOT_READY + || ans_msg->complete == PHYTCODEC_COMPLETE_GOING)) { + udelay(200); + timeout--; + } + + if (timeout == 0) { + dev_err(priv->dev, "failed to receive msg, timeout\n"); + return -EINVAL; + } else if (ans_msg->complete == PHYTCODEC_COMPLETE_SUCCESS) { + dev_dbg(priv->dev, "receive msg successfully\n"); + if (ans_msg->status != 0) { + phyt_codec_show_status(ans_msg->status); + dev_err(priv->dev, "controller status error code:%d\n", + ans_msg->status); + return -EINVAL; + } + } else if (ans_msg->complete != PHYTCODEC_COMPLETE_SUCCESS) { + dev_err(priv->dev, "receive msg; error code:%d\n", + ans_msg->complete); + ret = -EINVAL; + } else { + dev_err(priv->dev, "unkonwn error"); + ret = -EINVAL; + } + + return ret; +} + +static int phyt_set_cmd(struct phytium_codec *priv, + unsigned int cmd) +{ + struct phytcodec_cmd *msg = priv->sharemem_base; + int ret = 0; + + msg->reserved = 0; + msg->seq = 0; + msg->cmd_id = PHYTCODEC_MSG_CMD_SET; + msg->cmd_subid = cmd; + msg->complete = 0; + ret = phyt_codec_msg_set_cmd(priv); + if (ret < 0) { + dev_err(priv->dev, "set cmd_subid 0x%x failed\n", 
cmd); + ret = -EINVAL; + goto error; + } +error: + return ret; +} + +static int phyt_pm_cmd(struct phytium_codec *priv, + unsigned int cmd) +{ + struct phytcodec_cmd *msg = priv->sharemem_base; + uint16_t total_regs_len; + uint8_t *regs; + int ret = 0, i; + + memset(msg, 0, sizeof(struct phytcodec_cmd)); + + msg->reserved = 0; + msg->seq = 0; + msg->cmd_id = PHYTCODEC_MSG_CMD_SET; + msg->cmd_subid = cmd; + msg->complete = 0; + msg->cmd_para.phytcodec_reg.cnt = 0; + if (cmd == PHYTCODEC_MSG_CMD_SET_RESUME) { + memcpy(msg->cmd_para.phytcodec_reg.regs, priv->regs, REG_SH_LEN); + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_INT_MASK, 0x0); + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_INT_ENABLE, 0x1); + } + ret = phyt_codec_msg_set_cmd(priv); + if (ret < 0) { + dev_err(priv->dev, "set cmd_subid 0x%x failed\n", cmd); + ret = -EINVAL; + goto error; + } + total_regs_len = msg->cmd_para.phytcodec_reg.total_regs_len; + + if (cmd == PHYTCODEC_MSG_CMD_SET_SUSPEND || cmd == PHYTCODEC_MSG_CMD_GET_ALL_REGS) { + regs = kmalloc(total_regs_len, GFP_KERNEL); + priv->regs = regs; + while (total_regs_len > REG_SH_LEN * msg->cmd_para.phytcodec_reg.cnt) { + memcpy(regs, msg->cmd_para.phytcodec_reg.regs, REG_SH_LEN); + regs += REG_SH_LEN; + msg->complete = 0; + ret = phyt_codec_msg_set_cmd(priv); + if (ret < 0) { + dev_err(priv->dev, "set cmd_subid 0x%x failed\n", cmd); + ret = -EINVAL; + goto error; + } + } + memcpy(regs, msg->cmd_para.phytcodec_reg.regs, + total_regs_len - REG_SH_LEN * (msg->cmd_para.phytcodec_reg.cnt - 1)); + if (cmd == PHYTCODEC_MSG_CMD_GET_ALL_REGS) { + dev_dbg(priv->dev, "all codec registers:\n"); + for (i = 0; i < total_regs_len; i++) + dev_dbg(priv->dev, "0x%02x-0x%02x\n", i, priv->regs[i]); + kfree(priv->regs); + } + } else if (cmd == PHYTCODEC_MSG_CMD_SET_RESUME) { + regs = priv->regs; + while (total_regs_len > REG_SH_LEN * msg->cmd_para.phytcodec_reg.cnt) { + regs += REG_SH_LEN; + memcpy(msg->cmd_para.phytcodec_reg.regs, regs, REG_SH_LEN); + 
msg->complete = 0; + ret = phyt_codec_msg_set_cmd(priv); + if (ret < 0) { + dev_err(priv->dev, "set cmd_subid 0x%x failed\n", cmd); + ret = -EINVAL; + goto error; + } + } + kfree(priv->regs); + } +error: + return ret; +} + +static int phyt_get_cmd(struct phytium_codec *priv, unsigned int cmd) +{ + struct phytcodec_cmd *msg = priv->sharemem_base; + int ret = 0; + + msg->reserved = 0; + msg->seq = 0; + msg->cmd_id = PHYTCODEC_MSG_CMD_GET; + msg->cmd_subid = cmd; + msg->complete = 0; + ret = phyt_codec_msg_set_cmd(priv); + if (ret < 0) { + dev_err(priv->dev, "get cmd_subid 0x%x failed\n", cmd); + ret = -EINVAL; + } + + return ret; +} + +static int phyt_probe(struct snd_soc_component *component) +{ + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + return phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_PROBE); +} + +static void phyt_remove(struct snd_soc_component *component) +{ + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_REMOVE); +} + +static int phyt_suspend(struct snd_soc_component *component) +{ + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + return phyt_pm_cmd(priv, PHYTCODEC_MSG_CMD_SET_SUSPEND); +} + +static int phyt_resume(struct snd_soc_component *component) +{ + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + return phyt_pm_cmd(priv, PHYTCODEC_MSG_CMD_SET_RESUME); +} + +static int phyt_set_bias_level(struct snd_soc_component *component, + enum snd_soc_bias_level level) +{ + int ret; + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + struct phytcodec_cmd *msg = priv->sharemem_base; + + memset(msg, 0, sizeof(struct phytcodec_cmd)); + msg->cmd_para.para[0] = priv->channels/2; + + switch (level) { + case SND_SOC_BIAS_ON: + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_BIAS_ON); + break; + + case SND_SOC_BIAS_PREPARE: + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_BIAS_PREPARE); + break; + + 
case SND_SOC_BIAS_STANDBY: + if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_BIAS_STANDBY); + else + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_BIAS_STANDBY); + break; + + case SND_SOC_BIAS_OFF: + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_BIAS_OFF); + break; + } + + return ret; +} + +static const struct snd_soc_component_driver phyt_component_driver = { + .probe = phyt_probe, + .remove = phyt_remove, + .suspend = phyt_suspend, + .resume = phyt_resume, + .set_bias_level = phyt_set_bias_level, + .controls = phyt_snd_controls, + .num_controls = ARRAY_SIZE(phyt_snd_controls), + .dapm_widgets = phyt_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(phyt_dapm_widgets), + .dapm_routes = phyt_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(phyt_dapm_routes), + .suspend_bias_off = 1, + .idle_bias_on = 1, + .use_pmdown_time = 1, + .endianness = 1, +}; + +static int phyt_mute(struct snd_soc_dai *dai, int mute, int direction) +{ + int ret; + struct snd_soc_component *component = dai->component; + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + struct phytcodec_cmd *msg = priv->sharemem_base; + + memset(msg, 0, sizeof(struct phytcodec_cmd)); + msg->cmd_para.para[0] = (uint8_t)direction; + if (mute) + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_MUTE); + else + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_UNMUTE); + + return ret; +} + +static int phyt_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + int ret; + struct snd_soc_component *component = dai->component; + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_STARTUP); + else + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_STARTUP_RC); + + return ret; +} + +static void phyt_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + int ret; + struct 
snd_soc_component *component = dai->component; + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_SHUTDOWN); + else + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_SHUTDOWN_RC); +} + +static int phyt_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + int wl, ret = 0; + struct snd_soc_component *component = dai->component; + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + priv->channels = params_channels(params); + switch (params_width(params)) { + case 16: + wl = 3; + break; + case 18: + wl = 2; + break; + case 20: + wl = 1; + break; + case 24: + wl = 0; + break; + case 32: + wl = 4; + break; + default: + ret = -EINVAL; + goto error; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + snd_soc_component_write(component, PHYTIUM_CODEC_HW_PARAM, wl); + else + snd_soc_component_write(component, PHYTIUM_CODEC_HW_PARAM_RC, wl); + +error: + return ret; +} + +static int phyt_set_dai_fmt(struct snd_soc_dai *codec_dai, + unsigned int fmt) +{ + int ret; + struct snd_soc_component *component = codec_dai->component; + struct phytium_codec *priv = snd_soc_component_get_drvdata(component); + + if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) + return -EINVAL; + + if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_I2S) + return -EINVAL; + + if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) + return -EINVAL; + + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_DAI_FMT); + + return ret; +} + +static const struct snd_soc_dai_ops phyt_dai_ops = { + .startup = phyt_startup, + .shutdown = phyt_shutdown, + .hw_params = phyt_hw_params, + .mute_stream = phyt_mute, + .set_fmt = phyt_set_dai_fmt, +}; + +static struct snd_soc_dai_driver phyt_dai = { + .name = "phytium-hifi-v2", + .playback = { + .stream_name = "Playback", + 
.channels_min = 2, + .channels_max = 2, + .rates = PHYTIUM_RATES, + .formats = PHYTIUM_FORMATS, + }, + .capture = { + .stream_name = "Capture", + .channels_min = 2, + .channels_max = 2, + .rates = PHYTIUM_RATES, + .formats = PHYTIUM_FORMATS, + }, + .ops = &phyt_dai_ops, + .symmetric_rate = 1, +}; + +static const struct regmap_config phyt_codec_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = REG_MAX, + .cache_type = REGCACHE_NONE, +}; + +void phyt_enable_debug(struct phytium_codec *priv) +{ + u32 reg; + + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG); + phyt_writel_reg(priv->regfile_base, + PHYTIUM_CODEC_DEBUG, reg | PHYTIUM_CODEC_DEBUG_ENABLE); +} + +void phyt_disable_debug(struct phytium_codec *priv) +{ + u32 reg; + + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG); + reg &= ~PHYTIUM_CODEC_DEBUG_ENABLE; + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG, reg); +} + +void phyt_enable_alive(struct phytium_codec *priv) +{ + u32 reg; + + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG); + phyt_writel_reg(priv->regfile_base, + PHYTIUM_CODEC_DEBUG, reg | PHYTIUM_CODEC_ALIVE_ENABLE); +} + +void phyt_disable_alive(struct phytium_codec *priv) +{ + u32 reg; + + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG); + reg &= ~PHYTIUM_CODEC_ALIVE_ENABLE; + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG, reg); +} + +void phyt_heartbeat(struct phytium_codec *priv) +{ + u32 reg; + + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG); + phyt_writel_reg(priv->regfile_base, + PHYTIUM_CODEC_DEBUG, reg | PHYTIUM_CODEC_HEARTBIT_VAL); +} + +static void phyt_timer_handle(struct timer_list *t) +{ + struct phytium_codec *priv = from_timer(priv, t, timer); + + if (priv->alive_enabled && priv->heartbeat) + priv->heartbeat(priv); + + mod_timer(&priv->timer, jiffies + msecs_to_jiffies(2000)); +} + +static int phyt_get_one_reg(struct phytium_codec *priv, uint8_t arg1, 
uint8_t arg2) +{ + struct phytcodec_cmd *msg = priv->sharemem_base; + int ret = 0; + + memset(msg, 0, sizeof(struct phytcodec_cmd)); + msg->cmd_para.rw_data.addr = arg1; + msg->cmd_para.rw_data.reg = arg2; + ret = phyt_get_cmd(priv, PHYTCODEC_MSG_CMD_GET_ONE_REG); + dev_dbg(priv->dev, "val: 0x%x\n", msg->cmd_para.rw_data.val); + + return ret; +} + +static int phyt_set_one_reg(struct phytium_codec *priv, uint8_t arg1, uint8_t arg2, uint16_t arg3) +{ + struct phytcodec_cmd *msg = priv->sharemem_base; + int ret = 0; + + memset(msg, 0, sizeof(struct phytcodec_cmd)); + msg->cmd_para.rw_data.addr = arg1; + msg->cmd_para.rw_data.reg = arg2; + msg->cmd_para.rw_data.val = arg3; + ret = phyt_set_cmd(priv, PHYTCODEC_MSG_CMD_SET_ONE_REG); + + return ret; +} + +static ssize_t debug_show(struct device *dev, struct device_attribute *da, char *buf) +{ + struct phytium_codec *priv = dev_get_drvdata(dev); + ssize_t ret; + u32 reg; + dev_info(dev, "Usage: echo [args...] > debug\n"); + dev_info(dev, "Usage: echo help 1 > debug for more details"); + + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG); + ret = sprintf(buf, "%x\n", reg); + + return ret; +} + +static ssize_t debug_store(struct device *dev, struct device_attribute *da, + const char *buf, size_t size) +{ + struct phytium_codec *priv = dev_get_drvdata(dev); + char *arg1_str = NULL, *arg2_str = NULL, *arg3_str = NULL; + uint8_t arg1 = 0, arg2 = 0; + uint16_t arg3 = 0; + char *cmd_buffer, *cmd; + long value; + u32 reg; + int status; + + cmd_buffer = kmalloc(size + 1, GFP_KERNEL); + if (!cmd_buffer) + goto error; + strscpy(cmd_buffer, buf, size + 1); + + cmd = strsep(&cmd_buffer, " "); + if (!cmd) { + dev_err(dev, "Invalid command argument\n"); + goto error; + } + + arg1_str = strsep(&cmd_buffer, " "); + if (arg1_str) { + status = kstrtoul(arg1_str, 0, &value); + if (status) { + dev_err(dev, "Invalid value for arg1: %s\n", arg1_str); + goto error; + } + arg1 = (uint8_t)value; + } + + arg2_str = strsep(&cmd_buffer, " 
"); + if (arg2_str) { + status = kstrtoul(arg2_str, 0, &value); + if (status) { + dev_err(dev, "Invalid value for arg2: %s\n", arg2_str); + goto error; + } + arg2 = (uint8_t)value; + } + + arg3_str = strsep(&cmd_buffer, " "); + if (arg3_str) { + status = kstrtou16(arg3_str, 0, &arg3); + if (status) { + dev_err(dev, "Invalid value for arg3: %s\n", arg3_str); + goto error; + } + } + + if (strcmp(cmd, "dbg") == 0) { + if (!arg1_str || !arg2_str) { + dev_err(dev, "debug command requires two arguments\n"); + goto error; + } + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG); + if (arg1 == 1) { + if (arg2 == 1) { + priv->alive_enabled = true; + reg |= BIT(arg1); + } else if (arg2 == 0) { + priv->alive_enabled = false; + reg &= ~BIT(arg1); + } else { + dev_err(dev, "arg2 should be 0 or 1 for dbg command\n"); + goto error; + } + } else if (arg1 == 0) { + if (arg2 == 1) { + priv->debug_enabled = true; + reg |= BIT(arg1); + } else if (arg2 == 0) { + priv->debug_enabled = false; + reg &= ~BIT(arg1); + } else { + dev_err(dev, "arg2 should be 0 or 1 for dbg command\n"); + goto error; + } + } else { + dev_err(dev, "arg1 should be 0 or 1 for dbg command\n"); + goto error; + } + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_DEBUG, reg); + } else if (strcmp(cmd, "get") == 0) { + if (!arg1_str || !arg2_str) { + dev_err(dev, "get command requires two arguments\n"); + goto error; + } + phyt_get_one_reg(priv, arg1, arg2); + } else if (strcmp(cmd, "set") == 0) { + if (!arg1_str || !arg2_str || !arg3_str) { + dev_err(dev, "set command requires three arguments\n"); + goto error; + } + phyt_set_one_reg(priv, arg1, arg2, arg3); + } else if (strcmp(cmd, "dump") == 0) { + if (!arg1_str) { + dev_err(dev, "dump command requires one argument\n"); + goto error; + } + phyt_pm_cmd(priv, PHYTCODEC_MSG_CMD_GET_ALL_REGS); + } else if (strcmp(cmd, "help") == 0) { + dev_info(dev, "Available commands:\n" + "dump all regs: echo \"dump\" > debug\n" + "dbg: echo \"dbg 0 1\" > debug\n" + 
"heartbeat: echo \"dbg 1 1\" > debug\n" + "read a reg: echo \"get [addr] [reg]\" > debug\n" + "write a reg: echo \"set [addr] [reg] [val]\" > debug\n"); + } else { + dev_err(dev, "Unknown command: %s\n", cmd); + goto error; + } + + kfree(cmd_buffer); + return size; + +error: + kfree(cmd_buffer); + return -EINVAL; +} + +static DEVICE_ATTR_RW(debug); + +static struct attribute *phyt_codec_device_attr[] = { + &dev_attr_debug.attr, + NULL, +}; + +static const struct attribute_group phyt_codec_device_group = { + .attrs = phyt_codec_device_attr, +}; + +static int phyt_get_channels(struct phytium_codec *priv) +{ + struct phytcodec_cmd *msg = priv->sharemem_base; + int ret = 0; + uint8_t channels; + + memset(msg, 0, sizeof(struct phytcodec_cmd)); + ret = phyt_get_cmd(priv, PHYTCODEC_MSG_CMD_GET_CHANNELS); + channels = msg->cmd_para.para[0] * 2; + + return channels; +} + +static int phyt_codec_probe(struct platform_device *pdev) +{ + struct phytium_codec *priv; + struct resource *res; + int ret; + struct device *dev; + + dev = &pdev->dev; + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(dev, "failed to alloc struct phytium_codec\n"); + ret = -ENOMEM; + goto failed_alloc_phytium_codec; + } + + dev_set_drvdata(dev, priv); + priv->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regfile_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->regfile_base)) { + dev_err(&pdev->dev, "failed to ioremap resource0\n"); + ret = PTR_ERR(priv->regfile_base); + goto failed_ioremap_res0; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->sharemem_base = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res)); + if (IS_ERR(priv->sharemem_base)) { + dev_err(&pdev->dev, "failed to ioremap resource1\n"); + ret = PTR_ERR(priv->sharemem_base); + goto failed_ioremap_res1; + } + + priv->regmap = devm_regmap_init_mmio(dev, priv->regfile_base, + &phyt_codec_regmap_config); + if (IS_ERR(priv->regmap)) { + 
dev_err(dev, "failed to init regmap\n"); + ret = PTR_ERR(priv->regmap); + goto failed_regmap_init; + } + + phyt_disable_debug(priv); + phyt_disable_alive(priv); + priv->debug_enabled = false; + priv->alive_enabled = false; + priv->heartbeat = phyt_heartbeat; + priv->timer.expires = jiffies + msecs_to_jiffies(10000); + timer_setup(&priv->timer, phyt_timer_handle, 0); + add_timer(&priv->timer); + + if (sysfs_create_group(&pdev->dev.kobj, &phyt_codec_device_group)) + dev_warn(dev, "failed to create sysfs\n"); + + phyt_dai.playback.channels_max = phyt_get_channels(priv); + phyt_dai.capture.channels_max = phyt_dai.playback.channels_max; + + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_INT_MASK, 0x0); + phyt_writel_reg(priv->regfile_base, PHYTIUM_CODEC_INT_ENABLE, 0x1); + + ret = devm_snd_soc_register_component(dev, &phyt_component_driver, + &phyt_dai, 1); + if (ret != 0) { + dev_err(dev, "not able to register codec dai\n"); + goto failed_register_com; + } + + return 0; +failed_register_com: +failed_regmap_init: +failed_ioremap_res1: +failed_ioremap_res0: +failed_alloc_phytium_codec: + return ret; +} + +static int phyt_codec_remove(struct platform_device *pdev) +{ + struct phytium_codec *priv = dev_get_drvdata(&pdev->dev); + + sysfs_remove_group(&pdev->dev.kobj, &phyt_codec_device_group); + del_timer(&priv->timer); + return 0; +} + +static const struct of_device_id phyt_codec_of_match[] = { + { .compatible = "phytium,codec-2.0", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phyt_codec_of_match); + +static const struct acpi_device_id phyt_codec_acpi_match[] = { + { "PHYT1002", 0}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, phyt_codec_acpi_match); + +static struct platform_driver phyt_codec_driver = { + .probe = phyt_codec_probe, + .remove = phyt_codec_remove, + .driver = { + .name = "phytium-codec-v2", + .of_match_table = of_match_ptr(phyt_codec_of_match), + .acpi_match_table = phyt_codec_acpi_match, + }, +}; + +module_platform_driver(phyt_codec_driver); 
+MODULE_DESCRIPTION("Phytium CODEC V2 Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_VERSION(PHYT_CODEC_V2_VERSION); diff --git a/sound/soc/codecs/phytium-codec-v2.h b/sound/soc/codecs/phytium-codec-v2.h new file mode 100644 index 0000000000000000000000000000000000000000..b699c7276e91f4a3077d1b6a61f0318328c95877 --- /dev/null +++ b/sound/soc/codecs/phytium-codec-v2.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Phytium CODEC ASoC driver + * + * Copyright (C) 2024, Phytium Technology Co., Ltd. + * + */ + +#ifndef __PHYTIUM_CODEC_V2_H +#define __PHYTIUM_CODEC_V2_H + +#include +#include + +/****************register *****************/ +#define PHYTIUM_CODEC_AP2RV_INT_MASK 0x20 +#define PHYTIUM_CODEC_AP2RV_INT_STATE 0x24 + #define SEND_INTR (1 << 4) + +#define PHYTIUM_CODEC_DEBUG 0x58 + #define PHYTIUM_CODEC_DEBUG_ENABLE (1 << 0) + #define PHYTIUM_CODEC_ALIVE_ENABLE (1 << 1) + #define PHYTIUM_CODEC_HEARTBIT_VAL (1 << 2) + +#define PHYTIUM_CODEC_PLAYBACKE_VOL 0X500 +#define PHYTIUM_CODEC_PLAYBACKE_OUT1_VOL 0X504 +#define PHYTIUM_CODEC_PLAYBACKE_OUT2_VOL 0X508 +#define PHYTIUM_CODEC_CAPTURE_VOL 0X50c +#define PHYTIUM_CODEC_CAPTURE_IN1_VOL 0X510 +#define PHYTIUM_CODEC_HW_PARAM 0X514 +#define PHYTIUM_CODEC_HW_PARAM_RC 0X518 +#define PHYTIUM_CODEC_INMUX_ENABLE 0X51c +#define PHYTIUM_CODEC_INMUX_SEL 0X520 +#define PHYTIUM_CODEC_ADC_ENABLE 0X524 +#define PHYTIUM_CODEC_DAC_ENABLE 0x528 +#define PHYTIUM_CODEC_INT_STATUS 0x560 +#define PHYTIUM_CODEC_INT_MASK 0x564 +#define PHYTIUM_CODEC_INT_ENABLE 0x590 + #define PIPE_NUM 11 + +#define REG_MAX 0x52c +#define REG_SH_LEN 52 + +/****************register end *****************/ + +#define PHYTIUM_CODEC_LSD_ID 0x701 + +enum phytcodec_msg_cmd_id { + PHYTCODEC_MSG_CMD_DEFAULT = 0, + PHYTCODEC_MSG_CMD_SET, + PHYTCODEC_MSG_CMD_GET, + PHYTCODEC_MSG_CMD_DATA, + PHYTCODEC_MSG_CMD_REPORT, + PHYTCODEC_MSG_PROTOCOL = 0x10, +}; + +enum phytcodec_get_subid { + PHYTCODEC_MSG_CMD_GET_CHANNELS = 0, + 
PHYTCODEC_MSG_CMD_GET_ONE_REG, + PHYTCODEC_MSG_CMD_GET_ALL_REGS, +}; + +enum phytcodec_set_subid { + PHYTCODEC_MSG_CMD_SET_PROBE = 0, + PHYTCODEC_MSG_CMD_SET_REMOVE, + PHYTCODEC_MSG_CMD_SET_SUSPEND, + PHYTCODEC_MSG_CMD_SET_RESUME, + PHYTCODEC_MSG_CMD_SET_STARTUP, + PHYTCODEC_MSG_CMD_SET_STARTUP_RC, + PHYTCODEC_MSG_CMD_SET_MUTE, + PHYTCODEC_MSG_CMD_SET_UNMUTE, + PHYTCODEC_MSG_CMD_SET_DAI_FMT, + PHYTCODEC_MSG_CMD_SET_BIAS_ON, + PHYTCODEC_MSG_CMD_SET_BIAS_OFF, + PHYTCODEC_MSG_CMD_SET_BIAS_PREPARE, + PHYTCODEC_MSG_CMD_SET_BIAS_STANDBY, + PHYTCODEC_MSG_CMD_SET_SHUTDOWN, + PHYTCODEC_MSG_CMD_SET_SHUTDOWN_RC, + PHYTCODEC_MSG_CMD_SET_ONE_REG, +}; + +enum phytcodec_complete { + PHYTCODEC_COMPLETE_NOT_READY = 0, + PHYTCODEC_COMPLETE_SUCCESS, + PHYTCODEC_COMPLETE_GOING, + PHYTCODEC_COMPLETE_GENERIC_ERROR = 0x10, + PHYTCODEC_COMPLETE_TYPE_NOT_SUPPORTED, + PHYTCODEC_COMPLETE_CMD_NOT_SUPPORTED, + PHYTCODEC_COMPLETE_INVALID_PARAMETERS, +}; + +struct phytcodec_rw_data { + uint8_t addr; + uint8_t reg; + uint16_t val; +}; + +struct phytcodec_reg { + uint16_t total_regs_len; + uint8_t one_reg_len; + uint8_t cnt; + uint8_t regs[52]; +}; + +struct phytcodec_cmd { + uint8_t reserved; + uint8_t seq; + uint8_t cmd_id; + uint8_t cmd_subid; + uint16_t len; + uint8_t status; + uint8_t complete; + union { + uint8_t para[56]; + struct phytcodec_reg phytcodec_reg; + struct phytcodec_rw_data rw_data; + } cmd_para; +}; + +struct phytium_codec { + void __iomem *regfile_base; + void __iomem *sharemem_base; + struct device *dev; + uint8_t *regs; + uint8_t channels; + + struct regmap *regmap; + struct completion comp; + bool deemph; + + bool debug_enabled; + bool alive_enabled; + struct timer_list timer; + void (*heartbeat)(struct phytium_codec *priv); +}; + +static inline unsigned int +phyt_readl_reg(void *base, uint32_t offset) +{ + unsigned int data; + + data = readl(base + offset); + + return data; +} + +static inline void +phyt_writel_reg(void *base, uint32_t offset, uint32_t data) +{ + 
writel(data, base + offset); +} +#endif diff --git a/sound/soc/phytium/Kconfig b/sound/soc/phytium/Kconfig index b4fd9ae0b654bd337194dd15e02663060dfdf6a6..c567699bcb568e4500cdf97cb53d9c9671ff1a43 100644 --- a/sound/soc/phytium/Kconfig +++ b/sound/soc/phytium/Kconfig @@ -1,4 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 +comment "Phytium I2S Driver and Phytium I2S V2 Driver are generally not used together" + depends on SND_SOC_PHYTIUM_I2S && SND_SOC_PHYTIUM_I2S_V2 + config SND_SOC_PHYTIUM_I2S tristate "Phytium I2S Device Driver" depends on ARCH_PHYTIUM @@ -23,10 +26,25 @@ config SND_PMDK_ES8336 Say Y if you want to add Phytium machine support for ES8336 codecs. +config SND_SOC_PHYTIUM_I2S_V2 + tristate "Phytium I2S V2 Device Driver" + help + Say Y or M if you want to add support for I2S v2 driver for + Phytium I2S device . The device supports 2 channels each + for play and record. + +config SND_SOC_PHYTIUM_MACHINE_V2 + tristate "Phytium Machine V2 Driver" + depends on SND_SOC_PHYTIUM_I2S_V2 + help + Say Y or M if you want to add Phytium machine v2 support for + codecs. + config SND_PMDK_DP tristate "Phytium machine support with DP" - depends on I2C && SND_SOC_PHYTIUM_I2S + depends on SND_SOC_PHYTIUM_I2S || SND_SOC_PHYTIUM_I2S_V2 select SND_SOC_HDMI_CODEC help Say Y if you want to add Phytium machine support for Displayport. 
+ diff --git a/sound/soc/phytium/Makefile b/sound/soc/phytium/Makefile index 45cd2ed9d420cce7c1e2cd52a617e7cc4e6c0175..b3e38c874da6629f9d1d37aec884521d0c8ca071 100644 --- a/sound/soc/phytium/Makefile +++ b/sound/soc/phytium/Makefile @@ -12,3 +12,9 @@ obj-$(CONFIG_SND_PMDK_ES8336) += snd-soc-pmdk-es8336.o snd-soc-pmdk-dp-objs :=pmdk_dp.o obj-$(CONFIG_SND_PMDK_DP) += snd-soc-pmdk-dp.o + +snd-soc-phytium-i2s-v2-objs := phytium-i2s-v2.o +obj-$(CONFIG_SND_SOC_PHYTIUM_I2S_V2) += snd-soc-phytium-i2s-v2.o + +snd-soc-phytium-machine-v2-objs := phytium-machine-v2.o +obj-$(CONFIG_SND_SOC_PHYTIUM_MACHINE_V2) += snd-soc-phytium-machine-v2.o diff --git a/sound/soc/phytium/phytium-i2s-v2.c b/sound/soc/phytium/phytium-i2s-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..1817bf3ce2970e88db8d07292d4f382cb5f15ece --- /dev/null +++ b/sound/soc/phytium/phytium-i2s-v2.c @@ -0,0 +1,1151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium I2S ASoC driver + * + * Copyright (C) 2024, Phytium Technology Co., Ltd. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-i2s-v2.h" + +#define PHYT_I2S_V2_VERSION "1.0.5" + +static struct snd_soc_jack hs_jack; +static irqreturn_t phyt_i2s_gpio_interrupt(int irq, void *dev_id); +/* Headset jack detection DAPM pins */ +static struct snd_soc_jack_pin hs_jack_pins[] = { + { + .pin = "Front", + .mask = SND_JACK_HEADPHONE, + }, +}; + +static struct snd_pcm_hardware phytium_pcm_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_BLOCK_TRANSFER, + .rates = SNDRV_PCM_RATE_8000 | + SNDRV_PCM_RATE_32000 | + SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000, + .rate_min = 8000, + .rate_max = 48000, + .formats = (SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE), + .channels_min = 2, + .channels_max = 4, + .buffer_bytes_max = 4096*16, + .period_bytes_min = 1024, + .period_bytes_max = 4096*4, + .periods_min = 2, + .periods_max = 16, +}; + +static int phyt_pcm_new(struct snd_soc_component *component, + struct snd_soc_pcm_runtime *rtd) +{ + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + size_t size = phytium_pcm_hardware.buffer_bytes_max; + int ret = 0; + + snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, + SNDRV_DMA_TYPE_DEV, + priv->dev, size, size); + + if (ret < 0) { + dev_err(priv->dev, "can't alloc preallocate buffer\n"); + goto failed_preallocate_buffer; + } + + ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, priv->dev, BDL_SIZE, + &priv->pcm_config[0].bdl_dmab); + if (ret < 0) { + dev_err(priv->dev, "can't alloc bdl0 buffer\n"); + goto failed_alloc_bdl0; + } + + ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, priv->dev, BDL_SIZE, + &priv->pcm_config[1].bdl_dmab); + if (ret < 0) { + dev_err(priv->dev, 
"can't alloc bdl1 buffer\n"); + goto failed_alloc_bdl1; + } + return 0; + +failed_alloc_bdl1: + snd_dma_free_pages(&priv->pcm_config[0].bdl_dmab); +failed_alloc_bdl0: + snd_pcm_lib_preallocate_free_for_all(rtd->pcm); +failed_preallocate_buffer: + return ret; +} + +static void phyt_pcm_free(struct snd_soc_component *component, + struct snd_pcm *pcm) +{ + struct snd_soc_pcm_runtime *rtd = pcm->private_data; + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + + snd_dma_free_pages(&priv->pcm_config[1].bdl_dmab); + snd_dma_free_pages(&priv->pcm_config[0].bdl_dmab); + snd_pcm_lib_preallocate_free_for_all(pcm); +} + +static int phyt_pcm_open(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + struct snd_pcm_runtime *runtime = substream->runtime; + + if (!priv->i2s_dp) + phytium_pcm_hardware.formats |= SNDRV_PCM_FMTBIT_S32_LE; + snd_soc_set_runtime_hwparams(substream, &phytium_pcm_hardware); + snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); + snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128); + runtime->private_data = priv; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + priv->substream_playback = substream; + else + priv->substream_capture = substream; + + return 0; +} + +static int phyt_pcm_close(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + priv->substream_playback = NULL; + else + priv->substream_capture = NULL; + + return 0; +} + +static int phyt_pcm_hw_params(struct snd_soc_component *component, + struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params) +{ 
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + int ret; + uint32_t format_val; + + switch (params_format(hw_params)) { + case SNDRV_PCM_FORMAT_S16_LE: + format_val = BYTE_2; + break; + + case SNDRV_PCM_FORMAT_S24_LE: + format_val = BYTE_4; + break; + + case SNDRV_PCM_FORMAT_S32_LE: + format_val = BYTE_4; + break; + + default: + dev_err(priv->dev, "phytium-i2s: unsupported PCM fmt"); + return -EINVAL; + } + + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + priv->pcm_config[DIRECTION_PLAYBACK].format_val = format_val; + else + priv->pcm_config[DIRECTION_CAPTURE].format_val = format_val; + + ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); + if (ret < 0) + return ret; + + return 0; +} + +static void phyt_bdl_entry_setup(dma_addr_t addr, uint32_t **pbdl, + uint32_t period_bytes, uint32_t with_ioc) +{ + uint32_t *bdl = *pbdl; + uint32_t chunk; + + while (period_bytes > 0) { + bdl[0] = cpu_to_le32((u32)addr); + bdl[1] = cpu_to_le32(upper_32_bits(addr)); + if (period_bytes > BDL_BUFFER_MAX_SIZE) + chunk = BDL_BUFFER_MAX_SIZE; + else + chunk = period_bytes; + + bdl[2] = cpu_to_le32(chunk); + period_bytes -= chunk; + bdl[3] = (period_bytes || !with_ioc) ? 
0 : cpu_to_le32(0x01); + bdl += 4; + } + + *pbdl = bdl; +} + + +static int phyt_pcm_prepare(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + struct snd_pcm_runtime *runtime = substream->runtime; + struct phytium_pcm_config *pcm_config; + int periods = 0, i, offset = 0; + uint32_t *bdl = NULL; + struct snd_dma_buffer *data_dmab = NULL; + int direction; + uint32_t frags = 0; + uint32_t config = 0, format_val; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + priv->pcm_config[DIRECTION_PLAYBACK].buffer_size = + snd_pcm_lib_buffer_bytes(substream); + priv->pcm_config[DIRECTION_PLAYBACK].period_bytes = + snd_pcm_lib_period_bytes(substream); + priv->pcm_config[DIRECTION_PLAYBACK].no_period_wakeup = + runtime->no_period_wakeup; + direction = DIRECTION_PLAYBACK; + format_val = priv->pcm_config[DIRECTION_PLAYBACK].format_val; + } else { + priv->pcm_config[DIRECTION_CAPTURE].buffer_size = + snd_pcm_lib_buffer_bytes(substream); + priv->pcm_config[DIRECTION_CAPTURE].period_bytes = + snd_pcm_lib_period_bytes(substream); + priv->pcm_config[DIRECTION_CAPTURE].no_period_wakeup = + runtime->no_period_wakeup; + direction = DIRECTION_CAPTURE; + format_val = priv->pcm_config[DIRECTION_CAPTURE].format_val; + } + + pcm_config = &priv->pcm_config[direction]; + + periods = pcm_config->buffer_size / pcm_config->period_bytes; + bdl = (uint32_t *)pcm_config->bdl_dmab.area; + data_dmab = snd_pcm_get_dma_buf(substream); + for (i = 0; i < periods; i++) { + phyt_bdl_entry_setup(data_dmab->addr + offset, &bdl, + pcm_config->period_bytes, + !pcm_config->no_period_wakeup); + offset += pcm_config->period_bytes; + frags += DIV_ROUND_UP(pcm_config->period_bytes, BDL_BUFFER_MAX_SIZE); + } + + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHAL_CONFG0, CHANNEL_0_1_ENABLE); + phyt_writel_reg(priv->dma_reg_base, 
PHYTIUM_DMA_MASK_INT, CHANNEL_0_1_INT_MASK); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_BDLPL(direction), + (uint32_t)(pcm_config->bdl_dmab.addr)); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_BDLPU(direction), + upper_32_bits(pcm_config->bdl_dmab.addr)); + if (direction == DIRECTION_PLAYBACK) + config = PLAYBACK_ADDRESS_OFFSET; + else + config = CAPTRUE_ADDRESS_OFFSET; + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_DEV_ADDR(direction), + E2000_LSD_I2S_BASE + config); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_LVI(direction), frags - 1); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_CBL(direction), + pcm_config->buffer_size); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_DSIZE(direction), + D_SIZE(format_val, direction)); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_DLENTH(direction), D_LENTH); + + return 0; +} + +static int phyt_pcm_hw_free(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + int direction; + u32 mask; + int cnt = 10; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + direction = DIRECTION_PLAYBACK; + else + direction = DIRECTION_CAPTURE; + + mask = readl(priv->dma_reg_base + PHYTIUM_DMA_MASK_INT); + mask &= ~BIT(direction); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_MASK_INT, mask); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_CTL(direction), 0); + while (cnt--) { + if (readl(priv->dma_reg_base + PHYTIUM_DMA_CHALX_CTL(direction)) == 0) + break; + udelay(200); + } + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_CTL(direction), 2); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_CTL(direction), 0); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_STS, DMA_TX_DONE); + else + phyt_writel_reg(priv->dma_reg_base, 
PHYTIUM_DMA_STS, DMA_RX_DONE); + + return snd_pcm_lib_free_pages(substream); +} + +static int phyt_pcm_trigger(struct snd_soc_component *component, + struct snd_pcm_substream *substream, int cmd) +{ + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + bool start = false; + int direction = 0, value = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + start = true; + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + start = false; + break; + default: + return -EINVAL; + } + + direction = ((substream->stream == + SNDRV_PCM_STREAM_PLAYBACK) ? DIRECTION_PLAYBACK:DIRECTION_CAPTURE); + + if (start) + value = (substream->stream == + SNDRV_PCM_STREAM_PLAYBACK) ? PLAYBACK_START:CAPTURE_START; + else + value = CTL_STOP; + + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CHALX_CTL(direction), value); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_CTL, DMA_ENABLE); + + return 0; +} + +static snd_pcm_uframes_t phyt_pcm_pointer(struct snd_soc_component *component, + struct snd_pcm_substream *substream) +{ + struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); + + uint32_t pos = 0; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + pos = readl(priv->dma_reg_base + PHYTIUM_DMA_LPIB(DIRECTION_PLAYBACK)); + else + pos = readl(priv->dma_reg_base + PHYTIUM_DMA_LPIB(DIRECTION_CAPTURE)); + + return bytes_to_frames(substream->runtime, pos); +} + +int phyt_i2s_msg_set_cmd(struct phytium_i2s *priv, bool is_gpio) +{ + struct phyti2s_cmd *ans_msg; + int timeout = 40, ret = 0; + + if (is_gpio) { + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_AP2RV_INT_STATE, + SEND_GPIO_INTR); + ans_msg = priv->sharemem_base + PHYTIUM_GPIO_OFFSET; + } else { + 
phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_AP2RV_INT_STATE, SEND_INTR); + ans_msg = priv->sharemem_base; + } + + while ((ans_msg->complete == PHYTI2S_COMPLETE_NONE + || ans_msg->complete == PHYTI2S_COMPLETE_GOING) + && timeout) { + udelay(500); + timeout--; + } + + if (timeout == 0) { + dev_err(priv->dev, "wait cmd reply timeout\n"); + ret = -EBUSY; + } + + if (ans_msg->complete == PHYTI2S_COMPLETE_ERROR) { + dev_err(priv->dev, "handle cmd failed\n"); + ret = -EINVAL; + } + + if (ans_msg->complete == PHYTI2S_COMPLETE_ID_NOT_SUPPORTED) { + dev_err(priv->dev, "cmd not support!\n"); + ret = -EINVAL; + } + + if (ans_msg->complete == PHYTI2S_COMPLETE_SUBID_NOT_SUPPORTED) { + dev_err(priv->dev, "cmd subid not support!\n"); + ret = -EINVAL; + } + + if (ans_msg->complete == PHYTI2S_COMPLETE_INVALID_PARAMETERS) { + dev_err(priv->dev, "cmd params not support!\n"); + ret = -EINVAL; + } + + return ret; +} + +static int phyt_i2s_enable_gpio(struct phytium_i2s *priv) +{ + struct phyti2s_cmd *msg = priv->sharemem_base + PHYTIUM_GPIO_OFFSET; + struct gpio_i2s_data *data = &msg->cmd_para.gpio_i2s_data; + int ret = 0; + + msg->id = PHYTIUM_I2S_LSD_ID; + msg->cmd_id = PHYTI2S_MSG_CMD_SET; + msg->cmd_subid = PHYTI2S_MSG_CMD_SET_GPIO; + msg->complete = 0; + data->enable = 1; + ret = phyt_i2s_msg_set_cmd(priv, true); + if (ret) + dev_err(priv->dev, "PHYTI2S_MSG_CMD_SET_GPIO enable failed: %d\n", ret); + + return ret; +} + +static int phyt_i2s_disable_gpioint(struct phytium_i2s *priv) +{ + struct phyti2s_cmd *msg = priv->sharemem_base + PHYTIUM_GPIO_OFFSET; + struct gpio_i2s_data *data = &msg->cmd_para.gpio_i2s_data; + int ret = 0; + + msg->id = PHYTIUM_I2S_LSD_ID; + msg->cmd_id = PHYTI2S_MSG_CMD_SET; + msg->cmd_subid = PHYTI2S_MSG_CMD_SET_GPIO; + msg->complete = 0; + data->enable = 0; + ret = phyt_i2s_msg_set_cmd(priv, true); + if (ret) + dev_err(priv->dev, "PHYTIUM_MSG_CMD_SET_GPIO disable failed: %d\n", ret); + + return ret; +} + +static int phyt_pcm_suspend(struct 
snd_soc_component *component) +{ + return 0; +} + +static int phyt_pcm_resume(struct snd_soc_component *component) +{ + struct phytium_i2s *priv = snd_soc_component_get_drvdata(component); + struct snd_soc_dai *dai; + struct phyti2s_cmd *msg = priv->sharemem_base; + struct set_mode_data *data = &msg->cmd_para.set_mode_data; + int ret = 0; + + for_each_component_dais(component, dai) { + if (snd_soc_dai_stream_active(dai, SNDRV_PCM_STREAM_PLAYBACK)) { + data->direction = DIRECTION_PLAYBACK; + data->data_width = priv->data_width; + data->sample_rate = priv->sample_rate; + data->chan_nr = priv->chan_nr; + data->clk_base = priv->clk_base; + data->enable = 1; + msg->id = PHYTIUM_I2S_LSD_ID; + msg->cmd_id = PHYTI2S_MSG_CMD_SET; + msg->cmd_subid = PHYTI2S_MSG_CMD_SET_MODE; + msg->complete = 0; + ret = phyt_i2s_msg_set_cmd(priv, false); + if (ret) { + dev_err(priv->dev, "phytium-i2s: resume failed: %d\n", ret); + ret = -EINVAL; + goto error; + } + } + if (snd_soc_dai_stream_active(dai, SNDRV_PCM_STREAM_CAPTURE)) { + data->direction = DIRECTION_CAPTURE; + data->data_width = priv->data_width; + data->sample_rate = priv->sample_rate; + data->chan_nr = priv->chan_nr; + data->clk_base = priv->clk_base; + data->enable = 1; + msg->id = PHYTIUM_I2S_LSD_ID; + msg->cmd_id = PHYTI2S_MSG_CMD_SET; + msg->cmd_subid = PHYTI2S_MSG_CMD_SET_MODE; + msg->complete = 0; + ret = phyt_i2s_msg_set_cmd(priv, false); + if (ret) { + dev_err(priv->dev, "phytium-i2s: resume failed: %d\n", ret); + ret = -EINVAL; + goto error; + } + } + } + if (priv->insert >= 0) + phyt_i2s_enable_gpio(priv); +error: + return ret; +} + +static int phyt_pcm_component_probe(struct snd_soc_component *component) +{ + struct phytium_i2s *priv = snd_soc_component_get_drvdata(component); + struct snd_soc_card *card = component->card; + int ret; + + if (priv->insert < 0) + return 0; + + ret = snd_soc_card_jack_new_pins(card, "Headset Jack", SND_JACK_HEADSET, + &hs_jack, hs_jack_pins, + ARRAY_SIZE(hs_jack_pins)); + if (ret < 0) { 
+ dev_err(component->dev, "Cannot create jack\n"); + return ret; + } + ret = phyt_i2s_enable_gpio(priv); + if (ret < 0) { + dev_err(component->dev, "failed to enable gpio\n"); + return ret; + } + phyt_i2s_gpio_interrupt(priv->gpio_irq, priv); + return 0; +} + +static const struct snd_soc_component_driver phytium_i2s_component = { + .name = "phytium-i2s", + .pcm_construct = phyt_pcm_new, + .pcm_destruct = phyt_pcm_free, + .suspend = phyt_pcm_suspend, + .resume = phyt_pcm_resume, + .probe = phyt_pcm_component_probe, + .open = phyt_pcm_open, + .close = phyt_pcm_close, + .hw_params = phyt_pcm_hw_params, + .prepare = phyt_pcm_prepare, + .hw_free = phyt_pcm_hw_free, + .trigger = phyt_pcm_trigger, + .pointer = phyt_pcm_pointer, + .legacy_dai_naming = 1, +}; + +static int phyt_i2s_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) +{ + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(dai); + struct phyti2s_cmd *msg = priv->sharemem_base; + struct set_mode_data *data = &msg->cmd_para.set_mode_data; + int ret = 0; + + memset(msg, 0, sizeof(struct phyti2s_cmd)); + + data->direction = ((substream->stream == + SNDRV_PCM_STREAM_PLAYBACK) ? 
DIRECTION_PLAYBACK:DIRECTION_CAPTURE); + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + data->data_width = 16; + break; + case SNDRV_PCM_FORMAT_S24_LE: + data->data_width = 24; + break; + case SNDRV_PCM_FORMAT_S32_LE: + data->data_width = 32; + break; + default: + dev_err(priv->dev, "phytium-i2s: unsupported dai fmt"); + ret = -EINVAL; + goto error; + } + + data->sample_rate = params_rate(params); + data->chan_nr = params_channels(params); + data->enable = 1; + data->clk_base = priv->clk_base; + priv->data_width = data->data_width; + priv->sample_rate = data->sample_rate; + priv->chan_nr = data->chan_nr; + msg->id = PHYTIUM_I2S_LSD_ID; + msg->cmd_id = PHYTI2S_MSG_CMD_SET; + msg->cmd_subid = PHYTI2S_MSG_CMD_SET_MODE; + msg->complete = 0; + ret = phyt_i2s_msg_set_cmd(priv, false); + if (ret) { + dev_err(priv->dev, "phytium-i2s: PHYTI2S_MSG_CMD_SET_MODE failed: %d\n", ret); + ret = -EINVAL; + } + +error: + return ret; +} + +static int phyt_i2s_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) +{ + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(dai); + bool start = false; + int ret = 0, cfg = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + priv->running += 1; + start = true; + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + priv->running -= 1; + start = false; + break; + default: + ret = -EINVAL; + break; + } + + if (!start && priv->running) + goto error; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + cfg = start ? TX_EN : TX_DIS; + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_ITER, cfg); + } else { + cfg = start ? 
RX_EN : RX_DIS; + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_IRER, cfg); + } + +error: + return ret; +} + +static int phyt_i2s_hw_free(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct phytium_i2s *priv = snd_soc_dai_get_drvdata(dai); + struct phyti2s_cmd *msg = priv->sharemem_base; + struct set_mode_data *data = &msg->cmd_para.set_mode_data; + int ret = 0; + + if (priv->running > 0) + return 0; + + memset(msg, 0, sizeof(struct phyti2s_cmd)); + + data->direction = ((substream->stream == + SNDRV_PCM_STREAM_PLAYBACK) ? DIRECTION_PLAYBACK:DIRECTION_CAPTURE); + data->enable = 0; + msg->id = PHYTIUM_I2S_LSD_ID; + msg->cmd_id = PHYTI2S_MSG_CMD_SET; + msg->cmd_subid = PHYTI2S_MSG_CMD_SET_MODE; + msg->complete = 0; + ret = phyt_i2s_msg_set_cmd(priv, false); + if (ret) { + dev_err(priv->dev, "phytium-i2s: SET_MODE disable failed: %d\n", ret); + ret = -EINVAL; + } + + return ret; +} + +static const struct snd_soc_dai_ops phytium_i2s_dai_ops = { + .hw_params = phyt_i2s_hw_params, + .trigger = phyt_i2s_trigger, + .hw_free = phyt_i2s_hw_free, +}; + +static void phyt_i2s_gpio_jack_work(struct work_struct *work) +{ + struct phytium_i2s *priv = container_of(work, struct phytium_i2s, + phyt_i2s_gpio_work.work); + struct phyti2s_cmd *msg = priv->sharemem_base + PHYTIUM_GPIO_OFFSET; + struct gpio_i2s_data *data; + int ret = 0; + u32 unplug = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_HPDET); + + if (IS_ERR_OR_NULL(hs_jack.card->snd_card)) { + dev_warn(priv->dev, "sound card %s not binded", + hs_jack.card->name); + return; + } + + if (unplug & 0x1) { + if (hs_jack.jack) { + snd_soc_jack_report(&hs_jack, HEADPHONE_DISABLE, SND_JACK_HEADSET); + priv->insert = 0; + } + } else { + if (hs_jack.jack) { + snd_soc_jack_report(&hs_jack, HEADPHONE_ENABLE, SND_JACK_HEADSET); + priv->insert = 1; + } + } + + data = &msg->cmd_para.gpio_i2s_data; + + msg->id = PHYTIUM_I2S_LSD_ID; + msg->cmd_id = PHYTI2S_MSG_CMD_SET; + msg->cmd_subid = 
PHYTI2S_MSG_CMD_SET_GPIO; + msg->complete = 0; + data->enable = -1; + data->insert = priv->insert; + ret = phyt_i2s_msg_set_cmd(priv, true); + if (ret) + dev_err(priv->dev, "PHYTI2S_MSG_CMD_SET_GPIO report jack failed: %d\n", ret); +} + +static irqreturn_t phyt_i2s_gpio_interrupt(int irq, void *dev_id) +{ + struct phytium_i2s *priv = dev_id; + + queue_delayed_work(system_power_efficient_wq, &priv->phyt_i2s_gpio_work, + msecs_to_jiffies(100)); + + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_GPIO_PORTA_EOI, BIT(0)); + return IRQ_HANDLED; +} + +static irqreturn_t phyt_i2s_interrupt(int irq, void *dev_id) +{ + struct phytium_i2s *priv = dev_id; + uint32_t status; + int ret = IRQ_NONE; + + status = readl(priv->dma_reg_base + PHYTIUM_DMA_STS); + + if (status & DMA_TX_DONE) { + snd_pcm_period_elapsed(priv->substream_playback); + writel(DMA_TX_DONE, priv->dma_reg_base + PHYTIUM_DMA_STS); + ret = IRQ_HANDLED; + } + + if (status & DMA_RX_DONE) { + snd_pcm_period_elapsed(priv->substream_capture); + writel(DMA_RX_DONE, priv->dma_reg_base + PHYTIUM_DMA_STS); + ret = IRQ_HANDLED; + } + + return ret; +} + +static int phyt_configure_dai_driver(struct phytium_i2s *priv, + struct snd_soc_dai_driver *dai_driver) +{ + dai_driver->playback.stream_name = "i2s-Playback"; + dai_driver->playback.channels_min = MIN_CHANNEL_NUM; + dai_driver->playback.channels_max = 4; + dai_driver->playback.rates = SNDRV_PCM_RATE_8000_192000; + dai_driver->playback.formats = SNDRV_PCM_FMTBIT_S8 | + SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE; + + dai_driver->symmetric_rate = 1; + dai_driver->ops = &phytium_i2s_dai_ops; + if (priv->i2s_dp) + return 0; + + dai_driver->playback.formats |= SNDRV_PCM_FMTBIT_S32_LE; + dai_driver->capture.stream_name = "i2s-Capture"; + dai_driver->capture.channels_min = 2; + dai_driver->capture.channels_max = 4; + dai_driver->capture.rates = SNDRV_PCM_RATE_8000_192000; + dai_driver->capture.formats = SNDRV_PCM_FMTBIT_S8 | + 
SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S20_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S32_LE; + + return 0; +} + +static void phyt_i2s_heartbeat(struct phytium_i2s *priv) +{ + u32 debug_reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG, + debug_reg | HEARTBEAT); +} + +static void phyt_i2s_enable_heartbeat(struct phytium_i2s *priv) +{ + u32 debug_reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + + priv->heart_enable = true; + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG, + debug_reg | HEART_ENABLE); +} + +static void phyt_i2s_disable_heartbeat(struct phytium_i2s *priv) +{ + u32 debug_reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + + priv->heart_enable = false; + debug_reg &= ~HEART_ENABLE; + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG, debug_reg); +} + +static void phyt_i2s_enable_debug(struct phytium_i2s *priv) +{ + u32 debug_reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG, + debug_reg | DEBUG_ENABLE); +} + +static void phyt_i2s_disable_debug(struct phytium_i2s *priv) +{ + u32 debug_reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + + debug_reg &= ~DEBUG_ENABLE; + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG, debug_reg); +} + +static void phyt_i2s_timer_handler(struct timer_list *timer) +{ + struct phytium_i2s *priv = from_timer(priv, timer, timer); + + if (priv->heart_enable) + phyt_i2s_heartbeat(priv); + + mod_timer(&priv->timer, jiffies + msecs_to_jiffies(2000)); +} + +void phyt_i2s_show_log(struct phytium_i2s *priv) +{ + u32 reg, len; + u8 *plog; + int i; + + if (!priv->log_addr) + return; + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + + plog = priv->log_addr; + if (reg & LOG_MASK) { + len = strnlen((char *)priv->log_addr, LOG_SIZE_MAX); + dev_info(priv->dev, "log len :%d, 
addr:0x%llx, size:%d\n", len, + (u64)priv->log_addr, priv->log_size); + if (len > LOG_LINE_MAX_LEN) { + for (i = 0; i < len; i += LOG_LINE_MAX_LEN) + dev_info(priv->dev, "(DEV)%.*s\n", LOG_LINE_MAX_LEN, &plog[i]); + } else { + dev_info(priv->dev, "(DDR)%.*s\n", LOG_LINE_MAX_LEN, &plog[i]); + } + for (i = 0; i < priv->log_size; i++) + plog[i] = 0; + } + reg &= ~LOG_MASK; + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG, reg); +} + +int phyt_i2s_alloc_log_mem(struct phytium_i2s *priv) +{ + u32 reg; + u64 phy_addr; + + reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + phy_addr = ((reg & ADDR_MASK) >> ADDR_LOW_SHIFT) << ADDR_SHIFT; + + priv->log_size = ((reg & LOG_SIZE_MASK) >> LOG_SIZE_LOW_SHIFT) * 1024; + priv->log_addr = devm_ioremap_wc(priv->dev, phy_addr, priv->log_size); + if (IS_ERR(priv->log_addr)) { + dev_err(priv->dev, "log_addr alloc failed\n"); + return -ENOMEM; + } + return 0; +} + +static ssize_t phyt_i2s_debug_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct phytium_i2s *priv = dev_get_drvdata(dev); + u32 debug_reg = phyt_readl_reg(priv->regfile_base, PHYTIUM_REGFILE_DEBUG); + + return sprintf(buf, "%x\n", debug_reg); +} + +static ssize_t phyt_i2s_debug_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct phytium_i2s *priv = dev_get_drvdata(dev); + char *p; + char *token; + u8 loc, dis_en; + int ret; + long value; + + dev_info(dev, "echo debug(1)/alive(0) enable(1)/disable(0) > debug\n"); + + p = kmalloc(size, GFP_KERNEL); + if (p == NULL) + return -EINVAL; + strscpy(p, buf, sizeof(p)); + + token = strsep(&p, " "); + if (!token) { + ret = -EINVAL; + goto error; + } + + ret = kstrtol(token, 0, &value); + if (ret) + goto error; + loc = (u8)value; + + token = strsep(&p, " "); + if (!token) { + ret = -EINVAL; + goto error; + } + + ret = kstrtol(token, 0, &value); + if (ret) + goto error; + dis_en = value; + + if (loc == 1) { + if (dis_en) { + 
phyt_i2s_enable_debug(priv); + phyt_i2s_show_log(priv); + } else { + phyt_i2s_disable_debug(priv); + } + } else if (loc == 0) { + if (dis_en) + phyt_i2s_enable_heartbeat(priv); + else + phyt_i2s_disable_heartbeat(priv); + } + + kfree(p); + return size; +error: + kfree(p); + return ret; +} + +static DEVICE_ATTR_RW(phyt_i2s_debug); + +static struct attribute *phyt_i2s_device_attrs[] = { + &dev_attr_phyt_i2s_debug.attr, + NULL, +}; + +static const struct attribute_group phyt_i2s_device_group = { + .attrs = phyt_i2s_device_attrs, +}; + +static int phyt_i2s_probe(struct platform_device *pdev) +{ + struct phytium_i2s *priv; + struct snd_soc_dai_driver *dai_driver; + struct resource *res; + int ret, irq, gpio_irq; + struct clk *clk; + struct fwnode_handle *np; + uint32_t status; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto failed_alloc_phytium_i2s; + } + + dev_set_drvdata(&pdev->dev, priv); + priv->dev = &pdev->dev; + + dai_driver = devm_kzalloc(&pdev->dev, sizeof(*dai_driver), GFP_KERNEL); + if (!dai_driver) { + ret = -ENOMEM; + goto failed_alloc_dai_driver; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regfile_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->regfile_base)) { + dev_err(&pdev->dev, "failed to ioremap resource0\n"); + ret = PTR_ERR(priv->regfile_base); + goto failed_ioremap_res0; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->sharemem_base = devm_ioremap_wc(&pdev->dev, res->start, resource_size(res)); + if (IS_ERR(priv->sharemem_base)) { + dev_err(&pdev->dev, "failed to ioremap resource1\n"); + ret = PTR_ERR(priv->sharemem_base); + goto failed_ioremap_res1; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + priv->dma_reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->dma_reg_base)) { + dev_err(&pdev->dev, "failed to ioremap resource2\n"); + ret = PTR_ERR(priv->dma_reg_base); + goto failed_ioremap_res2; + 
} + + ret = phyt_i2s_alloc_log_mem(priv); + if (ret != 0) { + dev_err(&pdev->dev, "failed to alloc log mem\n"); + goto failed_alloc_log_mem; + } + + status = readl(priv->dma_reg_base + PHYTIUM_DMA_STS); + if (status & DMA_TX_DONE) + writel(DMA_TX_DONE, priv->dma_reg_base + PHYTIUM_DMA_STS); + if (status & DMA_RX_DONE) + writel(DMA_RX_DONE, priv->dma_reg_base + PHYTIUM_DMA_STS); + phyt_writel_reg(priv->dma_reg_base, PHYTIUM_DMA_MASK_INT, 0x0); + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, phyt_i2s_interrupt, IRQF_SHARED, + pdev->name, priv); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request irq\n"); + goto failed_request_irq; + } + + gpio_irq = platform_get_irq_optional(pdev, 1); + priv->insert = -1; + if (gpio_irq > 0) { + priv->gpio_irq = gpio_irq; + INIT_DELAYED_WORK(&priv->phyt_i2s_gpio_work, phyt_i2s_gpio_jack_work); + phyt_writel_reg(priv->regfile_base, PHYTIUM_REGFILE_GPIO_PORTA_EOI, BIT(0)); + ret = phyt_i2s_disable_gpioint(priv); + if (ret < 0) { + dev_err(&pdev->dev, "failed to disable gpioint\n"); + goto failed_disable_gpioint; + } + priv->insert = 0; + ret = devm_request_irq(&pdev->dev, gpio_irq, phyt_i2s_gpio_interrupt, + IRQF_SHARED, pdev->name, priv); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request gpio irq\n"); + goto failed_request_irq; + } + } + + if (pdev->dev.of_node) { + ret = device_property_read_string(&pdev->dev, "dai-name", &dai_driver->name); + if (ret < 0) { + dev_err(&pdev->dev, "missing dai-name property %d\n", ret); + goto failed_get_dai_name; + } + clk = devm_clk_get(&pdev->dev, NULL); + priv->clk_base = clk_get_rate(clk); + } else if (has_acpi_companion(&pdev->dev)) { + np = dev_fwnode(&(pdev->dev)); + ret = fwnode_property_read_string(np, "dai-name", &dai_driver->name); + if (ret < 0) { + dev_err(&pdev->dev, "missing dai-name property from acpi\n"); + goto failed_get_dai_name; + } + ret = fwnode_property_read_u32(np, "i2s_clk", &priv->clk_base); + if (ret < 0) { + dev_err(&pdev->dev, 
"missing i2s_clk property from acpi\n"); + goto failed_get_dai_name; + } + } + + if (strstr(dai_driver->name, "dp")) + priv->i2s_dp = 1; + else + priv->i2s_dp = 0; + phyt_configure_dai_driver(priv, dai_driver); + + ret = devm_snd_soc_register_component(&pdev->dev, &phytium_i2s_component, + dai_driver, 1); + if (ret != 0) { + dev_err(&pdev->dev, "not able to register dai\n"); + goto failed_register_com; + } + + if (sysfs_create_group(&priv->dev->kobj, &phyt_i2s_device_group)) + dev_warn(&pdev->dev, "failed create sysfs\n"); + + phyt_i2s_disable_heartbeat(priv); + phyt_i2s_disable_debug(priv); + priv->timer.expires = jiffies + msecs_to_jiffies(5000); + timer_setup(&priv->timer, phyt_i2s_timer_handler, 0); + add_timer(&priv->timer); + + return 0; +failed_register_com: +failed_get_dai_name: +failed_request_irq: +failed_disable_gpioint: +failed_alloc_log_mem: +failed_ioremap_res2: +failed_ioremap_res1: +failed_ioremap_res0: +failed_alloc_dai_driver: +failed_alloc_phytium_i2s: + return ret; +} + +static int phyt_i2s_remove(struct platform_device *pdev) +{ + struct phytium_i2s *priv = dev_get_drvdata(&pdev->dev); + + sysfs_remove_group(&priv->dev->kobj, &phyt_i2s_device_group); + del_timer(&priv->timer); + return 0; +} + +static const struct of_device_id phyt_i2s_of_match[] = { + { .compatible = "phytium,i2s-2.0", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phyt_i2s_of_match); + +static const struct acpi_device_id phyt_i2s_acpi_match[] = { + { "PHYT0062", 0}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, phyt_i2s_acpi_match); + +static struct platform_driver phyt_i2s_driver = { + .probe = phyt_i2s_probe, + .remove = phyt_i2s_remove, + .driver = { + .name = "phytium-i2s-v2", + .of_match_table = of_match_ptr(phyt_i2s_of_match), + .acpi_match_table = phyt_i2s_acpi_match, + }, +}; + +module_platform_driver(phyt_i2s_driver); + +MODULE_DESCRIPTION("Phytium I2S V2 Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_VERSION(PHYT_I2S_V2_VERSION); diff --git 
a/sound/soc/phytium/phytium-i2s-v2.h b/sound/soc/phytium/phytium-i2s-v2.h new file mode 100644 index 0000000000000000000000000000000000000000..5db90b103cd2a7e22db505449665b2d1bd7fa726 --- /dev/null +++ b/sound/soc/phytium/phytium-i2s-v2.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Phytium I2S ASoC driver + * + * Copyright (C) 2024, Phytium Technology Co., Ltd. + * + */ + +#ifndef __PHYTIUM_VIRT_I2S_H +#define __PHYTIUM_VIRT_I2S_H + +#include +#include + +struct phytium_pcm_config { + uint32_t buffer_size; + uint32_t period_bytes; + uint32_t frags; + uint32_t format_val; + unsigned int no_period_wakeup: 1; + struct snd_dma_buffer bdl_dmab; +}; + +struct phytium_i2s { + void __iomem *regfile_base; + void __iomem *sharemem_base; + void __iomem *dma_reg_base; + struct device *dev; + struct device *parent; + + struct snd_pcm_substream *substream_playback; + struct snd_pcm_substream *substream_capture; + u32 clk_base; + struct phytium_pcm_config pcm_config[2]; + struct delayed_work phyt_i2s_gpio_work; + int running; + struct timer_list timer; + bool heart_enable; + uint32_t chan_nr; + uint32_t data_width; + uint32_t sample_rate; + int insert; + int gpio_irq; + bool i2s_dp; + void __iomem *log_addr; + u32 log_size; +}; + +struct pdata_pd230x_mfd { + struct device *dev; + char *name; + int clk_base; +}; + +#define MAX_CHANNEL_NUM 8 +#define MIN_CHANNEL_NUM 2 + +#define BDL_SIZE 4096 +#define BDL_BUFFER_MAX_SIZE 0x100000 + +#define DIRECTION_PLAYBACK 0 +#define DIRECTION_CAPTURE 1 + +#define HEADPHONE_DISABLE 0 +#define HEADPHONE_ENABLE 1 + +/* *******************msg*****************************/ +#define PHYTIUM_I2S_LSD_ID 0x00 + +enum phyti2s_msg_cmd_id { + PHYTI2S_MSG_CMD_DEFAULT = 0, + PHYTI2S_MSG_CMD_SET, + PHYTI2S_MSG_CMD_GET, + PHYTI2S_MSG_CMD_DATA, + PHYTI2S_MSG_CMD_REPORT, + PHYTI2S_SYS_PROTOCOL = 0x10, +}; + +enum phyti2s_set_subid { + PHYTI2S_MSG_CMD_SET_MODE = 0, + PHYTI2S_MSG_CMD_SET_TRIGGER, + PHYTI2S_MSG_CMD_SET_GPIO, +}; + +enum 
phyti2s_complete { + PHYTI2S_COMPLETE_NONE = 0, + PHYTI2S_COMPLETE_DONE, + PHYTI2S_COMPLETE_GOING, + PHYTI2S_COMPLETE_ERROR = 0x10, + PHYTI2S_COMPLETE_ID_NOT_SUPPORTED, + PHYTI2S_COMPLETE_SUBID_NOT_SUPPORTED, + PHYTI2S_COMPLETE_INVALID_PARAMETERS, +}; + +struct set_mode_data { + int direction; + uint32_t chan_nr; + uint32_t data_width; + uint32_t sample_rate; + uint32_t enable; + uint32_t clk_base; +}; + +struct trigger_i2s_data { + int direction; + uint32_t start; +}; + +struct gpio_i2s_data { + int enable; + int insert; +}; + +struct phyti2s_cmd { + uint16_t id; + uint8_t cmd_id; + uint8_t cmd_subid; + uint16_t len; + uint16_t complete; + union { + uint8_t para[56]; + struct set_mode_data set_mode_data; + struct trigger_i2s_data trigger_i2s_data; + struct gpio_i2s_data gpio_i2s_data; + } cmd_para; +}; +/* *******************msg end ****************************/ + + +/*********************register ***************************/ +/* regfile */ +#define PHYTIUM_REGFILE_TX_HEAD 0x00 + #define TX_HEAD_INTR (1 << 16) +#define PHYTIUM_REGFILE_TX_TAIL 0X04 + +#define PHYTIUM_REGFILE_AP2RV_INT_MASK 0x20 +#define PHYTIUM_REGFILE_AP2RV_INT_STATE 0x24 + #define SEND_INTR (1 << 4) + #define SEND_GPIO_INTR (1 << 8) +#define PHYTIUM_REGFILE_GPIO_PORTA_EOI 0x30 +#define PHYTIUM_REGFILE_DEBUG 0x58 + #define DEBUG_ENABLE (1 << 0) + #define HEART_ENABLE (1 << 1) + #define HEARTBEAT (1 << 2) + #define LOG_MASK (1 << 3) + #define LOG_SIZE_LOW_SHIFT 4 + #define LOG_SIZE_MASK GENMASK(7, 4) + #define LOG_SIZE_MAX 8192 + #define ADDR_LOW_SHIFT 8 + #define ADDR_MASK GENMASK(27, 8) + #define ADDR_SHIFT 12 + #define LOG_LINE_MAX_LEN 400 +#define PHYTIUM_REGFILE_HPDET 0x34 +#define PHYTIUM_REGFILE_IRER 0x38 +#define RX_EN 1 +#define RX_DIS 0 +#define PHYTIUM_REGFILE_ITER 0x3c +#define TX_EN 1 +#define TX_DIS 0 + +/*gpio share memory*/ +#define PHYTIUM_GPIO_OFFSET 0x40 + +/* DMA register */ +#define PHYTIUM_DMA_CTL 0x0000 + #define DMA_ENABLE 0x1 +#define PHYTIUM_DMA_CHAL_CONFG0 0x0004 + 
#define CHANNEL_0_1_ENABLE 0x8180 +#define PHYTIUM_DMA_STS 0x0008 + #define DMA_TX_DONE 0x1 + #define DMA_RX_DONE 0x100 +#define PHYTIUM_DMA_MASK_INT 0x000c + #define CHANNEL_0_1_INT_MASK 0x80000003 +#define PHYTIUM_DMA_BDLPU(x) (0x40 * x + 0x0040) +#define PHYTIUM_DMA_BDLPL(x) (0x40 * x + 0x0044) +#define PHYTIUM_DMA_CHALX_DEV_ADDR(x) (0x40 * x + 0x0048) + #define E2000_LSD_I2S_BASE 0x28009000 + #define PLAYBACK_ADDRESS_OFFSET 0x1c8 + #define CAPTRUE_ADDRESS_OFFSET 0x1c0 +#define PHYTIUM_DMA_CHALX_LVI(x) (0x40 * x + 0x004c) +#define PHYTIUM_DMA_LPIB(x) (0x40 * x + 0x0050) +#define PHYTIUM_DMA_CHALX_CBL(x) (0x40 * x + 0x0054) +#define PHYTIUM_DMA_CHALX_CTL(x) (0x40 * x + 0x0058) + #define PLAYBACK_START 0x1 + #define CAPTURE_START 0x5 + #define CTL_STOP 0x0 +#define PHYTIUM_DMA_CHALX_DSIZE(x) (0x40 * x + 0x0064) + #define BYTE_4 0x0 + #define BYTE_2 0x2 + #define D_SIZE(byte_mode, dir) (byte_mode << (dir*2)) +#define PHYTIUM_DMA_CHALX_DLENTH(x) (0x40 * x + 0x0068) + #define D_LENTH 0x0 +/*********************register end ***************************/ + + +static inline unsigned int +phyt_readl_reg(void *base, uint32_t offset) +{ + unsigned int data; + + data = readl(base + offset); + return data; +} + +static inline void +phyt_writel_reg(void *base, uint32_t offset, uint32_t data) +{ + writel(data, base + offset); +} +#endif diff --git a/sound/soc/phytium/phytium-machine-v2.c b/sound/soc/phytium/phytium-machine-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..cf29c368b2219a9514e05c84010dd8df8553e7d3 --- /dev/null +++ b/sound/soc/phytium/phytium-machine-v2.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024, Phytium Techonology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include + +#define PHYT_MACHINE_V2_VERSION "1.0.0" + +/* PMDK widgets */ +static const struct snd_soc_dapm_widget phyt_machine_dapm_widgets[] = { + SND_SOC_DAPM_HP("Front", NULL), + SND_SOC_DAPM_HP("Rear", NULL), + + SND_SOC_DAPM_MIC("FrontIn", NULL), + SND_SOC_DAPM_MIC("RearIn", NULL), +}; + +/* PMDK control */ +static const struct snd_kcontrol_new phyt_machine_controls[] = { + SOC_DAPM_PIN_SWITCH("Front"), + SOC_DAPM_PIN_SWITCH("Rear"), + SOC_DAPM_PIN_SWITCH("FrontIn"), + SOC_DAPM_PIN_SWITCH("RearIn"), +}; + +/* PMDK connections */ +static const struct snd_soc_dapm_route phyt_machine_audio_map[] = { + {"INPUT1", NULL, "FrontIn"}, + {"INPUT2", NULL, "RearIn"}, + {"Front", NULL, "OUTPUT1"}, + {"Rear", NULL, "OUTPUT2"}, +}; + +#define PMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \ + SND_SOC_DAIFMT_CBS_CFS) + +SND_SOC_DAILINK_DEFS(phyt_machine, + DAILINK_COMP_ARRAY(COMP_CPU("phytium-i2s-v2")), + DAILINK_COMP_ARRAY(COMP_CODEC("PHYT1002:00", "phytium-hifi-v2")), + DAILINK_COMP_ARRAY(COMP_PLATFORM("snd-soc-dummy"))); + +static struct snd_soc_dai_link phyt_machine_dai[] = { + { + .name = "PHYTIUM HIFI V2", + .stream_name = "PHYTIUM HIFT V2", + .dai_fmt = PMDK_DAI_FMT, + SND_SOC_DAILINK_REG(phyt_machine), + }, +}; + +static struct snd_soc_card phyt_machine_card = { + .name = "PHYT-MIE-V2", + .owner = THIS_MODULE, + .dai_link = phyt_machine_dai, + .num_links = ARRAY_SIZE(phyt_machine_dai), + + .dapm_widgets = phyt_machine_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(phyt_machine_dapm_widgets), + .controls = phyt_machine_controls, + .num_controls = ARRAY_SIZE(phyt_machine_controls), + .dapm_routes = phyt_machine_audio_map, + .num_dapm_routes = ARRAY_SIZE(phyt_machine_audio_map), +}; + +static int phyt_machine_probe(struct platform_device *pdev) +{ + struct snd_soc_card *card = &phyt_machine_card; + struct device *dev = &pdev->dev; + + card->dev = dev; + + return devm_snd_soc_register_card(&pdev->dev, 
card); +} + +static const struct acpi_device_id phyt_machine_match[] = { + { "PHYT1001", 0}, + { } +}; +MODULE_DEVICE_TABLE(acpi, phyt_machine_match); + +static struct platform_driver phyt_machine_driver = { + .probe = phyt_machine_probe, + .driver = { + .name = "phyt_machine", + .acpi_match_table = phyt_machine_match, +#ifdef CONFIG_PM + .pm = &snd_soc_pm_ops, +#endif + }, +}; + +module_platform_driver(phyt_machine_driver); + +MODULE_DESCRIPTION("ALSA SoC Phytium Machine V2"); +MODULE_AUTHOR("Yang Xun "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PHYT_MACHINE_V2_VERSION); diff --git a/sound/soc/phytium/phytium_i2s.c b/sound/soc/phytium/phytium_i2s.c index ace159cd51159a6ef8da22a8b8933afdfa0947b8..07254f15c8475768787e336e8ac372571814bc8f 100644 --- a/sound/soc/phytium/phytium_i2s.c +++ b/sound/soc/phytium/phytium_i2s.c @@ -494,9 +494,13 @@ static int phytium_i2s_resume(struct snd_soc_component *component) static int phytium_i2s_component_probe(struct snd_soc_component *component) { + struct i2s_phytium *dev = snd_soc_component_get_drvdata(component); struct snd_soc_card *card = component->card; int ret; + if (!dev->detect) + return 0; + ret = snd_soc_card_jack_new_pins(card, "Headset Jack", SND_JACK_HEADSET, &hs_jack, hs_jack_pins, ARRAY_SIZE(hs_jack_pins)); @@ -1471,7 +1475,7 @@ static int phytium_i2s_probe(struct platform_device *pdev) if (i2s->irq_id < 0) return i2s->irq_id; - i2s->gpio_irq_id = platform_get_irq(pdev, 1); + i2s->gpio_irq_id = platform_get_irq_optional(pdev, 1); i2s->detect = true; if (i2s->gpio_irq_id < 0) diff --git a/sound/soc/phytium/pmdk_dp.c b/sound/soc/phytium/pmdk_dp.c index 85ff8e5667a8d5b9667687ec4b802dacbec28cd5..1f785413bf76b20d010256d3e94a2caf284081c8 100644 --- a/sound/soc/phytium/pmdk_dp.c +++ b/sound/soc/phytium/pmdk_dp.c @@ -8,8 +8,6 @@ #include #include -#define DAI_CNT(pmdk_dai_) sizeof(pmdk_dai_)/sizeof(struct snd_soc_dai_link) - struct pmdk_dp_private { struct snd_soc_jack jack0; struct snd_soc_jack jack1; @@ -48,7 +46,6 @@ 
static int pmdk_dp0_init(struct snd_soc_pcm_runtime *runtime) dev_err(card->dev, "Jack creation failed %d\n", ret); return ret; } - snd_soc_component_set_jack(component, &priv->jack0, NULL); return ret; } @@ -104,30 +101,31 @@ SND_SOC_DAILINK_DEFS(pmdk_dp2_dai, DAILINK_COMP_ARRAY(COMP_CODEC("hdmi-audio-codec.1346918658", "i2s-hifi")), DAILINK_COMP_ARRAY(COMP_PLATFORM("snd-soc-dummy"))); -static struct snd_soc_dai_link pmdk_dai_local[] = { -{ +static struct snd_soc_dai_link pmdk_dai0 = { .name = "Phytium dp0-audio", .stream_name = "Playback", .dai_fmt = SMDK_DAI_FMT, .init = pmdk_dp0_init, - .playback_only = true, SND_SOC_DAILINK_REG(pmdk_dp0_dai), -},{ + .playback_only = 1, +}; + +static struct snd_soc_dai_link pmdk_dai1 = { .name = "Phytium dp1-audio", .stream_name = "Playback", .dai_fmt = SMDK_DAI_FMT, .init = pmdk_dp1_init, - .playback_only = true, SND_SOC_DAILINK_REG(pmdk_dp1_dai), -}, -{ + .playback_only = 1, +}; + +static struct snd_soc_dai_link pmdk_dai2 = { .name = "Phytium dp2-audio", .stream_name = "Playback", .dai_fmt = SMDK_DAI_FMT, .init = pmdk_dp2_init, - .playback_only = true, SND_SOC_DAILINK_REG(pmdk_dp2_dai), -}, + .playback_only = 1, }; static struct snd_soc_card pmdk = { @@ -148,27 +146,43 @@ static int pmdk_sound_probe(struct platform_device *pdev) struct pmdk_dp_private *priv; struct snd_soc_dai_link *pmdk_dai; int num_dp = 2; - char dp_mask = 0x7; - int i,j = 0; - card->dev = &pdev->dev; + int dp_mask = 3; + card->dev = &pdev->dev; device_property_read_u32(&pdev->dev, "num-dp", &num_dp); - device_property_read_u8(&pdev->dev, "dp-mask", &dp_mask); + device_property_read_u32(&pdev->dev, "dp-mask", &dp_mask); pmdk_dai = devm_kzalloc(&pdev->dev, num_dp * sizeof(*pmdk_dai), GFP_KERNEL); if (!pmdk_dai) return -ENOMEM; - if (!num_dp || num_dp > DAI_CNT(pmdk_dai_local)) - return -EINVAL; - - for(i = 0; i < num_dp; i++) { - for (; j < DAI_CNT(pmdk_dai_local); j++) { - if (BIT(j) & dp_mask) { - pmdk_dai[i] = pmdk_dai_local[j]; - j++; - break; - } + 
switch (num_dp) { + case 1: + if (dp_mask == 2) + pmdk_dai[0] = pmdk_dai1; + else if (dp_mask == 4) + pmdk_dai[0] = pmdk_dai2; + else + pmdk_dai[0] = pmdk_dai0; + break; + case 2: + if (dp_mask == 5) { + pmdk_dai[0] = pmdk_dai0; + pmdk_dai[1] = pmdk_dai2; + } else if (dp_mask == 6) { + pmdk_dai[0] = pmdk_dai1; + pmdk_dai[1] = pmdk_dai2; + } else { + pmdk_dai[0] = pmdk_dai0; + pmdk_dai[1] = pmdk_dai1; } + break; + case 3: + pmdk_dai[0] = pmdk_dai0; + pmdk_dai[1] = pmdk_dai1; + pmdk_dai[2] = pmdk_dai2; + break; + default: + return -EINVAL; } card->dai_link = pmdk_dai; @@ -209,6 +223,6 @@ static struct platform_driver pmdk_sound_driver = { module_platform_driver(pmdk_sound_driver); -MODULE_AUTHOR("Zhang Yiqun"); +MODULE_AUTHOR("Zhang Yiqun "); MODULE_DESCRIPTION("ALSA SoC PMDK DP"); MODULE_LICENSE("GPL");