diff --git a/Documentation/devicetree/bindings/dma/phytium,gdma.yaml b/Documentation/devicetree/bindings/dma/phytium,gdma.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b5c70dba5609178ac5b121fcabfae0e47b3a07cd --- /dev/null +++ b/Documentation/devicetree/bindings/dma/phytium,gdma.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +title: Phytium GDMA Controller bindings + +description: + The Phytium GDMA is a general-purpose direct memory access + controller capable of supporting 16 independent DMA channels. + DMA clients connected to the GDMA controller must use the format + described in the dma-common.yaml file, using a one cell specifier for + each channel, just provide the channel id to use. + +maintainers: + - Huang Jie + +allOf: + - $ref: "dma-controller.yaml#" + +properties: + "#dma-cells": + const: 1 + + compatible: + const: phytium,gdma + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + description: set interrupts according to the programming manual + + dma-channels: + minimum: 1 + maximum: 16 + + max-outstanding: + minimum: 1 + maximum: 64 + description: set max-outstanding according to the programming manual + +required: + - compatible + - reg + - interrupts + - dma-channels + - max-outstanding + +unevaluatedProperties: false + +examples: + - | + gdma: gdma@32b34000 { + compatible = "phytium,gdma"; + dma-channels = <16>; + max-outstanding = <16>; + reg = <0x0 0x32b34000 0x0 0x1000>; + interrupts = ; + #dma-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/leds/phytnet_led.yaml b/Documentation/devicetree/bindings/leds/phytnet_led.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ae0acf9ec5ec56117ff50e1b65fd084ebc7b5e77 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/phytnet_led.yaml @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: 
http://devicetree.org/schemas/net/phytnet_led.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium mac led controller + +maintainers: + - LongShixiang + +description: + This module is used to control the mac led. + +properties: + compatible: + const: phytium,net_led + net_dev: + maxItems: 1 + description: Phandle of the specified net device + led-gpios: + minItems: 1 + maxItems: 2 + description: |- + the gpios used for led control based on net_dev condition. + One represents LINK condition, another represents ACT condition. + +required: + - compatible + - net_dev + - led-gpios + +examples: + - | + gpiochip0: gpio_controller{ + ... + } + eth0: ethernet{ + ... + } + phytium_net_led0 { + compatible = "phytium,net_led"; + net_dev = <&eth0>; + led-gpios = <&gpiochip0 9 GPIO_ACTIVE_HIGH>, /* link */ + <&gpiochip0 11 GPIO_ACTIVE_HIGH>; /* act */ + }; diff --git a/Documentation/devicetree/bindings/mmc/phytium-mci.txt b/Documentation/devicetree/bindings/mmc/phytium-mci.txt index 129efb1fb2f6940e5f462e5ed5189ce357a5595e..4a67289b063365e563b27aed4f18fbe144cb1eaa 100644 --- a/Documentation/devicetree/bindings/mmc/phytium-mci.txt +++ b/Documentation/devicetree/bindings/mmc/phytium-mci.txt @@ -9,6 +9,12 @@ Required properties: - clocks : phandles to input clocks. - clock-names : should be "phytium_mci_clk". - interrupts : mmc controller interrupt. +- use-hold : enable hold registers. +- clk-set : enable clock phase setting. +- clk-smpl-drv-25m : 25M clock phase setting. +- clk-smpl-drv-50m : 50M clock phase setting. +- clk-smpl-drv-66m : 66M clock phase setting. +- clk-smpl-drv-100m : 100M clock phase setting. 
Examples: - Within .dtsi: @@ -31,6 +37,9 @@ Examples: sd-uhs-sdr25; sd-uhs-sdr50; no-mmc; + use-hold; + clk-set; + clk-smpl-drv-50m = <0x0201>; status = "ok"; }; diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 27168b116916324695cfe57a9cec224a09ff63c8..9f6ecd98443a601c989abd1ca3693d3967444998 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -15,7 +15,8 @@ Required properties: Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. - Use "cdns,phytium-gem" for Phytium SoCs. + Use "cdns,phytium-gem-1.0" for Phytium SoCs. + Use "cdns,phytium-gem-2.0" for Phytium SoCs. Or the generic form: "cdns,emac". - reg: Address and length of the register set for the device - interrupts: Should contain macb interrupt diff --git a/Documentation/devicetree/bindings/net/phytmac.txt b/Documentation/devicetree/bindings/net/phytmac.txt new file mode 100644 index 0000000000000000000000000000000000000000..f7b2e9378134239844850d1268dc675e58586c84 --- /dev/null +++ b/Documentation/devicetree/bindings/net/phytmac.txt @@ -0,0 +1,35 @@ +* Phytium xgmac Ethernet controller + +Required properties: +- compatible: Should be "phytium,gmac-[version]" + Use "phytium,gmac-1.0" for gmac version 1.0 on Phytium SoCs + Use "phytium,gmac-2.0" for gmac version 2.0 on Phytium SoCs + +- reg: Address and length of the register set for the device +- interrupts: Should contain phytmac interrupt +- queue-number: The number of queues for the device +- phy-mode: See ethernet.txt file in the same directory +- fixed-link:See ethernet.txt file in the same directory +- dma-coherent: Boolean property, must only be present if memory + accesses performed by the device are cache coherent. 
+ +The MAC address will be determined using the optional properties +defined in ethernet.txt. + +Examples: + + eth0@36ce0000 { + compatible = "phytium,gmac-1.0"; + reg = <0x00 0x36ce0000 0x00 0x2000>; + interrupts = <0x00 0x20 0x04 0x00 0x21 0x04 0x00 0x22 0x04 0x00 0x23 0x04>; + queue-number = <0x04>; + magic-packet; + dma-coherent; + phy-mode = "usxgmii"; + status = "okay"; + + fixed-link { + speed = <0x2710>; + full-duplex; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 653ad05ff65fc41a84131e4114b37dbfbbda58a8..cbeff9b4d8c573ebbff686937594e4111530f783 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1845,6 +1845,80 @@ W: http://hackndev.com S: Maintained F: arch/arm/mach-pxa/palmz72.* +ARM/PHYTIUM SOC SUPPORT +M: Chen Baozi +S: Maintained +W: https://www.phytium.com.cn +F: arch/arm64/boot/dts/phytium/* +F: arch/arm64/include/asm/ras.h +F: arch/arm64/kernel/ras.c +F: arch/arm/include/asm/kvm_ras.h +F: arch/arm64/include/asm/kvm_ras.h +F: Documentation/devicetree/bindings/edac/phytium-pe220x-edac.txt +F: Documentation/devicetree/bindings/dma/phytium-ddma.yaml +F: Documentation/devicetree/bindings/dma/phytium,gdma.yaml +F: Documentation/devicetree/bindings/gpio/gpio-phytium.txt +F: Documentation/devicetree/bindings/gpio/gpio-phytium-sgpio.txt +F: Documentation/devicetree/bindings/gpu/phytium-display.txt +F: Documentation/devicetree/bindings/hwlock/phytium-hwspinlock.txt +F: Documentation/devicetree/bindings/hwmon/tacho-phytium.txt +F: Documentation/devicetree/bindings/i2c/i2c-phytium.txt +F: Documentation/devicetree/bindings/iio/adc/phytium-adc.txt +F: Documentation/devicetree/bindings/input/phytium-keypad.txt +F: Documentation/devicetree/bindings/interrupt-controller/phytium,ixic.txt +F: Documentation/devicetree/bindings/ipmi/phytium,bt-bmc.txt +F: Documentation/devicetree/bindings/ipmi/phytium,kcs-bmc.txt +F: Documentation/devicetree/bindings/leds/phytnet_led.yaml +F: Documentation/devicetree/bindings/mailbox/phytium-mailbox.txt +F: 
Documentation/devicetree/bindings/media/phytium-jpeg.txt +F: Documentation/devicetree/bindings/mmc/phytium-mci.txt +F: Documentation/devicetree/bindings/net/can/phytium-can.txt +F: Documentation/devicetree/bindings/net/phytmac.txt +F: Documentation/devicetree/bindings/pci/phytium,phytium-pcie-ep.txt +F: Documentation/devicetree/bindings/pwm/pwm-phytium.txt +F: Documentation/devicetree/bindings/rng/phytium-rng.txt +F: Documentation/devicetree/bindings/sound/phytium,hda.yaml +F: Documentation/devicetree/bindings/sound/phytium-i2s.txt +F: Documentation/devicetree/bindings/spi/spi-phytium.txt +F: Documentation/devicetree/bindings/w1/phytium-w1.txt +F: drivers/char/hw_random/phytium-rng.c +F: drivers/char/ipmi/bt_bmc_phytium.c +F: drivers/char/ipmi/kcs_bmc_phytium.c +F: drivers/char/phytnetled/* +F: drivers/dma/phytium/* +F: drivers/edac/phytium_edac.c +F: drivers/gpio/gpio-phytium* +F: drivers/gpio/gpio-phytium-sgpio.c +F: drivers/gpu/drm/phytium/* +F: drivers/hwmon/tacho-phytium.c +F: drivers/hwspinlock/phytium_hwspinlock.c +F: drivers/i2c/busses/i2c-phytium-* +F: drivers/iio/adc/phytium-adc.c +F: drivers/input/keyboard/phytium-keypad.c +F: drivers/input/serio/phytium-ps2.c +F: drivers/irqchip/irq-phytium-ixic.c +F: drivers/mailbox/phytium_mailbox.c +F: drivers/media/platform/phytium-jpeg/phytium_jpeg* +F: drivers/mfd/phytium_px210_i2s_lsd.c +F: drivers/mfd/phytium_px210_i2s_mmd.c +F: drivers/mmc/host/phytium-mci* +F: drivers/mmc/host/phytium-sdci.* +F: drivers/mtd/nand/raw/phytium_nand* +F: drivers/mtd/spi-nor/phytium-quadspi.c +F: drivers/net/can/phytium/* +F: drivers/net/ethernet/phytium/* +F: drivers/pci/controller/pcie-phytium* +F: drivers/perf/phytium/* +F: drivers/perf/phytium/phytium_pcie_pmu.c +F: drivers/pwm/pwm-phytium.c +F: drivers/spi/spi-phytium* +F: drivers/spi/spi-phytium-dma.c +F: drivers/tty/serial/phytium-uart.c +F: drivers/usb/phytium/* +F: drivers/w1/masters/phytium_w1.c +F: sound/pci/hda/hda_phytium.* +F: sound/soc/phytium/* + ARM/PLEB SUPPORT M: 
Peter Chubb W: http://www.disy.cse.unsw.edu.au/Hardware/PLEB diff --git a/README.md b/README.md index fe94b83cbda80ce4e37edc54257c4f2e47679b56..582ab5dbc644414386052e15fb2494d2a5234f44 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,21 @@ # linux-kernel-xenomai ## 1 项目简介 -Xenomai在Phytium CPU配套的开发板上配套的Linux kernel,推荐使用Linux kernel 5.10.153-dovetail3和Xenomai v3.2.2。
+Xenomai在Phytium CPU配套的开发板上配套的Linux kernel,推荐使用Linux kernel 5.10.209-dovetail1和Xenomai v3.2.4。
本开源仓库的内核代码支持E2000Q,飞腾派等飞腾CPU。
## 2 安装和使用教程 -### 2.1 Xenomai配套Linux kernel 5.10.153-dovetail3的用户手册和源代码获取路径 -Xenomai用户手册名称为xenomai_user_manual-v3.x.x.pdf,获取方法如下:
+### 2.1 Xenomai配套Linux kernel 5.10.209-dovetail1的用户手册和源代码获取路径 +Xenomai用户手册名称为 Xenomai_内核_5.10_用户使用手册_v3.x.x.pdf,获取方法如下:
https://gitee.com/phytium_embedded/phytium-embedded-docs/tree/master/linux/xenomai
Linux kernel源代码链接如下:
-https://gitee.com/phytium_embedded/linux-kernel-xenomai
+https://gitee.com/phytium_embedded/linux-kernel-xenomai/tree/5.10.209-dovetail1
-Xenomai版本是3.2.2,源代码链接如下:
-https://source.denx.de/Xenomai/xenomai/-/tree/v3.2.2
+Xenomai版本是3.2.4,源代码链接如下:
+https://source.denx.de/Xenomai/xenomai/-/tree/v3.2.4
### 2.2 Xenomai配套Linux kernel 4.19.209-cip59的用户手册和源代码获取路径 -Xenomai用户手册名称为xenomai_user_manual-v2.x.x.pdf,获取方法如下:
+Xenomai用户手册名称为 Xenomai_内核_4.19_用户使用手册_v2.x.x.pdf,获取方法如下:
https://gitee.com/phytium_embedded/phytium-embedded-docs/tree/master/linux/xenomai
Linux kernel源代码链接如下:
diff --git a/arch/arm64/boot/dts/phytium/pe220x.dtsi b/arch/arm64/boot/dts/phytium/pe220x.dtsi index 6cc44897f07df81099ed6681917c67648b37ea8b..17e597466cbb5d6d71061a3f695d6a95d60a83b1 100644 --- a/arch/arm64/boot/dts/phytium/pe220x.dtsi +++ b/arch/arm64/boot/dts/phytium/pe220x.dtsi @@ -958,6 +958,15 @@ cpu_scp_hpri: scp-shmem@1 { }; }; + gdma: gdma@32b34000 { + compatible = "phytium,gdma"; + dma-channels = <16>; + max-outstanding = <16>; + reg = <0x0 0x32b34000 0x0 0x1000>; + interrupts = ; + #dma-cells = <1>; + }; + hwspinlock: spinlock@32b36000 { compatible = "phytium,hwspinlock"; reg = <0x0 0x32b36000 0x0 0x1000>; diff --git a/arch/arm64/boot/dts/phytium/phytiumpi_firefly.dts b/arch/arm64/boot/dts/phytium/phytiumpi_firefly.dts index 6a1275dc4a0919535f1c7f01223d4affa5c64d4a..3f4b3202c019595d816d8d7ab0eba50bea7df76d 100644 --- a/arch/arm64/boot/dts/phytium/phytiumpi_firefly.dts +++ b/arch/arm64/boot/dts/phytium/phytiumpi_firefly.dts @@ -11,6 +11,7 @@ /memreserve/ 0x80000000 0x10000; #include "pe2204.dtsi" +#include "dt-bindings/gpio/gpio.h" /{ model = "Phytium Pi Board"; @@ -44,8 +45,12 @@ sound_card: sound { simple-audio-card,format = "i2s"; simple-audio-card,name = "phytium,pe220x-i2s-audio"; simple-audio-card,pin-switches = "mic-in"; - simple-audio-card,widgets = "Microphone","mic-in"; + simple-audio-card,widgets = + "Microphone","mic-in", + "Headphone","Headphones"; simple-audio-card,routing = "MIC2","mic-in"; + simple-audio-card,hp-det-gpio = <&gpio2 11 GPIO_ACTIVE_LOW>; + simple-audio-card,cpu { sound-dai = <&i2s0>; }; @@ -76,8 +81,6 @@ mio14: i2c@28030000 { status = "okay"; codec0: es8336@10 { - det-gpios = <&gpio2 11 0>; - sel-gpios = <&gpio2 7 0>; #sound-dai-cells = <0>; compatible = "everest,es8336"; reg = <0x10>; diff --git a/arch/arm64/configs/phytium_defconfig b/arch/arm64/configs/phytium_defconfig index 8d481046d5be2ef71c3aea9115fe2009965f3f08..45aa71002d793574fafab9f1847a8d9949242a00 100644 --- a/arch/arm64/configs/phytium_defconfig +++ 
b/arch/arm64/configs/phytium_defconfig @@ -1,4 +1,4 @@ -CONFIG_LOCALVERSION="-phytium-embeded-v2.1" +CONFIG_LOCALVERSION="-phytium-embedded-v2.2" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -104,6 +104,7 @@ CONFIG_KSM=y CONFIG_MEMORY_FAILURE=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CMA=y +CONFIG_ZSMALLOC=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -149,9 +150,9 @@ CONFIG_BT_LEDS=y CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIUART=y CONFIG_BT_HCIUART_3WIRE=y -CONFIG_CFG80211=y +CONFIG_CFG80211=m CONFIG_CFG80211_WEXT=y -CONFIG_MAC80211=y +CONFIG_MAC80211=m CONFIG_MAC80211_LEDS=y CONFIG_RFKILL=y CONFIG_NET_9P=m @@ -177,6 +178,7 @@ CONFIG_MTD_NAND_PHYTIUM_PLAT=y CONFIG_MTD_SPI_NOR=y CONFIG_SPI_PHYTIUM_QUADSPI=y CONFIG_OF_CONFIGFS=y +CONFIG_ZRAM=m CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y @@ -214,6 +216,8 @@ CONFIG_AMD_XGBE=y CONFIG_ATL1C=m CONFIG_MACB=y CONFIG_THUNDER_NIC_PF=y +CONFIG_PHYTMAC=y +CONFIG_PHYTMAC_PLATFORM=y # CONFIG_NET_VENDOR_HISILICON is not set # CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_E1000=m @@ -448,6 +452,7 @@ CONFIG_DMADEVICES=y CONFIG_MV_XOR_V2=y CONFIG_PL330_DMA=y CONFIG_PHYTIUM_DDMA=y +CONFIG_PHYTIUM_GDMA=y CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m @@ -549,7 +554,10 @@ CONFIG_NLS_ISO8859_1=y CONFIG_SECURITY=y CONFIG_CRYPTO_USER=y CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_ARC4=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_USER_API_HASH=y diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index e11b5da6f828f3bd8fffa23094b1a31e797d7c9c..424712491eae29c5cc5584fdaefbdaa856311e39 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -941,6 +941,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size) ncomp = (struct acpi_iort_named_component *)node->node_data; + if (!ncomp->memory_address_limit) { + pr_warn(FW_BUG "Named component missing memory address 
limit\n"); + return -EINVAL; + } + *size = ncomp->memory_address_limit >= 64 ? U64_MAX : 1ULL<memory_address_limit; @@ -960,6 +965,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size) rc = (struct acpi_iort_root_complex *)node->node_data; + if (!rc->memory_address_limit) { + pr_warn(FW_BUG "Root complex missing memory address limit\n"); + return -EINVAL; + } + *size = rc->memory_address_limit >= 64 ? U64_MAX : 1ULL<memory_address_limit; @@ -1016,8 +1026,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) * retrieved from firmware. */ dev->bus_dma_mask = mask; - dev->coherent_dma_mask = mask; - *dev->dma_mask = mask; + dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); + *dev->dma_mask = min(*dev->dma_mask, mask); } *dma_addr = dmaaddr; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 1df9cb8e659e2d9693a1300af93a1cb648900a07..e6c663ce4f30e2d4e71dd4988a69d96c3e01c444 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -552,6 +552,7 @@ config ADI and SSM (Silicon Secured Memory). Intended consumers of this driver include crash and makedumpfile. 
+source "drivers/char/phytnetled/Kconfig" endmenu config RANDOM_TRUST_CPU diff --git a/drivers/char/Makefile b/drivers/char/Makefile index b8d42b4e979bbc225bec63b6f5ea1dcf10981c50..5ab7e1c6ee1cb0e9d3d447343fbad0013030f9f1 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -58,3 +58,5 @@ js-rtc-y = rtc.o obj-$(CONFIG_XILLYBUS) += xillybus/ obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o obj-$(CONFIG_ADI) += adi.o + +obj-$(CONFIG_PHYTNET_LED) += phytnetled/ diff --git a/drivers/char/ipmi/bt_bmc_phytium.c b/drivers/char/ipmi/bt_bmc_phytium.c index 1d4a50c14fa3001315a8e3291b5c221fbc157a62..69567e7fe7886aae7e0f7b2d5ca462ac847b03ec 100644 --- a/drivers/char/ipmi/bt_bmc_phytium.c +++ b/drivers/char/ipmi/bt_bmc_phytium.c @@ -72,6 +72,8 @@ #define BT_BMC_BUFFER_SIZE 256 +#define BT_BMC_DRIVER_VERSION "1.1.1" + struct bt_bmc { struct device dev; struct miscdevice miscdev; @@ -531,3 +533,4 @@ module_platform_driver(bt_bmc_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cheng Quan map, reg, &val); WARN(rc != 0, "regmap_read() failed: %d\n", rc); + if (reg == LPC_IDR1 || reg == LPC_IDR2 || + reg == LPC_IDR3 || reg == LPC_IDR4) + rc = regmap_read(priv->map, reg, &val); + return rc == 0 ? 
(u8) val : 0; } @@ -100,6 +106,10 @@ static void phytium_kcs_outb(struct kcs_bmc *kcs_bmc, u32 reg, u8 data) struct phytium_kcs_bmc *priv = kcs_bmc_priv(kcs_bmc); int rc; + if (reg == LPC_ODR1 || reg == LPC_ODR2 || + reg == LPC_ODR3 || reg == LPC_ODR4) + regmap_write(priv->map, reg, data); + rc = regmap_write(priv->map, reg, data); WARN(rc != 0, "regmap_write() failed: %d\n", rc); } @@ -322,3 +332,4 @@ module_platform_driver(phytium_kcs_bmc_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Cheng Quan "); MODULE_DESCRIPTION("Phytium device interface to the KCS BMC device"); +MODULE_VERSION(KCS_BMC_DRIVER_VERSION); diff --git a/drivers/char/phytnetled/Kconfig b/drivers/char/phytnetled/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..c868ef6f53b4fc640723881c7b6b5ace8b74e634 --- /dev/null +++ b/drivers/char/phytnetled/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# PCMCIA character device configuration +# +config PHYTNET_LED + tristate "Phytium mac led control module" + depends on PHYTMAC + depends on GPIO_PHYTIUM_PLAT + default m + help + If you have a network (Ethernet) controller of this type and + want to use it control port led say Y or M here. \ No newline at end of file diff --git a/drivers/char/phytnetled/Makefile b/drivers/char/phytnetled/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ea129f5e485bae8dbdba531dd9608256cb02cc20 --- /dev/null +++ b/drivers/char/phytnetled/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PHYTNET_LED) += phytnet_led.o diff --git a/drivers/char/phytnetled/phytnet_led.c b/drivers/char/phytnetled/phytnet_led.c new file mode 100644 index 0000000000000000000000000000000000000000..0b5fbb750250fd32c3604a2fb29f1d0a91a768a0 --- /dev/null +++ b/drivers/char/phytnetled/phytnet_led.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022-2023 Phytium Technology Co.,Ltd. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include "phytnet_led.h" + +#define DRIVER_NAME "phytnet_led" +#define DRIVER_VERSION "1.0" +#define DRIVER_AUTHOR "LongShixiang " +#define DRIVER_DESC "net device led control module" +#define NET_DEV_PROPNAME "net_dev" +#define LED_OF_NAME "led" +#define CHECK_INTERVAL 125 /* Unit: ms */ +#define NDEV_CHECK_DELAY 30000 /* Unit: 30s */ +#define LED_ON 1 +#define LED_OFF 0 +#define LINK_OFFSET 0 +#define ACT_OFFSET 1 + +#if defined(CONFIG_OF) +static const struct of_device_id phytnet_led_of_ids[] = { + { .compatible = "phytium,net_led"}, + {} +}; + +MODULE_DEVICE_TABLE(of, phytnet_led_of_ids); +#endif /* CONFIG_OF */ + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytnet_acpi_ids[] = { + { .id = "PHYT800C"}, + {} +}; +MODULE_DEVICE_TABLE(acpi, phytnet_acpi_ids); +#else +#define phytnet_acpi_ids NULL +#endif /* CONFIG_ACPI */ + +static void +led_on(struct gpio_desc *gd) +{ + gpiod_set_value(gd, LED_ON); +} + +static void +led_off(struct gpio_desc *gd) +{ + gpiod_set_value(gd, LED_OFF); +} + +static void +led_blink(struct led_data *phytnet_led) +{ + phytnet_led->act_val = !phytnet_led->act_val; + gpiod_set_value(phytnet_led->act, phytnet_led->act_val); +} + +static int +port_is_linkup(struct led_data *phytnet_led) +{ + if (netif_carrier_ok(phytnet_led->ndev)) + return true; + else + return false; +} + +static bool +port_is_act(struct led_data *phytnet_led) +{ + bool ret = false; + + if (phytnet_led->ndev_rx != phytnet_led->ndev->stats.rx_packets) { + phytnet_led->ndev_rx = phytnet_led->ndev->stats.rx_packets; + ret = true; + } + + if (phytnet_led->ndev_tx != phytnet_led->ndev->stats.tx_packets) { + phytnet_led->ndev_tx = phytnet_led->ndev->stats.tx_packets; + ret = true; + } + + return ret; +} + +static void +led_control(struct led_data *phytnet_led) +{ + while (!phytnet_led->led_stop) { + msleep(CHECK_INTERVAL); + + if (!netif_running(phytnet_led->ndev)) { + led_off(phytnet_led->link); + 
led_off(phytnet_led->act); + continue; + } + + if (port_is_linkup(phytnet_led)) + led_on(phytnet_led->link); + else + led_off(phytnet_led->link); + + if (port_is_act(phytnet_led)) + led_blink(phytnet_led); + else + led_off(phytnet_led->act); + } +} + +static int +of_ndev_init(struct led_data *phytnet_led) +{ + struct device_node *net_node; + + net_node = of_parse_phandle(phytnet_led->pdev->dev.of_node, NET_DEV_PROPNAME, 0); + if (!net_node) { + dev_err(&phytnet_led->pdev->dev, "Failed to get netdev ofnode from device tree\n"); + return -ENODEV; + } + + phytnet_led->ndev = of_find_net_device_by_node(net_node); + + if (!phytnet_led->ndev) { + dev_err(&phytnet_led->pdev->dev, "Failed to get acpi ndev\n"); + return -ENODEV; + } + + dev_info(&phytnet_led->pdev->dev, "Successfully get ndev...\n"); + dev_hold(phytnet_led->ndev); + + return 0; +} + + +static int +acpi_ndev_init(struct led_data *phytnet_led) +{ + int err; + struct net_device *find_ndev; + const char *ndev_acpi_path; + acpi_handle net_handler; + struct acpi_device *adev; + acpi_status status; + struct device *find_dev; + + err = device_property_read_string(&phytnet_led->pdev->dev, + NET_DEV_PROPNAME, &ndev_acpi_path); + if (err) { + dev_err(&phytnet_led->pdev->dev, "Failed to read net_dev property!\n"); + return -ENODEV; + } + + status = acpi_get_handle(NULL, (acpi_string)ndev_acpi_path, &net_handler); + if (ACPI_FAILURE(status)) { + dev_err(&phytnet_led->pdev->dev, "Failed to get acpi handler on path: %s\n", + ndev_acpi_path); + return -ENODEV; + } + + err = acpi_bus_get_device(net_handler, &adev); + if (err) { + dev_err(&phytnet_led->pdev->dev, "Failed to get adev dev\n"); + return -ENODEV; + } + + for_each_netdev(&init_net, find_ndev) { + if (find_ndev->dev.parent != NULL) { + find_dev = find_ndev->dev.parent; + if (&adev->fwnode == find_dev->fwnode) + phytnet_led->ndev = find_ndev; + } + } + + if (!phytnet_led->ndev) { + dev_err(&phytnet_led->pdev->dev, "Failed to get acpi ndev\n"); + return -ENODEV; + } 
+ + dev_info(&phytnet_led->pdev->dev, "Successfully get ndev...\n"); + dev_hold(phytnet_led->ndev); + + return 0; +} + +static int +gpio_init(struct led_data *phytnet_led) +{ + int err; + + phytnet_led->link = gpiod_get_index(&phytnet_led->pdev->dev, LED_OF_NAME, + LINK_OFFSET, GPIOD_OUT_HIGH); + if (IS_ERR(phytnet_led->link)) { + dev_err(&phytnet_led->pdev->dev, "Failed to get link led gpio, err code: %ld\n", + PTR_ERR(phytnet_led->link)); + + return PTR_ERR(phytnet_led->link); + } + + err = gpiod_direction_output(phytnet_led->link, LED_OFF); + if (err) { + dev_err(&phytnet_led->pdev->dev, "Failed to set linkled dir, err code: %ld\n", + PTR_ERR(phytnet_led->link)); + return err; + } + + phytnet_led->act = gpiod_get_index(&phytnet_led->pdev->dev, LED_OF_NAME, + ACT_OFFSET, GPIOD_OUT_HIGH); + if (IS_ERR(phytnet_led->act)) { + dev_err(&phytnet_led->pdev->dev, "Failed to get actled gpio, err code:%d\n", err); + return PTR_ERR(phytnet_led->act); + } + + err = gpiod_direction_output(phytnet_led->act, LED_OFF); + if (err) { + dev_err(&phytnet_led->pdev->dev, "Failed to set act led gpio dir, err:%d\n", err); + return err; + } + + return 0; +} + +static void +led_init_and_control(struct work_struct *work) +{ + int err = -1; + struct led_data *phytnet_led = container_of(work, struct led_data, led_control_work.work); + + if (phytnet_led->pdev->dev.of_node) + err = of_ndev_init(phytnet_led); + else if (has_acpi_companion(&phytnet_led->pdev->dev)) + err = acpi_ndev_init(phytnet_led); + + if (err) { + dev_err(&phytnet_led->pdev->dev, "ndev init wrong\n"); + return; + } + + err = gpio_init(phytnet_led); + if (err) { + dev_err(&phytnet_led->pdev->dev, "gpio init wrong\n"); + return; + } + + led_control(phytnet_led); +} + +static int +net_led_probe(struct platform_device *pdev) +{ + struct led_data *phytnet_led; + + phytnet_led = devm_kzalloc(&pdev->dev, sizeof(struct led_data), GFP_KERNEL); + if (!phytnet_led) + return -ENOMEM; + + platform_set_drvdata(pdev, phytnet_led); + + 
phytnet_led->act = LED_OFF; + phytnet_led->pdev = pdev; + phytnet_led->led_stop = 0; + + INIT_DELAYED_WORK(&phytnet_led->led_control_work, led_init_and_control); + schedule_delayed_work(&phytnet_led->led_control_work, msecs_to_jiffies(NDEV_CHECK_DELAY)); + + return 0; +} + +static int +net_led_remove(struct platform_device *pdev) +{ + struct led_data *phytnet_led = platform_get_drvdata(pdev); + + phytnet_led->led_stop = 1; + cancel_delayed_work_sync(&phytnet_led->led_control_work); + + if (phytnet_led->ndev) + dev_put(phytnet_led->ndev); + + if (phytnet_led->link) { + led_off(phytnet_led->link); + gpiod_put(phytnet_led->link); + } + + if (phytnet_led->act) { + led_off(phytnet_led->act); + gpiod_put(phytnet_led->act); + } + + if (&pdev->dev) + devm_kfree(&pdev->dev, phytnet_led); + + return 0; +} + +static struct platform_driver net_led_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(phytnet_led_of_ids), + .acpi_match_table = ACPI_PTR(phytnet_acpi_ids), + }, + .probe = net_led_probe, + .remove = net_led_remove, +}; + +module_platform_driver(net_led_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/char/phytnetled/phytnet_led.h b/drivers/char/phytnetled/phytnet_led.h new file mode 100644 index 0000000000000000000000000000000000000000..8f2fbadde0f8b0ceb336a45d883da5c4580c09e4 --- /dev/null +++ b/drivers/char/phytnetled/phytnet_led.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2022-2023 Phytium Technology Co.,Ltd. 
+ * + */ +struct led_data { + struct platform_device *pdev; + struct net_device *ndev; + unsigned long ndev_rx, ndev_tx; + struct gpio_desc *link, *act; + struct delayed_work led_control_work; + int led_stop; + int act_val; +}; diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 0338885332a75b3f8cecde087ca73b40810b51a8..d2a65d26eb92cd30adfe43704671f68662130444 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -61,7 +61,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) if (ret) return ret; - if (clk_get_rate(priv->clk) != rate) + if (clk_get_rate(priv->clk) / 1000 * 1000 != rate) return -EIO; arch_set_freq_scale(policy->related_cpus, freq, diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index f93bb04f41cdd0abe6f8a3f4dda8dffd5ae260a5..ee18628c0291b3c84e23825b40f0beae021c6fff 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -628,6 +628,18 @@ config PHYTIUM_DDMA help Enable support for Phytium PE220x DDMA controller. +config PHYTIUM_GDMA + bool "Phytium GDMA support" + depends on (ARCH_PHYTIUM || COMPILE_TEST) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for Phytium GDMA controller. + + This engine provides acceleration for memory copy operations, it does + not support peripherals to memory data transfer. Say Y here if you have + such a chipset. If unsure, say N. 
+ # driver files source "drivers/dma/bestcomm/Kconfig" diff --git a/drivers/dma/phytium/Makefile b/drivers/dma/phytium/Makefile index 1dcd89b5c7923852f423c3f239ca99f1088d7a51..c2f7c9bafba547d1e27eaa63fc179b08d770d31b 100644 --- a/drivers/dma/phytium/Makefile +++ b/drivers/dma/phytium/Makefile @@ -1,3 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PHYTIUM_DDMA) += phytium-ddmac.o +obj-$(CONFIG_PHYTIUM_GDMA) += phytium-gdmac.o + diff --git a/drivers/dma/phytium/phytium-ddmac.c b/drivers/dma/phytium/phytium-ddmac.c index 734c8a9c680b847f6f848e34657d02338dc68ef1..1ed26b996eeb5dadfb52f19754c78931df42a86e 100644 --- a/drivers/dma/phytium/phytium-ddmac.c +++ b/drivers/dma/phytium/phytium-ddmac.c @@ -29,6 +29,7 @@ #include #include "phytium-ddmac.h" +#define DDMA_DRIVER_VERSION "1.1.1" static inline struct phytium_ddma_device *to_ddma_device(struct dma_chan *chan) { @@ -264,7 +265,7 @@ static void phytium_chan_start_xfer(struct phytium_ddma_chan *chan) chan->desc = to_ddma_desc(vdesc); chan->next_sg = 0; chan->current_sg = NULL; - dev_dbg(chan_to_dev(chan), "xfer start\n"); + dev_dbg(chan_to_dev(chan), "channel %d xfer start\n", chan->id); } if (chan->next_sg == chan->desc->num_sgs) @@ -321,7 +322,8 @@ static void phytium_chan_xfer_done(struct phytium_ddma_chan *chan) chan->busy = false; if (chan->next_sg == chan->desc->num_sgs) { - dev_dbg(chan_to_dev(chan), "xfer complete\n"); + dev_dbg(chan_to_dev(chan), + "channel %d xfer complete\n", chan->id); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; chan->current_sg = NULL; @@ -938,8 +940,21 @@ static struct platform_driver phytium_driver = { }, }; -module_platform_driver(phytium_driver); +static __init int phytium_ddma_init(void) +{ + return platform_driver_register(&phytium_driver); +} + +static void __exit phytium_ddma_exit(void) +{ + platform_driver_unregister(&phytium_driver); +} + +subsys_initcall(phytium_ddma_init); +module_exit(phytium_ddma_exit); MODULE_LICENSE("GPL v2"); 
MODULE_DESCRIPTION("Phytium DDMA Controller platform driver"); MODULE_AUTHOR("HuangJie "); +MODULE_VERSION(DDMA_DRIVER_VERSION); + diff --git a/drivers/dma/phytium/phytium-gdmac.c b/drivers/dma/phytium/phytium-gdmac.c new file mode 100644 index 0000000000000000000000000000000000000000..3fdf572196b0dd303b9c667d9a43320065c7f721 --- /dev/null +++ b/drivers/dma/phytium/phytium-gdmac.c @@ -0,0 +1,1028 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Device GDMA Controller driver. + * + * Copyright (c) 2023-2024 Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-gdmac.h" + +#define PHYTIUM_GDMA_DRIVER_VERSION "1.0.2" + +static inline struct phytium_gdma_device *to_gdma_device(struct dma_chan *chan) +{ + return container_of(chan->device, struct phytium_gdma_device, dma_dev); +} + +static inline struct phytium_gdma_chan *to_gdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct phytium_gdma_chan, vchan.chan); +} + +static inline struct phytium_gdma_desc *to_gdma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct phytium_gdma_desc, vdesc); +} + +static inline struct device *chan_to_dev(struct phytium_gdma_chan *chan) +{ + return chan->vchan.chan.device->dev; +} + +static inline +struct phytium_gdma_device *chan_to_gdma(struct phytium_gdma_chan *chan) +{ + return to_gdma_device(&chan->vchan.chan); +} + +static inline void phytium_gdma_write(const struct phytium_gdma_device *gdma, + const u32 reg, const u32 val) +{ + iowrite32(val, gdma->base + reg); +} + +static inline u32 phytium_gdma_read(const struct phytium_gdma_device *gdma, + const u32 reg) +{ + return ioread32(gdma->base + reg); +} + +static inline void phytium_chan_write(const struct phytium_gdma_chan *chan, + const u32 reg, const u32 val) +{ + iowrite32(val, 
chan->base + reg); +} + +static inline u32 phytium_chan_read(const struct phytium_gdma_chan *chan, + const u32 reg) +{ + return ioread32(chan->base + reg); +} + +static void phytium_gdma_dump_reg(struct phytium_gdma_chan *chan) +{ + struct phytium_gdma_device *gdma = chan_to_gdma(chan); + + dev_dbg(chan_to_dev(chan), "gdma registers:\n"); + dev_dbg(chan_to_dev(chan), "\tDMA_CTL: 0x%08x\n", + phytium_gdma_read(gdma, DMA_CTL)); + dev_dbg(chan_to_dev(chan), "\tDMA_STATE: 0x%08x\n", + phytium_gdma_read(gdma, DMA_STATE)); + dev_dbg(chan_to_dev(chan), "\tDMA_INTR_CTL: 0x%08x\n", + phytium_gdma_read(gdma, DMA_INTR_CTL)); + dev_dbg(chan_to_dev(chan), "\tDMA_LP_CTL: 0x%08x\n", + phytium_gdma_read(gdma, DMA_LP_CTL)); + dev_dbg(chan_to_dev(chan), "\tDMA_QOS_CFG: 0x%08x\n", + phytium_gdma_read(gdma, DMA_QOS_CFG)); + + dev_dbg(chan_to_dev(chan), "gdma channel %d registers\n", chan->id); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_CTRL: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_CTRL)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_MODE: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_MODE)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_INTR_CTL: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_INTR_CTL)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_STATE: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_STATE)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_LVI: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_LVI)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_TS: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_TS)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_UPSADDR: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_UPSADDR)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_LWSADDR: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_LWSADDR)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_UPDADDR: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_UPDADDR)); + dev_dbg(chan_to_dev(chan), "\tDMA_C%d_LWDADDR: 0x%08x\n", chan->id, + phytium_chan_read(chan, DMA_CX_LWDADDR)); + 
dev_dbg(chan_to_dev(chan), "\tDMA_C%d_XFER_CFG: 0x%08x\n", chan->id,
+		phytium_chan_read(chan, DMA_CX_XFER_CFG));
+	dev_dbg(chan_to_dev(chan), "\tDMA_C%d_LCP: 0x%08x\n", chan->id,
+		phytium_chan_read(chan, DMA_CX_LCP));
+}
+
+static void phytium_gdma_disable(const struct phytium_gdma_device *gdma)
+{
+	u32 val = phytium_gdma_read(gdma, DMA_CTL);
+
+	dev_dbg(gdma->dev, "gdma disable\n");
+	val &= ~DMA_CTL_EN;
+	phytium_gdma_write(gdma, DMA_CTL, val);
+}
+
+static void phytium_gdma_enable(const struct phytium_gdma_device *gdma)
+{
+	u32 val = phytium_gdma_read(gdma, DMA_CTL);
+
+	dev_dbg(gdma->dev, "gdma enable\n");
+	val |= DMA_CTL_EN;
+	phytium_gdma_write(gdma, DMA_CTL, val);
+}
+
+static void phytium_gdma_reset(const struct phytium_gdma_device *gdma)
+{
+	u32 val = 0;
+
+	dev_dbg(gdma->dev, "dma reset\n");
+	val = phytium_gdma_read(gdma, DMA_CTL);
+	val |= DMA_CTL_SRST;
+	phytium_gdma_write(gdma, DMA_CTL, val);
+
+	udelay(10);
+	val &= ~DMA_CTL_SRST;
+	phytium_gdma_write(gdma, DMA_CTL, val);
+}
+
+static void phytium_gdma_irq_disable(const struct phytium_gdma_device *gdma)
+{
+	u32 val = 0;
+
+	dev_dbg(gdma->dev, "gdma irq disable\n");
+	val = phytium_gdma_read(gdma, DMA_INTR_CTL);
+	val &= ~DMA_INT_EN;
+	phytium_gdma_write(gdma, DMA_INTR_CTL, val);
+}
+
+static void phytium_gdma_irq_enable(const struct phytium_gdma_device *gdma)
+{
+	u32 val = 0;
+
+	dev_dbg(gdma->dev, "gdma irq enable\n");
+	val = phytium_gdma_read(gdma, DMA_INTR_CTL);
+	val |= DMA_INT_EN;
+	phytium_gdma_write(gdma, DMA_INTR_CTL, val);
+}
+
+static void __maybe_unused
+phytium_gdma_read_mode_set(struct phytium_gdma_device *gdma,
+		enum arbitration_mode mode)
+{
+	u32 val = phytium_gdma_read(gdma, DMA_CTL);
+
+	switch (mode) {
+	case POLLING_MODE:
+		dev_dbg(gdma->dev, "set read polling mode\n");
+		val &= ~BIT(5);
+		break;
+	case QOS_MODE:
+		dev_dbg(gdma->dev, "set read qos mode\n");
+		val |= BIT(5);
+		break;
+	}
+	phytium_gdma_write(gdma, DMA_CTL, val);
+}
+
+static void
phytium_gdma_write_mode_set(struct phytium_gdma_device *gdma, + enum arbitration_mode mode) +{ + u32 val = phytium_gdma_read(gdma, DMA_CTL); + + switch (mode) { + case POLLING_MODE: + dev_dbg(gdma->dev, "set write polling mode\n"); + val &= ~BIT(4); + break; + case QOS_MODE: + dev_dbg(gdma->dev, "set write qos mode\n"); + val |= BIT(4); + break; + } + phytium_gdma_write(gdma, DMA_CTL, val); +} + +static void __maybe_unused +phytium_gdma_outstanding_set(struct phytium_gdma_device *gdma, u32 outstanding) +{ + u32 val = 0; + + dev_dbg(gdma->dev, "set dma outstanding %d\n", outstanding); + + val = phytium_gdma_read(gdma, DMA_CTL); + val &= ~DMA_OUTSTANDING_MASK; + val |= ((outstanding - 1) << 8); + phytium_gdma_write(gdma, DMA_CTL, val); +} + +static void __maybe_unused +phytium_gdma_read_qos_set(struct phytium_gdma_device *gdma, u32 qos) +{ + u32 val = 0; + + dev_dbg(gdma->dev, "set read qos %d", qos); + val = phytium_gdma_read(gdma, DMA_QOS_CFG); + val &= ~DMA_READ_QOS_MASK; + val |= (qos << 4); + phytium_gdma_write(gdma, DMA_QOS_CFG, val); +} + +static void __maybe_unused +phytium_gdma_write_qos_set(struct phytium_gdma_device *gdma, u32 qos) +{ + u32 val = phytium_gdma_read(gdma, DMA_QOS_CFG); + + dev_dbg(gdma->dev, "set read qos %d", qos); + val &= ~DMA_WRITE_QOS_MASK; + val |= qos; + phytium_gdma_write(gdma, DMA_QOS_CFG, val); +} + +static void phytium_chan_irq_enable(struct phytium_gdma_chan *chan, u32 mask) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq enable\n", chan->id); + val = phytium_gdma_read(chan_to_gdma(chan), DMA_INTR_CTL); + val |= DMA_INT_CHAL_EN(chan->id); + phytium_gdma_write(chan_to_gdma(chan), DMA_INTR_CTL, val); + phytium_chan_write(chan, DMA_CX_INTR_CTL, mask); +} + +static void phytium_chan_irq_disable(struct phytium_gdma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq disable\n", chan->id); + val = phytium_gdma_read(chan_to_gdma(chan), DMA_INTR_CTL); + val &= ~DMA_INT_CHAL_EN(chan->id); + 
phytium_gdma_write(chan_to_gdma(chan), DMA_INTR_CTL, val); +} + +static void phytium_chan_irq_clear(struct phytium_gdma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq clear\n", chan->id); + + val = phytium_chan_read(chan, DMA_CX_STATE); + phytium_chan_write(chan, DMA_CX_STATE, val); +} + +static void phytium_chan_disable(struct phytium_gdma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d disable\n", chan->id); + + /* disable channel */ + val = phytium_chan_read(chan, DMA_CX_CTRL); + val &= ~DMA_CHAL_EN; + phytium_chan_write(chan, DMA_CX_CTRL, val); +} + +static void phytium_chan_enable(struct phytium_gdma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d enable\n", chan->id); + + /* enable channel */ + val = phytium_chan_read(chan, DMA_CX_CTRL); + val |= DMA_CHAL_EN; + phytium_chan_write(chan, DMA_CX_CTRL, val); +} + +static void phytium_chan_clk_enable(struct phytium_gdma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d clk enable\n", chan->id); + + /* enable channel clock */ + val = phytium_gdma_read(chan_to_gdma(chan), DMA_LP_CTL); + val &= ~DMA_CX_CLK_EN(chan->id); + phytium_gdma_write(chan_to_gdma(chan), DMA_LP_CTL, val); +} + +static void phytium_chan_clk_disable(struct phytium_gdma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d clk disable\n", chan->id); + + /* disable channel clock */ + val = phytium_gdma_read(chan_to_gdma(chan), DMA_LP_CTL); + val |= DMA_CX_CLK_EN(chan->id); + phytium_gdma_write(chan_to_gdma(chan), DMA_LP_CTL, val); +} + +static void phytium_chan_reset(struct phytium_gdma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d reset\n", chan->id); + val = phytium_chan_read(chan, DMA_CX_CTRL); + val |= DMA_CHAL_SRST; + phytium_chan_write(chan, DMA_CX_CTRL, val); + + udelay(10); + val = phytium_chan_read(chan, DMA_CX_CTRL); + val &= ~DMA_CHAL_SRST; + phytium_chan_write(chan, DMA_CX_CTRL, val); +} + 
+static void phytium_chan_set_bdl_mode(struct phytium_gdma_chan *chan, bool bdl) +{ + u32 val = 0; + + val = phytium_chan_read(chan, DMA_CX_MODE); + if (bdl) { + dev_dbg(chan_to_dev(chan), "channel %d set bdl mode\n", + chan->id); + val |= DMA_CHAN_BDL_MODE; + } else { + dev_dbg(chan_to_dev(chan), "channel %d set direct mode\n", + chan->id); + val &= ~DMA_CHAN_BDL_MODE; + } + phytium_chan_write(chan, DMA_CX_MODE, val); +} + +static u32 __maybe_unused +phytium_chan_get_bdl_num(struct phytium_gdma_chan *chan) +{ + u32 val = phytium_chan_read(chan, DMA_CX_LVI) + 1; + + return val; +} + +static u32 phytium_gdma_set_xfer_ctrl(u32 burst_width, u32 burst_length, + enum burst_type burst_type) +{ + u32 xfer_ctrl = 0; + + xfer_ctrl = burst_type; + xfer_ctrl |= (ffs(burst_width) - 1) << 4; + xfer_ctrl |= (burst_length - 1) << 8; + + return xfer_ctrl; +} + +static void phytium_chan_set_xfer_cfg(struct phytium_gdma_chan *chan, + u32 burst_width, u32 burst_length, + enum burst_type type) +{ + u32 xfer_ctl = phytium_gdma_set_xfer_ctrl(burst_width, burst_length, + type); + u32 val = xfer_ctl << 16 | xfer_ctl; + + dev_dbg(chan_to_dev(chan), "channel %d set xfer cfg 0x%08x", + chan->id, val); + phytium_chan_write(chan, DMA_CX_XFER_CFG, val); +} + +static void phytium_gdma_vdesc_free(struct virt_dma_desc *vd) +{ + struct phytium_gdma_desc *desc = to_gdma_desc(vd); + struct phytium_gdma_chan *chan = desc->chan; + + if (desc->bdl_mode) { + dma_free_coherent(chan_to_dev(desc->chan), + sizeof(struct phytium_gdma_bdl) * desc->bdl_size, + chan->bdl_list, chan->paddr); + } + kfree(desc); +} + +static void phytium_chan_start_desc(struct phytium_gdma_chan *chan) +{ + struct virt_dma_desc *vd = vchan_next_desc(&chan->vchan); + struct phytium_gdma_desc *desc = NULL; + + if (!vd) { + chan->desc = NULL; + return; + } + + list_del(&vd->node); + desc = to_gdma_desc(vd); + chan->desc = desc; + + phytium_chan_clk_enable(chan); + + if (desc->bdl_mode) { + phytium_chan_set_bdl_mode(chan, true); + 
phytium_chan_write(chan, DMA_CX_UPSADDR, + upper_32_bits(chan->paddr)); + phytium_chan_write(chan, DMA_CX_LWSADDR, + lower_32_bits(chan->paddr)); + phytium_chan_write(chan, DMA_CX_UPDADDR, 0); + phytium_chan_write(chan, DMA_CX_LWDADDR, 0); + phytium_chan_write(chan, DMA_CX_TS, 0); + phytium_chan_write(chan, DMA_CX_LVI, desc->bdl_size - 1); + phytium_chan_write(chan, DMA_CX_XFER_CFG, 0); + } else { + phytium_chan_set_bdl_mode(chan, false); + phytium_chan_set_xfer_cfg(chan, desc->burst_width, + desc->burst_length, BURST_INCR); + phytium_chan_write(chan, DMA_CX_UPSADDR, + upper_32_bits(desc->src)); + phytium_chan_write(chan, DMA_CX_LWSADDR, + lower_32_bits(desc->src)); + phytium_chan_write(chan, DMA_CX_UPDADDR, + upper_32_bits(desc->dst)); + phytium_chan_write(chan, DMA_CX_LWDADDR, + lower_32_bits(desc->dst)); + phytium_chan_write(chan, DMA_CX_TS, desc->len); + } + + phytium_gdma_outstanding_set(chan_to_gdma(chan), desc->outstanding); + phytium_gdma_write_mode_set(chan_to_gdma(chan), POLLING_MODE); + phytium_chan_irq_enable(chan, GDMA_CX_INT_CTRL_TRANS_END_ENABLE); + phytium_gdma_dump_reg(chan); + phytium_chan_enable(chan); +} + +static void phytium_gdma_hw_init(struct phytium_gdma_device *gdma) +{ + u32 i = 0; + + phytium_gdma_disable(gdma); + phytium_gdma_reset(gdma); + phytium_gdma_irq_enable(gdma); + phytium_gdma_enable(gdma); + + for (i = 0; i < gdma->dma_channels; i++) { + phytium_chan_irq_disable(&gdma->chan[i]); + phytium_chan_disable(&gdma->chan[i]); + phytium_chan_clk_disable(&gdma->chan[i]); + } +} + +static int phytium_gdma_terminate_all(struct dma_chan *chan) +{ + struct phytium_gdma_chan *gdma_chan = to_gdma_chan(chan); + unsigned long flags = 0; + u32 val = 0; + int ret = 0; + LIST_HEAD(head); + + spin_lock_irqsave(&gdma_chan->vchan.lock, flags); + + phytium_gdma_dump_reg(gdma_chan); + + if (gdma_chan->desc) { + vchan_terminate_vdesc(&gdma_chan->desc->vdesc); + gdma_chan->desc = NULL; + phytium_chan_disable(gdma_chan); + ret = 
readl_poll_timeout(gdma_chan->base + DMA_CX_STATE, val,
+				!(val & BIT(4)), 10, 10000);
+		if (ret)
+			dev_err(chan_to_dev(gdma_chan),
+				"failed to complete writes\n");
+		phytium_chan_reset(gdma_chan);
+	}
+
+	vchan_get_all_descriptors(&gdma_chan->vchan, &head);
+	spin_unlock_irqrestore(&gdma_chan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&gdma_chan->vchan, &head);
+
+	return 0;
+}
+
+static int phytium_gdma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct phytium_gdma_device *gdma = to_gdma_device(chan);
+	struct phytium_gdma_chan *gdma_chan = to_gdma_chan(chan);
+
+	/* prepare channel */
+	phytium_chan_disable(gdma_chan);
+	phytium_chan_reset(gdma_chan);
+	phytium_chan_irq_clear(gdma_chan);
+
+	dev_info(gdma->dev, "alloc channel %d\n", gdma_chan->id);
+
+	return 0;
+}
+
+static void phytium_gdma_free_chan_resources(struct dma_chan *chan)
+{
+	struct phytium_gdma_device *gdma = to_gdma_device(chan);
+	struct phytium_gdma_chan *gdma_chan = to_gdma_chan(chan);
+
+	phytium_chan_disable(gdma_chan);
+	phytium_chan_irq_disable(gdma_chan);
+
+	vchan_free_chan_resources(&gdma_chan->vchan);
+
+	dev_dbg(gdma->dev, "free channel %d\n", gdma_chan->id);
+}
+
+static int phytium_gdma_slave_config(struct dma_chan *chan,
+		struct dma_slave_config *config)
+{
+	struct phytium_gdma_chan *gdma_chan = to_gdma_chan(chan);
+
+	gdma_chan->dma_config = *config;
+
+	return 0;
+}
+
+
+static enum dma_status phytium_gdma_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	enum dma_status ret;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	return ret;
+}
+
+static int phytium_gdma_calcu_burst_width(dma_addr_t src, dma_addr_t dst,
+		u32 len)
+{
+	enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+
+	for (max_width = DMA_SLAVE_BUSWIDTH_16_BYTES;
+			max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
+			max_width >>= 1) {
+		if ((((len | src | dst) & (max_width - 1)) == 0) &&
+				len > max_width)
+			break;
+	}
+
+	return max_width;
+}
+
+static int phytium_gdma_xfer_bdl_mode(struct phytium_gdma_desc *desc,
+		dma_addr_t dst, dma_addr_t src)
+{
+	struct phytium_gdma_chan *chan = desc->chan;
+	struct phytium_gdma_bdl *bdl = NULL;
+	int i = 0;
+	int ret = 0;
+	u32 burst_width = 0;
+	u32 burst_length = 0;
+	size_t buf_len = desc->len;
+
+	chan->bdl_list = dma_alloc_coherent(chan_to_dev(chan),
+			sizeof(struct phytium_gdma_bdl) * desc->bdl_size,
+			&chan->paddr, GFP_KERNEL);
+	if (!chan->bdl_list) {
+		dev_err(chan_to_dev(chan), "channel %d: unable to allocate bdl list\n",
+			chan->id);
+		ret = -ENOMEM;
+		goto error_bdl;
+	}
+
+	for (i = 0; i < desc->bdl_size; i++) {
+		bdl = &chan->bdl_list[i];
+
+		/* set bdl info */
+		bdl->src_addr_l = lower_32_bits(src);
+		bdl->src_addr_h = upper_32_bits(src);
+		bdl->dst_addr_l = lower_32_bits(dst);
+		bdl->dst_addr_h = upper_32_bits(dst);
+
+		bdl->length = min_t(u32, buf_len, GDMA_MAX_LEN);
+
+		/* calculate burst width and burst length for this frame */
+		burst_width = phytium_gdma_calcu_burst_width(src, dst,
+				bdl->length);
+		buf_len -= bdl->length;
+		src += bdl->length;
+		dst += bdl->length;
+
+		burst_length = min_t(u32, GDMA_MAX_BURST_LENGTH,
+				bdl->length / burst_width);
+		bdl->src_xfer_cfg = phytium_gdma_set_xfer_ctrl(burst_width,
+				burst_length, BURST_INCR);
+
+		bdl->dst_xfer_cfg = bdl->src_xfer_cfg;
+
+		/* not trigger interrupt after bdl transferred */
+		bdl->intr_ctl = 0;
+
+		dev_dbg(chan_to_dev(chan),
+			"channel %d: bdl_mode, frame %d, len %d, burst_width %d, burst_length %d, xfer_cfg 0x%08x, outstanding: %d\n",
+			chan->id, i, bdl->length, burst_width, burst_length,
+			bdl->src_xfer_cfg, desc->outstanding);
+	}
+
+error_bdl:
+	return ret;
+}
+
+static struct dma_async_tx_descriptor *phytium_gdma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct phytium_gdma_chan *gdma_chan = to_gdma_chan(chan);
+	struct phytium_gdma_desc *desc = NULL;
+	struct dma_async_tx_descriptor *tx_desc = NULL;
+ u32 frames = 0; + u32 max_outstanding = 0; + int ret = 0; + + if (!src || !dst || !len) + return NULL; + + frames = DIV_ROUND_UP(len, GDMA_MAX_LEN); + + desc = kzalloc(sizeof(struct phytium_gdma_desc), GFP_KERNEL); + if (IS_ERR_OR_NULL(desc)) + return NULL; + + dev_dbg(chan_to_dev(gdma_chan), + "memcpy: src %llx, dst %llx, len %ld, frames %d\n", + src, dst, len, frames); + + desc->len = len; + desc->chan = gdma_chan; + max_outstanding = chan_to_gdma(gdma_chan)->max_outstanding; + + if (frames > 1) { + /* bdl xfer */ + desc->bdl_mode = true; + desc->bdl_size = frames; + desc->outstanding = min_t(u32, max_outstanding, + len / GDMA_MAX_BURST_SIZE / GDMA_MAX_BURST_LENGTH); + ret = phytium_gdma_xfer_bdl_mode(desc, dst, src); + } else { + /* direct xfer */ + desc->bdl_mode = false; + desc->burst_width = phytium_gdma_calcu_burst_width(src, dst, + len); + desc->burst_length = min_t(u32, GDMA_MAX_BURST_LENGTH, + len / desc->burst_width); + desc->src = src; + desc->dst = dst; + desc->outstanding = min_t(u32, max_outstanding, + len / desc->burst_length / desc->burst_width); + dev_dbg(chan_to_dev(gdma_chan), "channel %d: direct mode, len %ld, burst_width %d, burst_length %d, outstanding %d\n", + gdma_chan->id, desc->len, desc->burst_width, + desc->burst_length, desc->outstanding); + } + + if (!ret) + tx_desc = vchan_tx_prep(&gdma_chan->vchan, &desc->vdesc, flags); + + return tx_desc; +} + +static void phytium_gdma_issue_pending(struct dma_chan *chan) +{ + struct phytium_gdma_chan *gdma_chan = to_gdma_chan(chan); + unsigned long flags = 0; + + spin_lock_irqsave(&gdma_chan->vchan.lock, flags); + if (vchan_issue_pending(&gdma_chan->vchan) && !gdma_chan->desc) + phytium_chan_start_desc(gdma_chan); + + spin_unlock_irqrestore(&gdma_chan->vchan.lock, flags); +} + +static void phytium_chan_irq_handler(struct phytium_gdma_chan *chan) +{ + struct phytium_gdma_desc *desc = NULL; + unsigned long flags = 0; + u32 state = 0; + int ret = 0; + + dev_dbg(chan_to_dev(chan), "channel %d int 
state %08x", chan->id,
+		chan->state);
+
+	/* transfer busy */
+	if (chan->state & GDMA_CX_INT_STATE_BUSY) {
+		dev_info(chan_to_dev(chan), "gdma channel %d busy\n", chan->id);
+		ret = readl_poll_timeout_atomic(chan->base + DMA_CX_STATE, state,
+				state & GDMA_CX_INT_STATE_TRANS_END,
+				10, 2000);
+		if (ret) {
+			dev_err(chan_to_dev(chan),
+				"gdma channel %d xfer timeout\n", chan->id);
+		}
+	}
+
+	/* transfer complete */
+	if (chan->state & GDMA_CX_INT_STATE_TRANS_END) {
+		dev_dbg(chan_to_dev(chan), "channel %d xfer complete\n", chan->id);
+		desc = chan->desc;
+		spin_lock_irqsave(&chan->vchan.lock, flags);
+		vchan_cookie_complete(&desc->vdesc);
+		phytium_chan_start_desc(chan);
+		spin_unlock_irqrestore(&chan->vchan.lock, flags);
+	}
+
+	/* fifo full */
+	if (chan->state & GDMA_CX_INT_STATE_FIFO_FULL) {
+		dev_err(chan_to_dev(chan),
+			"gdma channel %d fifo full\n", chan->id);
+	}
+}
+
+static irqreturn_t phytium_dma_interrupt(int irq, void *dev_id)
+{
+	struct phytium_gdma_device *gdma = dev_id;
+	struct phytium_gdma_chan *gdma_chan = NULL;
+	u32 state = 0;
+	int i = 0;
+
+	state = phytium_gdma_read(gdma, DMA_STATE);
+	dev_dbg(gdma->dev, "gdma interrupt, state %04x", state);
+
+	for (i = 0; i < gdma->dma_channels; i++) {
+		if (state & DMA_CX_INTR_STATE(i)) {
+			gdma_chan = &gdma->chan[i];
+			gdma_chan->state = phytium_chan_read(gdma_chan,
+					DMA_CX_STATE);
+			phytium_chan_irq_clear(gdma_chan);
+
+			if (gdma->chan[i].desc) {
+				phytium_chan_irq_handler(gdma_chan);
+				phytium_chan_disable(gdma_chan);
+				phytium_chan_clk_disable(gdma_chan);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static struct dma_chan *phytium_gdma_of_xlate(struct of_phandle_args *dma_spec,
+		struct of_dma *ofdma)
+{
+	struct phytium_gdma_device *gdma = ofdma->of_dma_data;
+	struct device *dev = gdma->dev;
+	struct phytium_gdma_chan *chan = NULL;
+	struct dma_chan *c = NULL;
+	u32 channel_id = 0;
+
+	channel_id = dma_spec->args[0];
+
+	if (channel_id >= gdma->dma_channels) {
+		dev_err(dev, "bad channel %d\n",
channel_id); + return NULL; + } + + chan = &gdma->chan[channel_id]; + c = dma_get_slave_channel(&chan->vchan.chan); + if (!c) { + dev_err(dev, "no more channels available\n"); + return NULL; + } + + return c; +} + +static int phytium_gdma_probe(struct platform_device *pdev) +{ + struct phytium_gdma_device *gdma; + struct dma_device *dma_dev; + struct resource *mem; + u32 i = 0; + u32 nr_channels = 0; + u32 max_outstanding = 0; + int irq_count = 0; + int irq = 0; + int ret = 0; + + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "Unable to set DMA mask\n"); + goto out; + } + + gdma = devm_kzalloc(&pdev->dev, sizeof(*gdma), GFP_KERNEL); + if (!gdma) { + ret = -ENOMEM; + goto out; + } + + dma_dev = &gdma->dma_dev; + gdma->dev = &pdev->dev; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gdma->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(gdma->base)) { + dev_err(&pdev->dev, "no resource address"); + ret = PTR_ERR(gdma->base); + goto out; + } + + ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", + &nr_channels); + if (ret < 0) { + dev_err(&pdev->dev, + "can't get the number of dma channels: %d\n", ret); + goto out; + } + gdma->dma_channels = nr_channels; + + ret = of_property_read_u32(pdev->dev.of_node, "max-outstanding", + &max_outstanding); + if (ret < 0) { + dev_err(&pdev->dev, "can't get max outstanding %d\n", ret); + goto out; + } + gdma->max_outstanding = max_outstanding; + + irq_count = platform_irq_count(pdev); + if (irq_count <= 0) { + dev_err(&pdev->dev, "no irq found\n"); + ret = -EINVAL; + goto out; + } + for (i = 0; i < irq_count; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + dev_err(&pdev->dev, "can't get irq %d\n", i); + continue; + } + ret = devm_request_irq(&pdev->dev, irq, + phytium_dma_interrupt, IRQF_SHARED, + dev_name(&pdev->dev), gdma); + if (ret) { + 
dev_err(&pdev->dev, "could not to request irq %d", irq); + goto out; + } + } + + /* Set capabilities */ + dma_cap_set(DMA_MEMCPY, gdma->dma_dev.cap_mask); + + /* DMA capabilities */ + dma_dev->dev = gdma->dev; + dma_dev->chancnt = gdma->dma_channels; + dma_dev->max_burst = 8; + dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_16_BYTES); + dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_16_BYTES); + dma_dev->directions = BIT(DMA_MEM_TO_MEM); + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + + /* function callback */ + dma_dev->device_alloc_chan_resources = + phytium_gdma_alloc_chan_resources; + dma_dev->device_free_chan_resources = phytium_gdma_free_chan_resources; + dma_dev->device_tx_status = phytium_gdma_tx_status; + dma_dev->device_config = phytium_gdma_slave_config; + dma_dev->device_prep_dma_memcpy = phytium_gdma_prep_dma_memcpy; + dma_dev->device_issue_pending = phytium_gdma_issue_pending; + dma_dev->device_terminate_all = phytium_gdma_terminate_all; + + /* init dma channels */ + INIT_LIST_HEAD(&dma_dev->channels); + gdma->chan = devm_kcalloc(gdma->dev, gdma->dma_channels, + sizeof(*gdma->chan), GFP_KERNEL); + if (!gdma->chan) { + ret = -ENOMEM; + goto out; + } + for (i = 0; i < gdma->dma_channels; i++) { + gdma->chan[i].id = i; + gdma->chan[i].base = gdma->base + DMA_REG_OFFSET + + i * CHAN_REG_OFFSET; + gdma->chan[i].vchan.desc_free = phytium_gdma_vdesc_free; + gdma->chan[i].desc = NULL; + vchan_init(&gdma->chan[i].vchan, dma_dev); + } + + phytium_gdma_hw_init(gdma); + + ret = dma_async_device_register(dma_dev); + if (ret) + goto out; + + ret = of_dma_controller_register(pdev->dev.of_node, + phytium_gdma_of_xlate, gdma); + if (ret < 0) { + 
dev_err(&pdev->dev, + "phytium gdma of register failed %d\n", ret); + goto err_unregister; + } + + platform_set_drvdata(pdev, gdma); + dev_info(gdma->dev, "phytium GDMA Controller registered\n"); + + return 0; + +err_unregister: + dma_async_device_unregister(dma_dev); + +out: + return ret; +} + +static void phytium_gdma_chan_remove(struct phytium_gdma_chan *chan) +{ + phytium_chan_irq_disable(chan); + phytium_chan_disable(chan); + + tasklet_kill(&chan->vchan.task); + list_del(&chan->vchan.chan.device_node); +} + +static int phytium_gdma_remove(struct platform_device *pdev) +{ + struct phytium_gdma_device *gdma = platform_get_drvdata(pdev); + struct phytium_gdma_chan *chan = NULL; + int i = 0; + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&gdma->dma_dev); + + for (i = 0; i < gdma->dma_channels; i++) { + chan = &gdma->chan[i]; + phytium_gdma_chan_remove(chan); + } + + phytium_gdma_irq_disable(gdma); + phytium_gdma_disable(gdma); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_gdma_suspend(struct device *dev) +{ + struct phytium_gdma_device *gdma = dev_get_drvdata(dev); + + phytium_gdma_irq_disable(gdma); + phytium_gdma_disable(gdma); + + return 0; +} + +static int phytium_gdma_resume(struct device *dev) +{ + struct phytium_gdma_device *gdma = dev_get_drvdata(dev); + int ret = 0; + + phytium_gdma_hw_init(gdma); + + return ret; +} +#endif + +static const struct dev_pm_ops phytium_gdma_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(phytium_gdma_suspend, + phytium_gdma_resume) +}; + +static const struct of_device_id phytium_dma_of_id_table[] = { + { .compatible = "phytium,gdma" }, + {} +}; +MODULE_DEVICE_TABLE(of, phytium_dma_of_id_table); + +static struct platform_driver phytium_gdma_driver = { + .probe = phytium_gdma_probe, + .remove = phytium_gdma_remove, + .driver = { + .name = "phytium-gdma", + .of_match_table = of_match_ptr(phytium_dma_of_id_table), + .pm = &phytium_gdma_pm_ops, + }, +}; + +static __init int 
phytium_gdma_init(void) +{ + return platform_driver_register(&phytium_gdma_driver); +} + +static __exit void phytium_gdma_exit(void) +{ + return platform_driver_unregister(&phytium_gdma_driver); +} + +subsys_initcall(phytium_gdma_init); +module_exit(phytium_gdma_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium gdma Controller platform driver"); +MODULE_AUTHOR("HuangJie "); +MODULE_VERSION(PHYTIUM_GDMA_DRIVER_VERSION); diff --git a/drivers/dma/phytium/phytium-gdmac.h b/drivers/dma/phytium/phytium-gdmac.h new file mode 100644 index 0000000000000000000000000000000000000000..785f81351fba9b50870756e1d3601fc04bc4a3c1 --- /dev/null +++ b/drivers/dma/phytium/phytium-gdmac.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Phytium Device GDMA Controller driver. + * + * Copyright (c) 2023-2024 Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_GDMAC_H +#define _PHYTIUM_GDMAC_H + +#include +#include +#include +#include +#include +#include "../virt-dma.h" + + +#define GDMA_MAX_LEN SZ_1G +#define GDMA_MAX_BURST_LENGTH 8 +#define GDMA_MAX_BURST_SIZE 16 + +/* GDMA register address offset */ +#define DMA_REG_OFFSET 0x20 +#define CHAN_REG_OFFSET 0x60 + +#define DMA_CTL 0x00 +#define DMA_STATE 0x04 +#define DMA_INTR_CTL 0x08 +#define DMA_LP_CTL 0x0C +#define DMA_QOS_CFG 0x10 + +#define DMA_CX_CTRL 0x00 +#define DMA_CX_MODE 0x04 +#define DMA_CX_INTR_CTL 0x08 +#define DMA_CX_STATE 0x0C +#define DMA_CX_LVI 0x10 +#define DMA_CX_TS 0x14 +#define DMA_CX_UPSADDR 0x18 +#define DMA_CX_LWSADDR 0x1C +#define DMA_CX_UPDADDR 0x20 +#define DMA_CX_LWDADDR 0x24 +#define DMA_CX_XFER_CFG 0x28 +#define DMA_CX_LCP 0x2C +#define DMA_CX_SECCTL 0x30 +#define DMA_CX_SEC_ATST 0x34 +#define DMA_CX_NSID_STRMID 0x38 +#define DMA_CX_AW_CFG 0x3C +#define DMA_CX_AR_CFG 0x40 +#define DMA_CX_SECRSP 0x44 + +/* DMA_CTL register */ +#define DMA_CTL_EN BIT(0) +#define DMA_CTL_SRST BIT(1) + +#define DMA_OUTSTANDING_MASK GENMASK(11, 8) +#define DMA_WRITE_QOS_MASK GENMASK(3, 0) 
+#define DMA_READ_QOS_MASK	GENMASK(7, 4)
+
+/* DMA_STATE register */
+#define DMA_CX_INTR_STATE(id)	BIT(id)
+
+/* DMA_INTR_CTL register */
+#define DMA_INT_EN	BIT(31)
+#define DMA_INT_CHAL_EN(id)	BIT(id)
+
+/* DMA_LP_CTL register */
+#define DMA_CX_CLK_EN(id)	BIT(id)
+
+/* DMA_CX_MODE register */
+#define DMA_CHAN_BDL_MODE	BIT(0)
+
+/* DMA_CX_CTRL register */
+#define DMA_CHAL_EN	BIT(0)
+#define DMA_CHAL_SRST	BIT(4)
+
+/* DMA_CX_INTR_CTL register, channel interrupt control bits */
+#define GDMA_CX_INT_CTRL_TRANS_END_ENABLE	BIT(3)
+#define GDMA_CX_INT_CTRL_BDL_END_ENABLE	BIT(2)
+#define GDMA_CX_INT_CTRL_FIFO_FULL_ENABLE	BIT(1)
+#define GDMA_CX_INT_CTRL_FIFO_EMPTY_ENABLE	BIT(0)
+
+
+/* DMA_CX_STATE register, channel interrupt states */
+#define GDMA_CX_INT_STATE_BUSY	BIT(4)
+#define GDMA_CX_INT_STATE_TRANS_END	BIT(3)
+#define GDMA_CX_INT_STATE_BDL_END	BIT(2)
+#define GDMA_CX_INT_STATE_FIFO_FULL	BIT(1)
+#define GDMA_CX_INT_STATE_FIFO_EMPTY	BIT(0)
+
+
+/* channel modes */
+enum arbitration_mode {
+	POLLING_MODE,
+	QOS_MODE
+};
+
+/* burst type */
+enum burst_type {
+	BURST_FIX,
+	BURST_INCR
+};
+
+/**
+ * @brief struct phytium_gdma_bdl - descriptor for one bdl list entry
+ * @src_addr_l: low 32 bits of src addr
+ * @src_addr_h: high 32 bits of src addr
+ * @dst_addr_l: low 32 bits of dst addr
+ * @dst_addr_h: high 32 bits of dst addr
+ * @src_xfer_cfg: xfer config for read
+ * @dst_xfer_cfg: xfer config for write
+ * @length: xfer length
+ * @intr_ctl: interrupt enable control
+ */
+struct phytium_gdma_bdl {
+	u32 src_addr_l;
+	u32 src_addr_h;
+	u32 dst_addr_l;
+	u32 dst_addr_h;
+	u32 src_xfer_cfg;
+	u32 dst_xfer_cfg;
+	u32 length;
+	u32 intr_ctl;
+};
+
+/**
+ * struct phytium_gdma_desc - the struct holding info describing gdma request
+ * descriptor
+ * @chan: the channel belonging to this descriptor
+ * @vdesc: gdma request descriptor
+ * @len: total len of dma request
+ * @bdl_size: store bdl size in bdl mode
+ * @bdl_mode: the bdl xfer mode flag
+ * @burst_width: store burst
width in direct xfer mode + * @burst_length: store burst length in direct xfer mode + * @dst: store dst addr in direct xfer mode + * @src: store src addr in direct xfer mode + * @outstanding: store outstanding + */ +struct phytium_gdma_desc { + struct phytium_gdma_chan *chan; + struct virt_dma_desc vdesc; + size_t len; + u32 bdl_size; + bool bdl_mode; + u32 burst_width; + u32 burst_length; + dma_addr_t dst; + dma_addr_t src; + u32 outstanding; +}; + +/** + * struct phytium_gdma_chan - the struct holding info describing dma channel + * @vchan: virtual dma channel + * @dma_config: config parameters for dma channel + * @desc: the transform request descriptor + * @bdl_list: the pointer of bdl + * @paddr: the dma address of bdl + * @id: the id of gdma physical channel + * @base: the mapped register I/O of dma physical channel + * @state: channel transfer state + */ +struct phytium_gdma_chan { + struct virt_dma_chan vchan; + struct dma_slave_config dma_config; + struct phytium_gdma_desc *desc; + struct phytium_gdma_bdl *bdl_list; + dma_addr_t paddr; + + void __iomem *base; + u32 id; + u32 state; +}; + +/** + * struct phytium_gdma_device - the struct holding info describing gdma device + * @dma_dev: an instance for struct dma_device + * @base: the mapped register I/O base of this gdma + * @dma_channels: the number of gdma channels + * @max_outstanding: the max outstanding support + * @chan: the phyical channels of gdma + */ +struct phytium_gdma_device { + struct dma_device dma_dev; + struct device *dev; + void __iomem *base; + u32 dma_channels; + u32 max_outstanding; + struct phytium_gdma_chan *chan; +}; + +#endif /* _PHYTIUM_GDMAC_H */ diff --git a/drivers/edac/phytium_edac.c b/drivers/edac/phytium_edac.c index 55a0f968b78e52a0a039cc10bf78d48ef17cf471..bcfca6a3f2bcfd971be70cbe1efe9bb5f9db232f 100644 --- a/drivers/edac/phytium_edac.c +++ b/drivers/edac/phytium_edac.c @@ -38,6 +38,8 @@ #define MAX_ERR_GROUP 3 +#define EDAC_DRIVER_VERSION "1.1.1" + struct phytium_edac { 
struct device *dev; void __iomem **ras_base; @@ -481,4 +483,5 @@ module_platform_driver(phytium_edac_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Huangjie "); +MODULE_VERSION(EDAC_DRIVER_VERSION); MODULE_DESCRIPTION("Phytium Pe220x EDAC driver"); diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index c619582368c857dca952661743aa8c34bccb590c..937a930ce87de63d45d7e8d44434efbe62a9d3de 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -91,7 +91,6 @@ struct scmi_xfer { struct scmi_msg tx; struct scmi_msg rx; struct completion done; - spinlock_t lock; }; void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index fc5d110b0fd95b75c231d5a12b2c7aa679a7c388..e29d133931f2c132d6463d27629a61ea71e6947f 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -54,6 +54,8 @@ enum scmi_error_codes { static LIST_HEAD(scmi_list); /* Protection for the entire list */ static DEFINE_MUTEX(scmi_list_mutex); +/* Protection for scmi xfer, prevent transmission timeout */ +static DEFINE_MUTEX(scmi_xfer_mutex); /** * struct scmi_xfers_info - Structure to manage transfer information @@ -404,7 +406,6 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) struct scmi_info *info = handle_to_scmi_info(handle); struct device *dev = info->dev; struct scmi_chan_info *cinfo; - unsigned long flags; cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id); if (unlikely(!cinfo)) @@ -414,8 +415,11 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) xfer->hdr.poll_completion = true; #endif + /* lock scmi xfer, too many scmi xfers may cause timeout */ + mutex_lock(&scmi_xfer_mutex); ret = mbox_send_message(cinfo->chan, xfer); if (ret < 0) { + mutex_unlock(&scmi_xfer_mutex); dev_dbg(dev, "mbox send fail %d\n", ret); return ret; } @@ -424,19 
+428,16 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) ret = 0; if (xfer->hdr.poll_completion) { - ktime_t stop; - - spin_lock_irqsave(&xfer->lock, flags); - stop = ktime_add_ms(ktime_get(), - info->desc->max_rx_timeout_ms); + ktime_t stop = ktime_add_ms(ktime_get(), + info->desc->max_rx_timeout_ms); spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - if (ktime_before(ktime_get(), stop)) + if (ktime_before(ktime_get(), stop) || + scmi_xfer_poll_done(cinfo, xfer)) scmi_fetch_response(xfer, cinfo->payload); else ret = -ETIMEDOUT; - spin_unlock_irqrestore(&xfer->lock, flags); } else { /* And we wait for the response. */ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); @@ -457,6 +458,7 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer) * received our message. */ mbox_client_txdone(cinfo->chan, ret); + mutex_unlock(&scmi_xfer_mutex); return ret; } @@ -672,7 +674,6 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo) xfer->tx.buf = xfer->rx.buf; init_completion(&xfer->done); - spin_lock_init(&xfer->lock); } spin_lock_init(&info->xfer_lock); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index c0929a1fc5a176caea81aa6b43f4b9b3bdb41984..b54d9a9795c0fd746038163ae31196552c3d4971 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -81,7 +81,7 @@ config GPIO_GENERIC # This symbol is selected by both MMIO and PCI expanders config GPIO_PHYTIUM_CORE - tristate + bool # This symbol is selected by both I2C and SPI expanders config GPIO_MAX730X @@ -409,7 +409,7 @@ config GPIO_OMAP Say yes here to enable GPIO support for TI OMAP SoCs. config GPIO_PHYTIUM_PLAT - tristate "Phytium GPIO Platform support" + bool "Phytium GPIO Platform support" default y if ARCH_PHYTIUM depends on ARM64 select GPIO_PHYTIUM_CORE @@ -1334,7 +1334,7 @@ config GPIO_PCIE_IDIO_24 filters are deactivated by this driver. 
config GPIO_PHYTIUM_PCI - tristate "Phytium GPIO PCI support" + bool "Phytium GPIO PCI support" select GPIO_PHYTIUM_CORE select IRQ_DOMAIN select GENERIC_IRQ_CHIP diff --git a/drivers/gpio/gpio-phytium-core.c b/drivers/gpio/gpio-phytium-core.c index 29b422667dc56ea48b1afda72400ed7adeef69e4..e3a71d178339f6c513471a5ec28fb3d40745c743 100644 --- a/drivers/gpio/gpio-phytium-core.c +++ b/drivers/gpio/gpio-phytium-core.c @@ -11,6 +11,8 @@ #include #include "gpio-phytium-core.h" +#define GPIO_CORE_DRIVER_VERSION "1.1.2" + static int get_pin_location(struct phytium_gpio *gpio, unsigned int offset, struct pin_loc *pl) { @@ -128,14 +130,13 @@ int phytium_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, return -EINVAL; ddr = gpio->regs + GPIO_SWPORTA_DDR + (loc.port * GPIO_PORT_STRIDE); + phytium_gpio_set(gc, offset, value); raw_spin_lock_irqsave(&gpio->lock, flags); writel(readl(ddr) | BIT(loc.offset), ddr); raw_spin_unlock_irqrestore(&gpio->lock, flags); - phytium_gpio_set(gc, offset, value); - return 0; } EXPORT_SYMBOL_GPL(phytium_gpio_direction_output); @@ -360,3 +361,4 @@ EXPORT_SYMBOL_GPL(phytium_gpio_irq_set_affinity); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Phytium GPIO Controller core"); +MODULE_VERSION(GPIO_CORE_DRIVER_VERSION); diff --git a/drivers/gpio/gpio-phytium-pci.c b/drivers/gpio/gpio-phytium-pci.c index 46d4cf58d53fb124fd5b86a4e8ee94eacc6a060b..5e68d9a4490c90eee3a975c4851d550fda941599 100644 --- a/drivers/gpio/gpio-phytium-pci.c +++ b/drivers/gpio/gpio-phytium-pci.c @@ -13,6 +13,8 @@ #include "gpio-phytium-core.h" +#define GPIO_PCI_DRIVER_VERSION "1.1.1" + static int phytium_gpio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; @@ -185,3 +187,4 @@ module_pci_driver(phytium_gpio_pci_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Cheng Quan "); MODULE_DESCRIPTION("Phytium GPIO PCI Driver"); +MODULE_VERSION(GPIO_PCI_DRIVER_VERSION); diff --git a/drivers/gpio/gpio-phytium-platform.c 
b/drivers/gpio/gpio-phytium-platform.c index 61ee2a4a5fda49f149cd0b824c0a77a1e448f4c9..ae817061ccc52230e84e19c95275c3f61a843252 100644 --- a/drivers/gpio/gpio-phytium-platform.c +++ b/drivers/gpio/gpio-phytium-platform.c @@ -18,6 +18,8 @@ #include "gpio-phytium-core.h" +#define GPIO_PLAT_DRIVER_VERSION "1.1.2" + static const struct of_device_id phytium_gpio_of_match[] = { { .compatible = "phytium,gpio", }, { } @@ -104,7 +106,7 @@ static int phytium_gpio_probe(struct platform_device *pdev) #endif girq->default_type = IRQ_TYPE_NONE; - for (irq_count = 0; irq_count < gpio->ngpio[0]; irq_count++) { + for (irq_count = 0; irq_count < platform_irq_count(pdev); irq_count++) { gpio->irq[irq_count] = -ENXIO; gpio->irq[irq_count] = platform_get_irq(pdev, irq_count); if (gpio->irq[irq_count] < 0) { @@ -208,3 +210,4 @@ module_platform_driver(phytium_gpio_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Chen Baozi "); MODULE_DESCRIPTION("Phytium GPIO driver"); +MODULE_VERSION(GPIO_PLAT_DRIVER_VERSION); diff --git a/drivers/gpio/gpio-phytium-sgpio.c b/drivers/gpio/gpio-phytium-sgpio.c index 74ec3a98347909ae516588cf020a63b1f9ace64e..220de30fd2aca3f01bd3294ec40b39146fb7a562 100644 --- a/drivers/gpio/gpio-phytium-sgpio.c +++ b/drivers/gpio/gpio-phytium-sgpio.c @@ -47,6 +47,8 @@ #define GPIO_OFFSET(x) ((x) & GENMASK(5, 0)) #define GPIO_BIT(x) BIT(GPIO_OFFSET(x) >> 1) +#define GPIO_SGPIO_DRIVER_VERSION "1.1.1" + struct phytium_sgpio { struct gpio_chip gc; void __iomem *regs; @@ -300,3 +302,4 @@ module_platform_driver(phytium_sgpio_driver); MODULE_AUTHOR("Chen Baozi "); MODULE_DESCRIPTION("Phytium SGPIO driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(GPIO_SGPIO_DRIVER_VERSION); diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile index 2dc7ea1118cd4cbf141215a7f377e258ae2593ba..2e730a02f1666b4b98936af4b64e3f19d44989ef 100644 --- a/drivers/gpu/drm/phytium/Makefile +++ b/drivers/gpu/drm/phytium/Makefile @@ -16,3 +16,8 @@ phytium-dc-drm-y := 
phytium_display_drv.o \ obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only +ifeq ($(ARCH), x86) + FPU_CFLAGS += -mhard-float + FPU_CFLAGS += -msse -msse2 + CFLAGS_phytium_crtc.o += $(FPU_CFLAGS) +endif \ No newline at end of file diff --git a/drivers/gpu/drm/phytium/pe220x_dc.c b/drivers/gpu/drm/phytium/pe220x_dc.c index 5f7f527a744cdeed50a7df7f34c6551c20f64e61..0fab6d45d666f60370ec04527c649b489e042389 100644 --- a/drivers/gpu/drm/phytium/pe220x_dc.c +++ b/drivers/gpu/drm/phytium/pe220x_dc.c @@ -7,7 +7,9 @@ #include #include +#if defined(__arm__) || defined(__aarch64__) #include +#endif #include #include "phytium_display_drv.h" #include "pe220x_reg.h" @@ -56,6 +58,10 @@ static const unsigned int pe220x_primary_formats[] = { DRM_FORMAT_NV21, }; +static const unsigned int pe220x_bmc_primary_formats[] = { + DRM_FORMAT_XRGB8888, +}; + static uint64_t pe220x_primary_formats_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID @@ -104,49 +110,76 @@ void pe220x_dc_hw_reset(struct drm_crtc *crtc) int config = 0; int phys_pipe = phytium_crtc->phys_pipe; - /* disable pixel clock for bmc mode */ - if (phys_pipe == 0) - pe220x_dc_hw_disable(crtc); - config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); - config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); - if (phys_pipe == 0) { - phytium_writel_reg(priv, config | DC0_CORE_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET | AHB_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC0_CORE_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); + if 
(priv->info.bmc_mode) { + pe220x_dc_hw_disable(crtc); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AHB_RESET)); + if (phys_pipe == 0) { + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } else { + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } + } else { - phytium_writel_reg(priv, config | DC1_CORE_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET | AHB_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config | DC1_CORE_RESET, - 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); - phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); - udelay(20); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); + if (phys_pipe == 0) { + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | 
DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } else { + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } } } @@ -211,6 +244,15 @@ void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, *format_count = ARRAY_SIZE(pe220x_primary_formats); } +void pe220x_dc_bmc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_primary_formats_modifiers; + *formats = pe220x_bmc_primary_formats; + *format_count = ARRAY_SIZE(pe220x_bmc_primary_formats); +} + void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, const uint32_t **formats, uint32_t *format_count) diff --git a/drivers/gpu/drm/phytium/pe220x_dc.h b/drivers/gpu/drm/phytium/pe220x_dc.h index af17f99a0b12ae35b3d5e116d058dd5d6194997e..6e5e8cb3113e0d0822f3c2189d71814f897f73f6 100644 --- a/drivers/gpu/drm/phytium/pe220x_dc.h +++ b/drivers/gpu/drm/phytium/pe220x_dc.h @@ -22,6 +22,9 @@ extern int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, extern void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, const uint32_t **formats, uint32_t *format_count); +extern void 
pe220x_dc_bmc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); extern void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, const uint32_t **formats, uint32_t *format_count); diff --git a/drivers/gpu/drm/phytium/pe220x_dp.c b/drivers/gpu/drm/phytium/pe220x_dp.c index 08597ae2f2aac1b934cf8c1e063622dbd25b5d40..b56484361084d2a5f70a8ab4e71f5586333b291e 100644 --- a/drivers/gpu/drm/phytium/pe220x_dp.c +++ b/drivers/gpu/drm/phytium/pe220x_dp.c @@ -5,6 +5,9 @@ * Copyright (c) 2021-2024 Phytium Technology Co., Ltd. */ +#include +#include +#include #include "phytium_display_drv.h" #include "pe220x_reg.h" #include "phytium_dp.h" @@ -379,13 +382,9 @@ static void pe220x_dp_hw_poweron_panel(struct phytium_dp_device *phytium_dp) { struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; int ret = 0; - phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, - 0, PE220X_DC_CMD_REGISTER(port)); - ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), - FLAG_REQUEST, FLAG_REPLY); + gpiod_set_value(priv->edp_power_en, 1); if (ret < 0) DRM_ERROR("%s: failed to poweron panel\n", __func__); } @@ -394,13 +393,9 @@ static void pe220x_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) { struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; int ret = 0; - phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, - 0, PE220X_DC_CMD_REGISTER(port)); - ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), - FLAG_REQUEST, FLAG_REPLY); + gpiod_set_value(priv->edp_power_en, 0); if (ret < 0) DRM_ERROR("%s: failed to poweroff panel\n", __func__); } @@ -409,12 +404,15 @@ static void pe220x_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) { struct drm_device *dev = phytium_dp->dev; 
struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port, ret = 0; + struct pwm_state state; + int ret = 0; - phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, - 0, PE220X_DC_CMD_REGISTER(port)); - ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), - FLAG_REQUEST, FLAG_REPLY); + pwm_get_state(phytium_dp->pwm, &state); + state.enabled = true; + pwm_set_relative_duty_cycle(&state, 50, 100); + ret = pwm_apply_state(phytium_dp->pwm, &state); + + gpiod_set_value(priv->edp_bl_en, 1); if (ret < 0) DRM_ERROR("%s: failed to enable backlight\n", __func__); } @@ -423,45 +421,41 @@ static void pe220x_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) { struct drm_device *dev = phytium_dp->dev; struct phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; int ret = 0; - phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, - 0, PE220X_DC_CMD_REGISTER(port)); - ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), - FLAG_REQUEST, FLAG_REPLY); + gpiod_set_value(priv->edp_bl_en, 0); if (ret < 0) - DRM_ERROR("%s: failed to disable backlight\n", __func__); + DRM_ERROR("%s: failed to disable backlight, ret = %d\n", __func__, ret); } static uint32_t pe220x_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) { - struct drm_device *dev = phytium_dp->dev; - struct phytium_display_private *priv = dev->dev_private; - int config; - uint32_t group_offset = priv->address_transform_base; + struct pwm_state state; + uint32_t level; - config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); - return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); + pwm_get_state(phytium_dp->pwm, &state); + level = pwm_get_relative_duty_cycle(&state, 100); + return level; } static int pe220x_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) { - struct drm_device *dev = phytium_dp->dev; - struct 
phytium_display_private *priv = dev->dev_private; - int port = phytium_dp->port; - int config = 0; + struct pwm_state state; int ret = 0; if (level > PE220X_DP_BACKLIGHT_MAX) { ret = -EINVAL; goto out; } + pwm_get_state(phytium_dp->pwm, &state); + state.enabled = true; + state.period = phytium_dp->pwm->args.period; + if (state.period == 0) + DRM_ERROR("%s: set pwm period to 0\n", __func__); + + pwm_set_relative_duty_cycle(&state, level, 100); - config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); - phytium_writel_reg(priv, config, 0, PE220X_DC_CMD_REGISTER(port)); - ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), - FLAG_REQUEST, FLAG_REPLY); + ret = pwm_apply_state(phytium_dp->pwm, &state); if (ret < 0) DRM_ERROR("%s: failed to set backlight\n", __func__); out: diff --git a/drivers/gpu/drm/phytium/pe220x_dp.h b/drivers/gpu/drm/phytium/pe220x_dp.h index 78bb26c7b75cc3fb90bee1e85a30dd269a582adc..f9cf8a0d4f51d3fa5c589c3ac36f4a8f1751db65 100644 --- a/drivers/gpu/drm/phytium/pe220x_dp.h +++ b/drivers/gpu/drm/phytium/pe220x_dp.h @@ -8,7 +8,8 @@ #ifndef __PE220X_DP_H__ #define __PE220X_DP_H__ -#define PE220X_DP_BACKLIGHT_MAX 100 +#define PE220X_DP_BACKLIGHT_MAX 99 +#define PE220X_DP_BACKLIGHT_MIN 2 void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp); #endif /* __PE220X_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c index f220c9a35cd55963de8e55939e37c1e76e092ff8..67a2b21d4deb28833d91eb20063327ffec49fb06 100644 --- a/drivers/gpu/drm/phytium/phytium_crtc.c +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -6,7 +6,11 @@ #include #include +#if defined(__arm__) || defined(__aarch64__) #include +#elif defined(__x86_64) +#include +#endif #include #include "phytium_display_drv.h" #include "phytium_crtc.h" @@ -276,14 +280,22 @@ static void phytium_dc_scaling_config(struct drm_crtc *crtc, memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); 
kernel_info_width.kernelStates = tmp; memset(kernel_info_width.kernelStates, 0, KERNELSTATES); +#if defined(__arm__) || defined(__aarch64__) kernel_neon_begin(); +#elif defined(__x86_64__) + kernel_fpu_begin(); +#endif dc_calculate_sync_table(FRAMEBUFFER_HORIZONTAL_FILTER_TAP, phytium_crtc->src_width, phytium_crtc->dst_width, &kernel_info_width); memset(kernelStates, 0, sizeof(kernelStates)); memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); +#if defined(__arm__) || defined(__aarch64__) kernel_neon_end(); +#elif defined(__x86_64__) + kernel_fpu_end(); +#endif phytium_writel_reg(priv, HORI_FILTER_INDEX, group_offset, PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX); for (i = 0; i < 128; i++) { @@ -294,12 +306,20 @@ static void phytium_dc_scaling_config(struct drm_crtc *crtc, memset(&kernel_info_width, 0, sizeof(struct filter_blit_array)); kernel_info_width.kernelStates = tmp; memset(kernel_info_width.kernelStates, 0, KERNELSTATES); +#if defined(__arm__) || defined(__aarch64__) kernel_neon_begin(); +#elif defined(__x86_64__) + kernel_fpu_begin(); +#endif dc_calculate_sync_table(FRAMEBUFFER_FILTER_TAP, phytium_crtc->src_height, phytium_crtc->dst_height, &kernel_info_width); memset(kernelStates, 0, sizeof(kernelStates)); memcpy(kernelStates, kernel_info_width.kernelStates + 1, KERNELSTATES - 4); +#if defined(__arm__) || defined(__aarch64__) kernel_neon_end(); +#elif defined(__x86_64__) + kernel_fpu_end(); +#endif phytium_writel_reg(priv, VERT_FILTER_INDEX, group_offset, PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX); for (i = 0; i < 128; i++) @@ -547,7 +567,8 @@ phytium_crtc_atomic_enable(struct drm_crtc *crtc, else config &= (~FRAMEBUFFER_SCALE_ENABLE); - config |= FRAMEBUFFER_GAMMA_ENABLE; + if (!priv->info.bmc_mode) + config |= FRAMEBUFFER_GAMMA_ENABLE; if (crtc->state->gamma_lut) phytium_crtc_gamma_set(crtc); @@ -724,6 +745,7 @@ int phytium_crtc_init(struct drm_device *dev, int phys_pipe) struct phytium_crtc_state *phytium_crtc_state; struct 
phytium_plane *phytium_primary_plane = NULL; struct phytium_plane *phytium_cursor_plane = NULL; + struct drm_plane *cursor_base = NULL; struct phytium_display_private *priv = dev->dev_private; int ret; @@ -766,16 +788,21 @@ int phytium_crtc_init(struct drm_device *dev, int phys_pipe) goto failed_create_primary; } - phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); - if (IS_ERR(phytium_cursor_plane)) { - ret = PTR_ERR(phytium_cursor_plane); - DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); - goto failed_create_cursor; + if (priv->info.bmc_mode) { + cursor_base = NULL; + } else { + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + cursor_base = &phytium_cursor_plane->base; } ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, &phytium_primary_plane->base, - &phytium_cursor_plane->base, + cursor_base, &phytium_crtc_funcs, "phys_pipe %d", phys_pipe); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c index 9e7261064a6806680bc148788b4a883f9c111412..05bb4621440ecceb6cd055e6f89f032d997474d8 100644 --- a/drivers/gpu/drm/phytium/phytium_display_drv.c +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -53,7 +53,7 @@ int phytium_wait_cmd_done(struct phytium_display_private *priv, uint32_t request_bit, uint32_t reply_bit) { - int timeout = 500, config = 0, ret = 0; + int timeout = 1000, config = 0, ret = 0; do { mdelay(1); @@ -315,8 +315,11 @@ static const struct vm_operations_struct phytium_vm_ops = { * The device specific ioctl range is 0x40 to 0x79. 
*/ #define DRM_PHYTIUM_VRAM_TYPE_DEVICE 0x0 +#define DRM_PHYTIUM_BMC_DEVICE 0x1 #define DRM_IOCTL_PHYTIUM_VRAM_TYPE_DEVICE DRM_IO(DRM_COMMAND_BASE\ + DRM_PHYTIUM_VRAM_TYPE_DEVICE) +#define DRM_IOCTL_PHYTIUM_IS_BMC_DEVICE DRM_IO(DRM_COMMAND_BASE\ + + DRM_PHYTIUM_BMC_DEVICE) static int phytium_ioctl_check_vram_device(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -326,10 +329,20 @@ static int phytium_ioctl_check_vram_device(struct drm_device *dev, void *data, return ((priv->support_memory_type == MEMORY_TYPE_VRAM_DEVICE) ? 1 : 0); } +static int phytium_ioctl_check_bmc_device(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct phytium_display_private *priv = dev->dev_private; + + return priv->info.bmc_mode ? 1 : 0; +} + static const struct drm_ioctl_desc phytium_ioctls[] = { /* for test, none so far */ DRM_IOCTL_DEF_DRV(PHYTIUM_VRAM_TYPE_DEVICE, phytium_ioctl_check_vram_device, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(PHYTIUM_IS_BMC_DEVICE, phytium_ioctl_check_bmc_device, + DRM_AUTH|DRM_UNLOCKED), }; static const struct file_operations phytium_drm_driver_fops = { @@ -490,5 +503,6 @@ module_init(phytium_display_init); module_exit(phytium_display_exit); MODULE_LICENSE("GPL"); +MODULE_VERSION(DC_DRIVER_VERSION); MODULE_AUTHOR("Yang Xun "); MODULE_DESCRIPTION("Phytium Display Controller"); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h index 973591a66e87449f312567b3d37808723d1ad156..3f9ce44bcbf338363b0e4f231c2ce93d09451623 100644 --- a/drivers/gpu/drm/phytium/phytium_display_drv.h +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -9,6 +9,7 @@ #include #include +#include #include #define DEBUG_LOG 0 @@ -21,6 +22,7 @@ #define DRV_DATE "20201220" #define DRV_MAJOR 1 #define DRV_MINOR 1 +#define DC_DRIVER_VERSION "1.0.0" /* come from GPU */ #define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 @@ -66,11 +68,15 @@ struct phytium_device_info { unsigned char num_pipes; unsigned 
char total_pipes; unsigned char edp_mask; + bool bmc_mode; + unsigned char reserve[2]; unsigned int crtc_clock_max; unsigned int hdisplay_max; unsigned int vdisplay_max; unsigned int backlight_max; + unsigned int backlight_min; unsigned long address_mask; + struct pwm_device *pwm; }; struct phytium_display_private { @@ -116,6 +122,8 @@ struct phytium_display_private { /* DMA info */ int dma_inited; struct dma_chan *dma_chan; + /*BL GPIO info*/ + struct gpio_desc *edp_bl_en, *edp_power_en; }; static inline unsigned int diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c index 2af682d6a96811f37d8f99633cc769ee9f0620cd..05d4df10904fadc820f6cb8fe4015bed70ff882a 100644 --- a/drivers/gpu/drm/phytium/phytium_dp.c +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -2223,13 +2223,13 @@ phytium_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display case 8: break; default: - DRM_INFO("not support bpc(%d)\n", display_info->bpc); + DRM_DEBUG_KMS("not support bpc(%d)\n", display_info->bpc); display_info->bpc = 8; break; } if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) { - DRM_INFO("not support color_format(%d)\n", display_info->color_formats); + DRM_DEBUG_KMS("not support color_format(%d)\n", display_info->color_formats); display_info->color_formats = DRM_COLOR_FORMAT_RGB444; } @@ -2597,6 +2597,7 @@ int phytium_dp_init(struct drm_device *dev, int port) if (phytium_dp_is_edp(phytium_dp, port)) { phytium_dp->is_edp = true; type = DRM_MODE_CONNECTOR_eDP; + phytium_dp->pwm = priv->info.pwm; phytium_dp_panel_init_backlight_funcs(phytium_dp); phytium_edp_backlight_off(phytium_dp); phytium_edp_panel_poweroff(phytium_dp); diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h index 047357a0d523be1d073fe1c5192cce3f4aa8a447..bbcf133d66638b6e3ff7ffdfd2e58e37d228fe60 100644 --- a/drivers/gpu/drm/phytium/phytium_dp.h +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -112,6 +112,7 @@ struct 
phytium_dp_device { struct phytium_panel panel; struct drm_display_mode native_mode; + struct pwm_device *pwm; }; union phytium_phy_tp { diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c index 2068bd40fa9eb852294599db7b39a3745d5e723d..948c4cbf35ebfa634ebab67976e05e90d8219454 100644 --- a/drivers/gpu/drm/phytium/phytium_gem.c +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -94,7 +94,7 @@ phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) DRM_ERROR("failed to allocate sg\n"); goto sgt_free; } - page = phys_to_page(phytium_gem_obj->phys_addr); + page = pfn_to_page(__phys_to_pfn(phytium_gem_obj->phys_addr)); sg_set_page(sgt->sgl, page, PAGE_ALIGN(phytium_gem_obj->size), 0); } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, @@ -447,7 +447,7 @@ struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, uns DRM_ERROR("fail to allocate carveout memory with size %lx\n", size); goto failed_dma_alloc; } - page = phys_to_page(phytium_gem_obj->phys_addr); + page = pfn_to_page(__phys_to_pfn(phytium_gem_obj->phys_addr)); phytium_gem_obj->iova = dma_map_page(dev->dev, page, 0, size, DMA_TO_DEVICE); if (dma_mapping_error(dev->dev, phytium_gem_obj->iova)) { DRM_ERROR("fail to dma map carveout memory with size %lx\n", size); diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c index efb839ae02ea78a3092f5d098b13299da8217169..60a20b9e645ced07eb4be482bfca7e78cb3dad12 100644 --- a/drivers/gpu/drm/phytium/phytium_panel.c +++ b/drivers/gpu/drm/phytium/phytium_panel.c @@ -195,7 +195,7 @@ static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) struct phytium_display_private *priv = dev->dev_private; panel->max = priv->info.backlight_max; - panel->min = 0; + panel->min = priv->info.backlight_min; panel->level = phytium_dp_hw_get_backlight(panel); } @@ -211,7 +211,7 @@ void 
phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight; phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight; } else { - DRM_DEBUG_KMS("SE Backlight Control Supported!\n"); + DRM_DEBUG_KMS("PWM Backlight Control Supported!\n"); phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight; phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight; phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight; diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c index 58f5f69336ea415f2a855d8f34b159253679add7..3b99eabaedc08bbb43ed872cdd5e19e3413260da 100644 --- a/drivers/gpu/drm/phytium/phytium_pci.c +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -6,6 +6,7 @@ #include #include +#include #include #include "phytium_display_drv.h" #include "phytium_pci.h" @@ -238,11 +239,39 @@ phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *p devm_kfree(&pdev->dev, pci_priv); } +static int phytium_kick_out_firmware_fb(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + + ap = alloc_apertures(1); + if (!ap) + return -ENOMEM; + + ap->ranges[0].base = pci_resource_start(pdev, 2); + ap->ranges[0].size = pci_resource_len(pdev, 2); + + drm_fb_helper_remove_conflicting_framebuffers(ap, "phytiumdrmfb", + false); + kfree(ap); + + return 0; +} + static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct phytium_display_private *priv = NULL; struct drm_device *dev = NULL; int ret = 0; + struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; + + if (phytium_info) { + if (phytium_info->platform_mask & BIT(PHYTIUM_PLATFORM_PE220X)) + phytium_display_drm_driver.name = "pe220x"; + } + + ret = phytium_kick_out_firmware_fb(pdev); + if (ret) + DRM_ERROR("failed to remove conflicting framebuffers\n"); dev = drm_dev_alloc(&phytium_display_drm_driver, 
&pdev->dev); if (IS_ERR(dev)) { @@ -384,6 +413,8 @@ static const struct phytium_device_info px210_info = { .vdisplay_max = PX210_DC_VDISPLAY_MAX, .address_mask = PX210_DC_ADDRESS_MASK, .backlight_max = PX210_DP_BACKLIGHT_MAX, + .backlight_min = PX210_DP_BACKLIGHT_MIN, + .bmc_mode = false, }; static const struct phytium_device_info pe220x_info = { @@ -394,6 +425,8 @@ static const struct phytium_device_info pe220x_info = { .vdisplay_max = PE220X_DC_VDISPLAY_MAX, .address_mask = PE220X_DC_ADDRESS_MASK, .backlight_max = PE220X_DP_BACKLIGHT_MAX, + .backlight_min = PE220X_DP_BACKLIGHT_MIN, + .bmc_mode = true, }; static const struct pci_device_id phytium_display_pci_ids[] = { diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c index 1ae2ac88475686faab7571d72c001987d6c02f8e..41e8c116dfe31b1d1ed4a369fbd44542bf44a6ac 100644 --- a/drivers/gpu/drm/phytium/phytium_plane.c +++ b/drivers/gpu/drm/phytium/phytium_plane.c @@ -588,7 +588,12 @@ struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int p phytium_plane->dc_hw_update_primary_hi_addr = px210_dc_hw_update_primary_hi_addr; phytium_plane->dc_hw_update_cursor_hi_addr = NULL; } else if (IS_PE220X(priv)) { - phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_primary_format; + if (priv->info.bmc_mode) + phytium_plane->dc_hw_plane_get_format = + pe220x_dc_bmc_hw_plane_get_primary_format; + else + phytium_plane->dc_hw_plane_get_format = + pe220x_dc_hw_plane_get_primary_format; phytium_plane->dc_hw_update_dcreq = NULL; phytium_plane->dc_hw_update_primary_hi_addr = pe220x_dc_hw_update_primary_hi_addr; phytium_plane->dc_hw_update_cursor_hi_addr = NULL; diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c index d17a4ca947dbcf225c655ea6e93a82f779ea4a6f..7b5a5f368ca4d8f278c0eacbdb8c1704df596090 100644 --- a/drivers/gpu/drm/phytium/phytium_platform.c +++ b/drivers/gpu/drm/phytium/phytium_platform.c @@ -10,6 +10,9 @@ 
#include #include #include +#include +#include +#include #include "phytium_display_drv.h" #include "phytium_platform.h" #include "phytium_dp.h" @@ -125,6 +128,27 @@ phytium_platform_private_init(struct platform_device *pdev) dev_err(&pdev->dev, "missing edp_mask property from dts\n"); goto failed; } + if (priv->info.edp_mask) { + priv->info.pwm = devm_pwm_get(&pdev->dev, NULL); + if (IS_ERR(priv->info.pwm)) { + dev_err(&pdev->dev, "Failed to request PWM device: %ld\n", + PTR_ERR(priv->info.pwm)); + goto failed; + } + priv->edp_bl_en = gpiod_get(&pdev->dev, "edp-bl-en", GPIOD_OUT_HIGH); + if (!priv->edp_bl_en) { + dev_err(&pdev->dev, "Failed to get edp_en gpio\n"); + goto failed; + } + priv->edp_power_en = gpiod_get(&pdev->dev, "edp-power-en", GPIOD_OUT_HIGH); + if (!priv->edp_power_en) { + dev_err(&pdev->dev, "Failed to get edp_pwr_en gpio\n"); + goto failed; + } + // set GPIO pin output + gpiod_direction_output(priv->edp_power_en, 0); + gpiod_direction_output(priv->edp_bl_en, 0); + } } else if (has_acpi_companion(&pdev->dev)) { phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev); if (!phytium_info) { @@ -144,6 +168,27 @@ phytium_platform_private_init(struct platform_device *pdev) dev_err(&pdev->dev, "missing edp_mask property from acpi\n"); goto failed; } + if (priv->info.edp_mask) { + priv->info.pwm = devm_pwm_get(&pdev->dev, NULL); + if (IS_ERR(priv->info.pwm)) { + dev_err(&pdev->dev, "Failed to request PWM device: %ld\n", + PTR_ERR(priv->info.pwm)); + goto failed; + } + priv->edp_bl_en = gpiod_get(&pdev->dev, "edp-bl-en", GPIOD_OUT_HIGH); + if (!priv->edp_bl_en) { + dev_err(&pdev->dev, "Failed to get edp_en gpio\n"); + goto failed; + } + priv->edp_power_en = gpiod_get(&pdev->dev, "edp-power-en", GPIOD_OUT_HIGH); + if (!priv->edp_power_en) { + dev_err(&pdev->dev, "Failed to get edp_pwr_en gpio\n"); + goto failed; + } + // set GPIO pin output + gpiod_direction_output(priv->edp_power_en, 0); + gpiod_direction_output(priv->edp_bl_en, 
0); + } } priv->info.num_pipes = 0; @@ -193,8 +238,23 @@ static int phytium_platform_probe(struct platform_device *pdev) { struct phytium_display_private *priv = NULL; struct drm_device *dev = NULL; + struct phytium_device_info *phytium_info = NULL; int ret = 0; + if (pdev->dev.of_node) { + phytium_info = (struct phytium_device_info *)of_device_get_match_data(&pdev->dev); + if (phytium_info) { + if (phytium_info->platform_mask & BIT(PHYTIUM_PLATFORM_PE220X)) + phytium_display_drm_driver.name = "pe220x"; + } + } else if (has_acpi_companion(&pdev->dev)) { + phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev); + if (phytium_info) { + if (phytium_info->platform_mask & BIT(PHYTIUM_PLATFORM_PE220X)) + phytium_display_drm_driver.name = "pe220x"; + } + } + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); if (IS_ERR(dev)) { DRM_ERROR("failed to allocate drm_device\n"); @@ -287,6 +347,8 @@ static const struct phytium_device_info pe220x_info = { .vdisplay_max = PE220X_DC_VDISPLAY_MAX, .address_mask = PE220X_DC_ADDRESS_MASK, .backlight_max = PE220X_DP_BACKLIGHT_MAX, + .backlight_min = PE220X_DP_BACKLIGHT_MIN, + .bmc_mode = false, }; static const struct of_device_id display_of_match[] = { diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h index 4dca6c02312792035be8bc57d1f68ebd0420011d..f5d4a6945c35b6313156955c56ef4367f86da5dc 100644 --- a/drivers/gpu/drm/phytium/phytium_reg.h +++ b/drivers/gpu/drm/phytium/phytium_reg.h @@ -268,7 +268,7 @@ #define PHYTIUM_DP_INTERRUPT_MASK 0x0144 #define HPD_IRQ_MASK (1<<1) #define HPD_EVENT_MASK (1<<0) - #define HPD_OTHER_MASK 0x3c + #define HPD_OTHER_MASK 0x7c #define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 #define PHYTIUM_DP_AUX_STATUS 0x014C #define REPLY_RECEIVED 0x1 diff --git a/drivers/gpu/drm/phytium/px210_dc.c b/drivers/gpu/drm/phytium/px210_dc.c index c9447ccaa98f3a0636fa349a64428bfbd07fe09c..b12132a63ae26970d54ef9f8c89e092fa15031f2 100644 --- 
a/drivers/gpu/drm/phytium/px210_dc.c +++ b/drivers/gpu/drm/phytium/px210_dc.c @@ -6,7 +6,9 @@ #include #include +#if defined(__arm__) || defined(__aarch64__) #include +#endif #include #include "phytium_display_drv.h" #include "px210_reg.h" diff --git a/drivers/gpu/drm/phytium/px210_dp.h b/drivers/gpu/drm/phytium/px210_dp.h index 07e40265f02c894f5e789250c29ec37622679ef8..b62a3c833ac7fbb74be44d35f9e227d103f0a887 100644 --- a/drivers/gpu/drm/phytium/px210_dp.h +++ b/drivers/gpu/drm/phytium/px210_dp.h @@ -8,6 +8,7 @@ #define __PX210_DP_H__ #define PX210_DP_BACKLIGHT_MAX 100 +#define PX210_DP_BACKLIGHT_MIN 0 void px210_dp_func_register(struct phytium_dp_device *phytium_dp); #endif /* __PX210_DP_H__ */ diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 68cfa25674e509960e51f9c6c27a9d40cea1325c..75f3f30ec59d08d7ad5bdc7add53e18ea2c5266e 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -450,8 +450,10 @@ static void ttm_tt_add_mapping(struct ttm_tt *ttm) if (ttm->page_flags & TTM_PAGE_FLAG_SG) return; - for (i = 0; i < ttm->num_pages; ++i) + for (i = 0; i < ttm->num_pages; ++i) { ttm->pages[i]->mapping = ttm->bdev->dev_mapping; + page_ref_inc(ttm->pages[i]); + } } int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) @@ -481,6 +483,7 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; ++i) { (*page)->mapping = NULL; (*page++)->index = 0; + page_ref_dec(ttm->pages[i]); } } diff --git a/drivers/hwmon/tacho-phytium.c b/drivers/hwmon/tacho-phytium.c index c89251f3595a3a3146832e735d6ef06ebd932d46..7604f68129c8e59c1b99793f26c4328b945462f4 100644 --- a/drivers/hwmon/tacho-phytium.c +++ b/drivers/hwmon/tacho-phytium.c @@ -56,6 +56,8 @@ #define TIMER_INT_CLR_MASK GENMASK(5, 0) +#define TACHO_DRIVER_VERSION "1.1.1" + enum tacho_modes { tacho_mode = 1, capture_mode, @@ -389,3 +391,5 @@ module_platform_driver(phytium_tacho_driver); MODULE_AUTHOR("Zhang Yiqun "); 
MODULE_DESCRIPTION("Phytium tachometer driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(TACHO_DRIVER_VERSION); + diff --git a/drivers/hwspinlock/phytium_hwspinlock.c b/drivers/hwspinlock/phytium_hwspinlock.c index 0294c820a28f4170597249a5b9a6d3c47a04117e..9494c60c21eb92841763729c4e3449420f3e9e0f 100644 --- a/drivers/hwspinlock/phytium_hwspinlock.c +++ b/drivers/hwspinlock/phytium_hwspinlock.c @@ -20,6 +20,8 @@ #include #include "hwspinlock_internal.h" +#define HWSPINLOCK_PHYTIUM_VERSION "1.1.0" + /* Spinlock register offsets */ #define LOCK_BASE 0x10 @@ -182,3 +184,4 @@ module_exit(phytium_hwspinlock_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Hardware spinlock driver for Phytium"); MODULE_AUTHOR("Chen Baozi "); +MODULE_VERSION(HWSPINLOCK_PHYTIUM_VERSION); diff --git a/drivers/i2c/busses/i2c-phytium-common.c b/drivers/i2c/busses/i2c-phytium-common.c index 78f2c8b21ebc536fe7d8bc69228e66590e2a2de9..381e373b0d5ed07ab7b1fcf976946dbd1d7da9c0 100644 --- a/drivers/i2c/busses/i2c-phytium-common.c +++ b/drivers/i2c/busses/i2c-phytium-common.c @@ -200,4 +200,5 @@ void i2c_phytium_disable_int(struct phytium_i2c_dev *dev) MODULE_AUTHOR("Cheng Quan "); MODULE_DESCRIPTION("Phytium I2C bus adapter core"); +MODULE_VERSION(I2C_PHYTIUM_DRV_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-core.h b/drivers/i2c/busses/i2c-phytium-core.h index 726a4f9f4f17285c1a60be38f047d4419df5c24c..5f2e6dd1ea9dd41794fc7ac48b2261922f2a792f 100644 --- a/drivers/i2c/busses/i2c-phytium-core.h +++ b/drivers/i2c/busses/i2c-phytium-core.h @@ -9,6 +9,8 @@ #include #include +#define I2C_PHYTIUM_DRV_VERSION "1.1.0" + #define IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \ I2C_FUNC_SMBUS_BYTE | \ I2C_FUNC_SMBUS_BYTE_DATA | \ diff --git a/drivers/i2c/busses/i2c-phytium-pci.c b/drivers/i2c/busses/i2c-phytium-pci.c index 23feee308d375cf3a773c6314de3295586162069..6b8afb08c37edd87ded0691a55352d9eb7da7c7d 100644 --- a/drivers/i2c/busses/i2c-phytium-pci.c +++ 
b/drivers/i2c/busses/i2c-phytium-pci.c @@ -23,6 +23,10 @@ #define DRV_NAME "i2c-phytium-pci" +static int i2c_msi_enable; +module_param(i2c_msi_enable, int, 0644); +MODULE_PARM_DESC(i2c_msi_enable, "Enable I2C msi interrupt (0-disabled; 1-enabled; default-0)"); + enum phytium_pci_ctl_id_t { octopus_i2c, }; @@ -153,6 +157,16 @@ static int i2c_phytium_pci_probe(struct pci_dev *pdev, goto out; } + if (i2c_msi_enable) { + pci_set_master(pdev); + + ret = pci_enable_msi(pdev); + if (ret) { + dev_dbg(&pdev->dev, "Error enabling MSI. ret = %d\n", ret); + goto out; + } + } + dev->controller = controller; dev->get_clk_rate_khz = i2c_phytium_get_clk_rate_khz; dev->base = pcim_iomap_table(pdev)[0]; @@ -234,4 +248,5 @@ module_pci_driver(phytium_i2c_driver); MODULE_ALIAS("i2c-phytium-pci"); MODULE_AUTHOR("Cheng Quan "); MODULE_DESCRIPTION("Phytium PCI I2C bus adapter"); +MODULE_VERSION(I2C_PHYTIUM_DRV_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-phytium-platform.c b/drivers/i2c/busses/i2c-phytium-platform.c index ea0a1a6437fbce7520a75b409e65130edbf9f83c..47db5e500759c2ae9219f5f28db64f05794caa87 100644 --- a/drivers/i2c/busses/i2c-phytium-platform.c +++ b/drivers/i2c/busses/i2c-phytium-platform.c @@ -164,7 +164,6 @@ static int phytium_i2c_plat_probe(struct platform_device *pdev) 0, 100000, 400000, 1000000, 3400000 }; - irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -361,4 +360,5 @@ module_platform_driver(phytium_i2c_driver); MODULE_ALIAS("platform:i2c-phytium"); MODULE_AUTHOR("Chen Baozi "); MODULE_DESCRIPTION("Phytium I2C bus adapter"); +MODULE_VERSION(I2C_PHYTIUM_DRV_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/phytium-adc.c b/drivers/iio/adc/phytium-adc.c index 5f6e7541685a213745a8c43c12bb0182565f64aa..2a9914d2f1f98abc96afe649503d42c5f3864433 100644 --- a/drivers/iio/adc/phytium-adc.c +++ b/drivers/iio/adc/phytium-adc.c @@ -23,6 +23,8 @@ #include #include +#define ADC_PHYTIUM_DRV_VERSION "1.1.0" + /* ADC register */ #define 
ADC_CTRL_REG 0x00 #define ADC_CTRL_REG_PD_EN BIT(31) @@ -688,3 +690,4 @@ module_platform_driver(phytium_adc_driver); MODULE_AUTHOR("Yang Liu "); MODULE_DESCRIPTION("Phytium ADC driver"); MODULE_LICENSE("GPL v2"); +MODULE_VERSION(ADC_PHYTIUM_DRV_VERSION); diff --git a/drivers/input/keyboard/phytium-keypad.c b/drivers/input/keyboard/phytium-keypad.c index c87e7ff6467c12f9142b90bcb175f64e7b4358ef..e1b8a246a1ffa85c6a1cddb84a1623c031721bd2 100644 --- a/drivers/input/keyboard/phytium-keypad.c +++ b/drivers/input/keyboard/phytium-keypad.c @@ -19,6 +19,8 @@ #include #include #include + +#define KEYPAD_PHYTIUM_DRV_VERSION "1.1.0" /* * Keypad Controller registers */ @@ -583,3 +585,4 @@ MODULE_AUTHOR("Song Wenting "); MODULE_DESCRIPTION("PHYTIUM Keypad Port Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:phytium-keypad"); +MODULE_VERSION(KEYPAD_PHYTIUM_DRV_VERSION); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index cf7524c086268548c7b18129dd1c84e3295fef39..29ee7d5741d7d43d82b6c457dc991f45f41f63c3 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -190,12 +190,13 @@ #define ARM_SMMU_MEMATTR_DEVICE_nGnRE 0x1 #define ARM_SMMU_MEMATTR_OIWB 0xf -#define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1)) -#define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift)) -#define Q_OVERFLOW_FLAG (1 << 31) -#define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG) +#define Q_IDX(llq, p) ((p) & ((1 << (llq)->max_n_shift) - 1)) +#define Q_WRP(llq, p) ((p) & (1 << (llq)->max_n_shift)) +#define Q_OVERFLOW_FLAG (1U << 31) +#define Q_OVF(p) ((p) & Q_OVERFLOW_FLAG) #define Q_ENT(q, p) ((q)->base + \ - Q_IDX(q, p) * (q)->ent_dwords) + Q_IDX(&((q)->llq), p) * \ + (q)->ent_dwords) #define Q_BASE_RWA (1UL << 62) #define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5) @@ -314,6 +315,8 @@ #define CMDQ_ERR_CERROR_ILL_IDX 1 #define CMDQ_ERR_CERROR_ABT_IDX 2 +#define CMDQ_PROD_OWNED_FLAG Q_OVERFLOW_FLAG + #define CMDQ_0_OP GENMASK_ULL(7, 0) #define CMDQ_0_SSV (1UL << 11) @@ 
-370,9 +373,8 @@ #define PRIQ_1_ADDR_MASK GENMASK_ULL(63, 12) /* High-level queue structures */ -#define ARM_SMMU_POLL_TIMEOUT_US 100 -#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000 /* 1s! */ -#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10 +#define ARM_SMMU_POLL_TIMEOUT_US 1000000 /* 1s! */ +#define ARM_SMMU_POLL_SPIN_COUNT 10 #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -460,13 +462,29 @@ struct arm_smmu_cmdq_ent { #define CMDQ_OP_CMD_SYNC 0x46 struct { - u32 msidata; u64 msiaddr; } sync; }; }; +struct arm_smmu_ll_queue { + union { + u64 val; + struct { + u32 prod; + u32 cons; + }; + struct { + atomic_t prod; + atomic_t cons; + } atomic; + u8 __pad[SMP_CACHE_BYTES]; + } ____cacheline_aligned_in_smp; + u32 max_n_shift; +}; + struct arm_smmu_queue { + struct arm_smmu_ll_queue llq; int irq; /* Wired interrupt */ __le64 *base; @@ -474,17 +492,23 @@ struct arm_smmu_queue { u64 q_base; size_t ent_dwords; - u32 max_n_shift; - u32 prod; - u32 cons; u32 __iomem *prod_reg; u32 __iomem *cons_reg; }; +struct arm_smmu_queue_poll { + ktime_t timeout; + unsigned int delay; + unsigned int spin_cnt; + bool wfe; +}; + struct arm_smmu_cmdq { struct arm_smmu_queue q; - spinlock_t lock; + atomic_long_t *valid_map; + atomic_t owner_prod; + atomic_t lock; }; struct arm_smmu_evtq { @@ -577,8 +601,6 @@ struct arm_smmu_device { int gerr_irq; int combined_irq; - u32 sync_nr; - u8 prev_cmd_opcode; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -597,12 +619,6 @@ struct arm_smmu_device { struct arm_smmu_strtab_cfg strtab_cfg; - /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */ - union { - u32 sync_count; - u64 padding; - }; - /* IOMMU core code handle */ struct iommu_device iommu; bool bypass; @@ -678,85 +694,99 @@ static void parse_driver_options(struct arm_smmu_device *smmu) } /* Low-level queue manipulation functions */ -static bool queue_full(struct arm_smmu_queue *q) +static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n) +{ + u32 
space, prod, cons; + + prod = Q_IDX(q, q->prod); + cons = Q_IDX(q, q->cons); + + if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) + space = (1 << q->max_n_shift) - (prod - cons); + else + space = cons - prod; + + return space >= n; +} + +static bool queue_full(struct arm_smmu_ll_queue *q) { return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && Q_WRP(q, q->prod) != Q_WRP(q, q->cons); } -static bool queue_empty(struct arm_smmu_queue *q) +static bool queue_empty(struct arm_smmu_ll_queue *q) { return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && Q_WRP(q, q->prod) == Q_WRP(q, q->cons); } -static void queue_sync_cons(struct arm_smmu_queue *q) +static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod) { - q->cons = readl_relaxed(q->cons_reg); + return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || + ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && + (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); } -static void queue_inc_cons(struct arm_smmu_queue *q) +static void queue_sync_cons_out(struct arm_smmu_queue *q) { - u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; - - q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); - /* * Ensure that all CPU accesses (reads and writes) to the queue * are complete before we update the cons pointer. 
*/ mb(); - writel_relaxed(q->cons, q->cons_reg); + writel_relaxed(q->llq.cons, q->cons_reg); +} + +static void queue_inc_cons(struct arm_smmu_ll_queue *q) +{ + u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; + + q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); } -static int queue_sync_prod(struct arm_smmu_queue *q) +static int queue_sync_prod_in(struct arm_smmu_queue *q) { int ret = 0; u32 prod = readl_relaxed(q->prod_reg); - if (Q_OVF(q, prod) != Q_OVF(q, q->prod)) + if (Q_OVF(prod) != Q_OVF(q->llq.prod)) ret = -EOVERFLOW; - q->prod = prod; + q->llq.prod = prod; return ret; } -static void queue_inc_prod(struct arm_smmu_queue *q) +static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n) { - u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1; + u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; - q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); - writel(q->prod, q->prod_reg); + return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); } -/* - * Wait for the SMMU to consume items. If drain is true, wait until the queue - * is empty. Otherwise, wait until there is at least one free slot. - */ -static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe) +static void queue_poll_init(struct arm_smmu_device *smmu, + struct arm_smmu_queue_poll *qp) { - ktime_t timeout; - unsigned int delay = 1, spin_cnt = 0; - - /* Wait longer if it's a CMD_SYNC */ - timeout = ktime_add_us(ktime_get(), sync ? - ARM_SMMU_CMDQ_SYNC_TIMEOUT_US : - ARM_SMMU_POLL_TIMEOUT_US); + qp->delay = 1; + qp->spin_cnt = 0; + qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); +} - while (queue_sync_cons(q), (sync ? 
!queue_empty(q) : queue_full(q))) { - if (ktime_compare(ktime_get(), timeout) > 0) - return -ETIMEDOUT; +static int queue_poll(struct arm_smmu_queue_poll *qp) +{ + if (ktime_compare(ktime_get(), qp->timeout) > 0) + return -ETIMEDOUT; - if (wfe) { - wfe(); - } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) { - cpu_relax(); - continue; - } else { - udelay(delay); - delay *= 2; - spin_cnt = 0; - } + if (qp->wfe) { + wfe(); + } else if (++qp->spin_cnt < ARM_SMMU_POLL_SPIN_COUNT) { + cpu_relax(); + } else { + udelay(qp->delay); + qp->delay *= 2; + qp->spin_cnt = 0; } return 0; @@ -770,16 +800,6 @@ static void queue_write(__le64 *dst, u64 *src, size_t n_dwords) *dst++ = cpu_to_le64(*src++); } -static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent) -{ - if (queue_full(q)) - return -ENOSPC; - - queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords); - queue_inc_prod(q); - return 0; -} - static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) { int i; @@ -790,11 +810,12 @@ static void queue_read(__le64 *dst, u64 *src, size_t n_dwords) static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) { - if (queue_empty(q)) + if (queue_empty(&q->llq)) return -EAGAIN; - queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords); - queue_inc_cons(q); + queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); + queue_inc_cons(&q->llq); + queue_sync_cons_out(q); return 0; } @@ -854,20 +875,14 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); break; case CMDQ_OP_CMD_SYNC: - if (ent->sync.msiaddr) + if (ent->sync.msiaddr) { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_IRQ); - else + cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; + } else { cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV); + } cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH); cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB); - /* - * Commands are written little-endian, but we want 
the SMMU to - * receive MSIData, and thus write it back to memory, in CPU - * byte order, so big-endian needs an extra byteswap here. - */ - cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, - cpu_to_le32(ent->sync.msidata)); - cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; break; default: return -ENOENT; @@ -876,6 +891,27 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) return 0; } +static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu, + u32 prod) +{ + struct arm_smmu_queue *q = &smmu->cmdq.q; + struct arm_smmu_cmdq_ent ent = { + .opcode = CMDQ_OP_CMD_SYNC, + }; + + /* + * Beware that Hi16xx adds an extra 32 bits of goodness to its MSI + * payload, so the write will zero the entire command on that platform. + */ + if (smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) { + ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * + q->ent_dwords * 8; + } + + arm_smmu_cmdq_build_cmd(cmd, &ent); +} + static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) { static const char *cerror_str[] = { @@ -925,108 +961,440 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); } -static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd) +/* + * Command queue locking. + * This is a form of bastardised rwlock with the following major changes: + * + * - The only LOCK routines are exclusive_trylock() and shared_lock(). + * Neither have barrier semantics, and instead provide only a control + * dependency. + * + * - The UNLOCK routines are supplemented with shared_tryunlock(), which + * fails if the caller appears to be the last lock holder (yes, this is + * racy). All successful UNLOCK routines have RELEASE semantics. 
+ */ +static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) { - struct arm_smmu_queue *q = &smmu->cmdq.q; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); + int val; + + /* + * We can try to avoid the cmpxchg() loop by simply incrementing the + * lock counter. When held in exclusive state, the lock counter is set + * to INT_MIN so these increments won't hurt as the value will remain + * negative. + */ + if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) + return; + + do { + val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); + } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); +} + +static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) +{ + (void)atomic_dec_return_release(&cmdq->lock); +} + +static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) +{ + if (atomic_read(&cmdq->lock) == 1) + return false; + + arm_smmu_cmdq_shared_unlock(cmdq); + return true; +} + +#define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \ +({ \ + bool __ret; \ + local_irq_save(flags); \ + __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \ + if (!__ret) \ + local_irq_restore(flags); \ + __ret; \ +}) + +#define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \ +({ \ + atomic_set_release(&cmdq->lock, 0); \ + local_irq_restore(flags); \ +}) + + +/* + * Command queue insertion. + * This is made fiddly by our attempts to achieve some sort of scalability + * since there is one queue shared amongst all of the CPUs in the system. If + * you like mixed-size concurrency, dependency ordering and relaxed atomics, + * then you'll *love* this monstrosity. + * + * The basic idea is to split the queue up into ranges of commands that are + * owned by a given CPU; the owner may not have written all of the commands + * itself, but is responsible for advancing the hardware prod pointer when + * the time comes. The algorithm is roughly: + * + * 1. Allocate some space in the queue. 
At this point we also discover + * whether the head of the queue is currently owned by another CPU, + * or whether we are the owner. + * + * 2. Write our commands into our allocated slots in the queue. + * + * 3. Mark our slots as valid in arm_smmu_cmdq.valid_map. + * + * 4. If we are an owner: + * a. Wait for the previous owner to finish. + * b. Mark the queue head as unowned, which tells us the range + * that we are responsible for publishing. + * c. Wait for all commands in our owned range to become valid. + * d. Advance the hardware prod pointer. + * e. Tell the next owner we've finished. + * + * 5. If we are inserting a CMD_SYNC (we may or may not have been an + * owner), then we need to stick around until it has completed: + * a. If we have MSIs, the SMMU can write back into the CMD_SYNC + * to clear the first 4 bytes. + * b. Otherwise, we spin waiting for the hardware cons pointer to + * advance past our command. + * + * The devil is in the details, particularly the use of locking for handling + * SYNC completion and freeing up space in the queue before we think that it is + * full. 
+ */ +static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod, bool set) +{ + u32 swidx, sbidx, ewidx, ebidx; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = sprod, + }; + + ewidx = BIT_WORD(Q_IDX(&llq, eprod)); + ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG; + + while (llq.prod != eprod) { + unsigned long mask; + atomic_long_t *ptr; + u32 limit = BITS_PER_LONG; + + swidx = BIT_WORD(Q_IDX(&llq, llq.prod)); + sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG; - smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]); + ptr = &cmdq->valid_map[swidx]; - while (queue_insert_raw(q, cmd) == -ENOSPC) { - if (queue_poll_cons(q, false, wfe)) - dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); + if ((swidx == ewidx) && (sbidx < ebidx)) + limit = ebidx; + + mask = GENMASK(limit - 1, sbidx); + + /* + * The valid bit is the inverse of the wrap bit. This means + * that a zero-initialised queue is invalid and, after marking + * all entries as valid, they become invalid again when we + * wrap. 
+ */ + if (set) { + atomic_long_xor(mask, ptr); + } else { /* Poll */ + unsigned long valid; + + valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask; + atomic_long_cond_read_relaxed(ptr, (VAL & mask) == valid); + } + + llq.prod = queue_inc_prod_n(&llq, limit - sbidx); } } -static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, - struct arm_smmu_cmdq_ent *ent) +/* Mark all entries in the range [sprod, eprod) as valid */ +static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) +{ + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true); +} + +/* Wait for all entries in the range [sprod, eprod) to become valid */ +static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq, + u32 sprod, u32 eprod) +{ + __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false); +} + +/* Wait for the command queue to become non-full */ +static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; unsigned long flags; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + int ret = 0; - if (arm_smmu_cmdq_build_cmd(cmd, ent)) { - dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", - ent->opcode); - return; + /* + * Try to update our copy of cons by grabbing exclusive cmdq access. If + * that fails, spin until somebody else updates it for us. 
+ */ + if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) { + WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); + arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags); + llq->val = READ_ONCE(cmdq->q.llq.val); + return 0; } - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + queue_poll_init(smmu, &qp); + do { + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + if (!queue_full(llq)) + break; + + ret = queue_poll(&qp); + } while (!ret); + + return ret; } /* - * The difference between val and sync_idx is bounded by the maximum size of - * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic. + * Wait until the SMMU signals a CMD_SYNC completion MSI. + * Must be called with the cmdq lock held in some capacity. */ -static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx) +static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - ktime_t timeout; - u32 val; + int ret = 0; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); - timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US); - val = smp_cond_load_acquire(&smmu->sync_count, - (int)(VAL - sync_idx) >= 0 || - !ktime_before(ktime_get(), timeout)); + queue_poll_init(smmu, &qp); - return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0; + /* + * The MSI won't generate an event, since it's being written back + * into the command queue. + */ + qp.wfe = false; + smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(&qp))); + llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1); + return ret; } -static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu) +/* + * Wait until the SMMU cons index passes llq->prod. + * Must be called with the cmdq lock held in some capacity. 
+ */ +static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) { - u64 cmd[CMDQ_ENT_DWORDS]; - unsigned long flags; - struct arm_smmu_cmdq_ent ent = { - .opcode = CMDQ_OP_CMD_SYNC, - .sync = { - .msiaddr = virt_to_phys(&smmu->sync_count), - }, - }; + struct arm_smmu_queue_poll qp; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + u32 prod = llq->prod; + int ret = 0; - spin_lock_irqsave(&smmu->cmdq.lock, flags); + queue_poll_init(smmu, &qp); + llq->val = READ_ONCE(smmu->cmdq.q.llq.val); + do { + if (queue_consumed(llq, prod)) + break; - /* Piggy-back on the previous command if it's a SYNC */ - if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) { - ent.sync.msidata = smmu->sync_nr; - } else { - ent.sync.msidata = ++smmu->sync_nr; - arm_smmu_cmdq_build_cmd(cmd, &ent); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - } + ret = queue_poll(&qp); + + /* + * This needs to be a readl() so that our subsequent call + * to arm_smmu_cmdq_shared_tryunlock() can fail accurately. + * + * Specifically, we need to ensure that we observe all + * shared_lock()s by other CMD_SYNCs that share our owner, + * so that a failing call to tryunlock() means that we're + * the last one out and therefore we can safely advance + * cmdq->q.llq.cons. Roughly speaking: + * + * CPU 0 CPU1 CPU2 (us) + * + * if (sync) + * shared_lock(); + * + * dma_wmb(); + * set_valid_map(); + * + * if (owner) { + * poll_valid_map(); + * + * writel(prod_reg); + * + * readl(cons_reg); + * tryunlock(); + * + * Requires us to see CPU 0's shared_lock() acquisition. 
+ */ + llq->cons = readl(cmdq->q.cons_reg); + } while (!ret); + + return ret; +} - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); +static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu, + struct arm_smmu_ll_queue *llq) +{ + if (smmu->features & ARM_SMMU_FEAT_MSI && + smmu->features & ARM_SMMU_FEAT_COHERENCY) + return __arm_smmu_cmdq_poll_until_msi(smmu, llq); - return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata); + return __arm_smmu_cmdq_poll_until_consumed(smmu, llq); } -static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds, + u32 prod, int n) { - u64 cmd[CMDQ_ENT_DWORDS]; + int i; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + .prod = prod, + }; + + for (i = 0; i < n; ++i) { + u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS]; + + prod = queue_inc_prod_n(&llq, i); + queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); + } +} + +static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, + u64 *cmds, int n, bool sync) +{ + u64 cmd_sync[CMDQ_ENT_DWORDS]; + u32 prod; unsigned long flags; - bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); - struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC }; - int ret; + bool owner; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + struct arm_smmu_ll_queue llq = { + .max_n_shift = cmdq->q.llq.max_n_shift, + }, head = llq; + int ret = 0; - arm_smmu_cmdq_build_cmd(cmd, &ent); + /* 1. 
Allocate some space in the queue */ + local_irq_save(flags); + llq.val = READ_ONCE(cmdq->q.llq.val); + do { + u64 old; + + while (!queue_has_space(&llq, n + sync)) { + local_irq_restore(flags); + if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq)) + dev_err_ratelimited(smmu->dev, "CMDQ timeout\n"); + local_irq_save(flags); + } + + head.cons = llq.cons; + head.prod = queue_inc_prod_n(&llq, n + sync) | + CMDQ_PROD_OWNED_FLAG; + + old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); + if (old == llq.val) + break; - spin_lock_irqsave(&smmu->cmdq.lock, flags); - arm_smmu_cmdq_insert_cmd(smmu, cmd); - ret = queue_poll_cons(&smmu->cmdq.q, true, wfe); - spin_unlock_irqrestore(&smmu->cmdq.lock, flags); + llq.val = old; + } while (1); + owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG); + head.prod &= ~CMDQ_PROD_OWNED_FLAG; + llq.prod &= ~CMDQ_PROD_OWNED_FLAG; + /* + * 2. Write our commands into the queue + * Dependency ordering from the cmpxchg() loop above. + */ + arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); + if (sync) { + prod = queue_inc_prod_n(&llq, n); + arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod); + queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); + + /* + * In order to determine completion of our CMD_SYNC, we must + * ensure that the queue can't wrap twice without us noticing. + * We achieve that by taking the cmdq lock as shared before + * marking our slot as valid. + */ + arm_smmu_cmdq_shared_lock(cmdq); + } + + /* 3. Mark our slots as valid, ensuring commands are visible first */ + dma_wmb(); + arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); + + /* 4. If we are the owner, take control of the SMMU hardware */ + if (owner) { + /* a. Wait for previous owner to finish */ + atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod); + + /* b. Stop gathering work by clearing the owned flag */ + prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG, + &cmdq->q.llq.atomic.prod); + prod &= ~CMDQ_PROD_OWNED_FLAG; + + /* + * c. 
Wait for any gathered work to be written to the queue. + * Note that we read our own entries so that we have the control + * dependency required by (d). + */ + arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod); + + /* + * d. Advance the hardware prod pointer + * Control dependency ordering from the entries becoming valid. + */ + writel_relaxed(prod, cmdq->q.prod_reg); + + /* + * e. Tell the next owner we're done + * Make sure we've updated the hardware first, so that we don't + * race to update prod and potentially move it backwards. + */ + atomic_set_release(&cmdq->owner_prod, prod); + } + + /* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */ + if (sync) { + llq.prod = queue_inc_prod_n(&llq, n); + ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq); + if (ret) { + dev_err_ratelimited(smmu->dev, + "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n", + llq.prod, + readl_relaxed(cmdq->q.prod_reg), + readl_relaxed(cmdq->q.cons_reg)); + } + + /* + * Try to unlock the cmdq lock. This will fail if we're the last + * reader, in which case we can safely update cmdq->q.llq.cons + */ + if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { + WRITE_ONCE(cmdq->q.llq.cons, llq.cons); + arm_smmu_cmdq_shared_unlock(cmdq); + } + } + + local_irq_restore(flags); return ret; } -static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_ent *ent) { - int ret; - bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) && - (smmu->features & ARM_SMMU_FEAT_COHERENCY); + u64 cmd[CMDQ_ENT_DWORDS]; - ret = msi ?
__arm_smmu_cmdq_issue_sync_msi(smmu) - : __arm_smmu_cmdq_issue_sync(smmu); - if (ret) - dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n"); + if (arm_smmu_cmdq_build_cmd(cmd, ent)) { + dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n", + ent->opcode); + return -EINVAL; + } + + return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false); +} + +static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) +{ + return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true); } /* Context descriptor manipulation functions */ @@ -1260,6 +1628,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) int i; struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->evtq.q; + struct arm_smmu_ll_queue *llq = &q->llq; u64 evt[EVTQ_ENT_DWORDS]; do { @@ -1277,12 +1646,13 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) * Not much we can do on overflow, so scream and pretend we're * trying harder. */ - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); return IRQ_HANDLED; } @@ -1328,19 +1698,21 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev) { struct arm_smmu_device *smmu = dev; struct arm_smmu_queue *q = &smmu->priq.q; + struct arm_smmu_ll_queue *llq = &q->llq; u64 evt[PRIQ_ENT_DWORDS]; do { while (!queue_remove_raw(q, evt)) arm_smmu_handle_ppr(smmu, evt); - if (queue_sync_prod(q) == -EOVERFLOW) + if (queue_sync_prod_in(q) == -EOVERFLOW) dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n"); - } while (!queue_empty(q)); + } while (!queue_empty(llq)); /* Sync our overflow flag, as we believe we're up to speed */ - q->cons = Q_OVF(q, q->prod) | Q_WRP(q, 
q->cons) | Q_IDX(q, q->cons); - writel(q->cons, q->cons_reg); + llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) | + Q_IDX(llq, llq->cons); + queue_sync_cons_out(q); return IRQ_HANDLED; } @@ -1436,6 +1808,13 @@ static void arm_smmu_tlb_inv_context(void *cookie) cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; } + /* + * NOTE: when io-pgtable is in non-strict mode, we may get here with + * PTEs previously cleared by unmaps on the current CPU not yet visible + * to the SMMU. We are relying on the dma_wmb() implicit during cmd + * insertion to guarantee those are observed before the TLBI. Do be + * careful, 007. + */ arm_smmu_cmdq_issue_cmd(smmu, &cmd); __arm_smmu_tlb_sync(smmu); } @@ -2061,13 +2440,13 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, size_t qsz; do { - qsz = ((1 << q->max_n_shift) * dwords) << 3; + qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL); if (q->base || qsz < PAGE_SIZE) break; - q->max_n_shift--; + q->llq.max_n_shift--; } while (1); if (!q->base) { @@ -2079,7 +2458,7 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, if (!WARN_ON(q->base_dma & (qsz - 1))) { dev_info(smmu->dev, "allocated %u entries for %s\n", - 1 << q->max_n_shift, name); + 1 << q->llq.max_n_shift, name); } q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); @@ -2088,24 +2467,56 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, q->q_base = Q_BASE_RWA; q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; - q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift); + q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); - q->prod = q->cons = 0; + q->llq.prod = q->llq.cons = 0; return 0; } +static void arm_smmu_cmdq_free_bitmap(void *data) +{ + unsigned long *bitmap = data; + + bitmap_free(bitmap); +} + +static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) +{ + int ret = 0; + struct arm_smmu_cmdq *cmdq = &smmu->cmdq; + unsigned int nents = 1 << 
cmdq->q.llq.max_n_shift; + atomic_long_t *bitmap; + + atomic_set(&cmdq->owner_prod, 0); + atomic_set(&cmdq->lock, 0); + + bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL); + if (!bitmap) { + dev_err(smmu->dev, "failed to allocate cmdq bitmap\n"); + ret = -ENOMEM; + } else { + cmdq->valid_map = bitmap; + devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap); + } + + return ret; +} + static int arm_smmu_init_queues(struct arm_smmu_device *smmu) { int ret; /* cmdq */ - spin_lock_init(&smmu->cmdq.lock); ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS, "cmdq"); if (ret) return ret; + ret = arm_smmu_cmdq_init(smmu); + if (ret) + return ret; + /* evtq */ ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS, @@ -2530,8 +2941,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) /* Command queue */ writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); - writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD); - writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS); + writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); + writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); enables = CR0_CMDQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2558,9 +2969,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) /* Event queue */ writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); - writel_relaxed(smmu->evtq.q.prod, + writel_relaxed(smmu->evtq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu)); - writel_relaxed(smmu->evtq.q.cons, + writel_relaxed(smmu->evtq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu)); enables |= CR0_EVTQEN; @@ -2575,9 +2986,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) if (smmu->features & ARM_SMMU_FEAT_PRI) { 
writeq_relaxed(smmu->priq.q.q_base, smmu->base + ARM_SMMU_PRIQ_BASE); - writel_relaxed(smmu->priq.q.prod, + writel_relaxed(smmu->priq.q.llq.prod, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu)); - writel_relaxed(smmu->priq.q.cons, + writel_relaxed(smmu->priq.q.llq.cons, arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu)); enables |= CR0_PRIQEN; @@ -2721,18 +3132,24 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } /* Queue sizes, capped to ensure natural alignment */ - smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_CMDQS, reg)); - if (!smmu->cmdq.q.max_n_shift) { - /* Odd alignment restrictions on the base, so ignore for now */ - dev_err(smmu->dev, "unit-length command queue not supported\n"); + smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_CMDQS, reg)); + if (smmu->cmdq.q.llq.max_n_shift < ilog2(BITS_PER_LONG)) { + /* + * The cmdq valid_map relies on the total number of entries + * being a multiple of BITS_PER_LONG. There's also no way + * we can handle the weird alignment restrictions on the + * base pointer for a unit-length queue. 
+ */ + dev_err(smmu->dev, "command queue size < %d entries not supported\n", + BITS_PER_LONG); return -ENXIO; } - smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_EVTQS, reg)); - smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, - FIELD_GET(IDR1_PRIQS, reg)); + smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_EVTQS, reg)); + smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, + FIELD_GET(IDR1_PRIQS, reg)); /* SID/SSID sizes */ smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 421f8281a5d7cfa8742d2b278fb0b2fd5c5ecca5..6b9bcccb8f384fa31eb71d0d00996349c7c520af 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -971,7 +971,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, if (!gic_dist_supports_lpis()) return -EPERM; irq_domain_set_info(d, irq, hw, chip, d->host_data, - handle_fasteoi_irq, NULL, NULL); + handle_fasteoi_edge_irq, NULL, NULL); } /* Prevents SW retriggers which mess up the ACK/EOI ordering */ diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c index 35e2891af54fcc3702739cda8c5a0a277d944a09..abd0ea90fead655a6ec2bede74cf02d3fa6c725a 100644 --- a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c +++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.c @@ -71,9 +71,13 @@ static u32 phytium_jpeg_header[PHYTIUM_JPEG_HEADER_SIZE] = { static char yuv_mode_str[YUV_MODE_STR_LEN] = { "yuv444" }; module_param_string(yuv_mode, yuv_mode_str, sizeof(yuv_mode_str), 0444); -MODULE_PARM_DESC(yuv_mode, "Users select one mode from such modes as" - " 'yuv444', or 'yuv422', or 'yuv420'. If no mode is set," - " the driver adapts defaults mode 'yuv444'."); +MODULE_PARM_DESC(yuv_mode, "Users select one mode from such modes as\n" + " \t\t'yuv444', or 'yuv422', or 'yuv420'. 
If no mode is set,\n" + " \t\tthe driver adapts defaults mode 'yuv444'."); + +/* The below global variables are used to filter same log-print lines */ +static bool first_invalid = true; +static bool cur_non_zero = true; static u32 phytium_jpeg_read(struct phytium_jpeg_dev *jpeg_dev, u32 reg) { @@ -168,18 +172,19 @@ static void phytium_jpeg_off(struct phytium_jpeg_dev *jpeg_dev) u32 clear_all_interrupt = INT_FIFO_OVERFLOW | INT_OCM_BUF_OVERFLOW | INT_JPEG_ENCODE_COMPLETE | INT_VIDEO_FORMAT_CHANGE; - if (!test_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status)) { - dev_info(jpeg_dev->dev, "JPEG Engine is already off.\n"); - return; - } - /* disable all interrupt */ phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, disable_all_interrupt); /* clear all interrupt */ phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, clear_all_interrupt); + /* disable JPEG engine */ phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, TRANSINFO_ENABLE_ENGINE, 0); + if (!test_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status)) { + dev_info(jpeg_dev->dev, "JPEG Engine is already off.\n"); + return; + } + clear_bit(VIDEO_CLOCKS_ON, &jpeg_dev->status); /* wait 50 ms */ mdelay(50); @@ -220,21 +225,28 @@ static void phytium_jpeg_get_resolution(struct phytium_jpeg_dev *jpeg_dev) if (width * height != 0) { detected_timings->width = width; detected_timings->height = height; + jpeg_dev->v4l2_input_status = 0; + cur_non_zero = true; + } else { + /* filter some repeated log-print lines */ + first_invalid = cur_non_zero; + cur_non_zero = false; } - jpeg_dev->v4l2_input_status = 0; - /* * Resolution is changed will trigger an interrupt, resolution detecting * also is disable during process interrupt. So re-enable. 
*/ phytium_jpeg_enable_source_detecting(jpeg_dev); - dev_info(jpeg_dev->dev, "Change resolution: %uX%u\n", width, height); + + if (cur_non_zero == true || first_invalid == true) { + dev_info(jpeg_dev->dev, "Change resolution: %uX%u\n", width, height); + } } static void phytium_jpeg_set_resolution(struct phytium_jpeg_dev *jpeg_dev) { - struct v4l2_bt_timings *active_timings = &jpeg_dev->active_timings; + struct v4l2_bt_timings *active_timings = &jpeg_dev->active_timings; int i; int src_addrs[OCM_BUF_NUM]; /* @@ -481,6 +493,9 @@ static int phytium_jpeg_query_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { int ret; + u32 source_info; + u32 width; + u32 height; struct phytium_jpeg_dev *jpeg_dev = video_drvdata(file); /* @@ -502,6 +517,15 @@ static int phytium_jpeg_query_dv_timings(struct file *file, void *priv, timings->type = V4L2_DV_BT_656_1120; timings->bt = jpeg_dev->detected_timings; + /* Get resolution from SRC_VGA_INFO_REG */ + source_info = phytium_jpeg_read(jpeg_dev, SRC_VGA_INFO_REG); + width = (source_info & SRC_HOR_PIXELS) >> SRC_WIDTH_SHIFT; + height = (source_info & SRC_VER_PIXELS) >> SRC_HEIGHT_SHIFT; + + /* Check if that the current resolution is zero. */ + if (width == 0 || height == 0) + jpeg_dev->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + return jpeg_dev->v4l2_input_status ? -ENOLINK : 0; } @@ -740,8 +764,10 @@ static int phytium_jpeg_start_frame(struct phytium_jpeg_dev *jpeg_dev) unsigned long status; struct phytium_jpeg_buffer *jpeg_buf; + /* JPEG Engine shouldn't be enable to compress in the case no signal is input JPEG Engine. 
+ * V4L2_IN_ST_NO_SIGNAL + */ if (jpeg_dev->v4l2_input_status) { - dev_err(jpeg_dev->dev, "No signal; needn't start frame\n"); return 0; } @@ -857,7 +883,7 @@ static int phytium_jpeg_buf_prepare(struct vb2_buffer *vb) static inline struct phytium_jpeg_buffer * phytium_vb2buf_to_dstbuf(struct vb2_v4l2_buffer *buf) { - return container_of(buf, struct phytium_jpeg_buffer, vb); + return container_of(buf, struct phytium_jpeg_buffer, vb); } static void phytium_jpeg_buf_queue(struct vb2_buffer *vb) @@ -902,7 +928,6 @@ static void phytium_jpeg_irq_res_change(struct phytium_jpeg_dev *jpeg_dev, ulong delay) { dev_info(jpeg_dev->dev, "Source resolution is changed, resetting\n"); - set_bit(VIDEO_RES_CHANGE, &jpeg_dev->status); phytium_jpeg_off(jpeg_dev); @@ -917,7 +942,12 @@ static irqreturn_t phytium_jpeg_irq(int irq, void *arg) u32 frame_size; if (test_bit(VIDEO_POWEROFF, &jpeg_dev->status)) { - dev_info(jpeg_dev->dev, "jpeg engine is requested to poweroff\n"); + dev_info(jpeg_dev->dev, "jpeg engine is requested to poweroff 0x%x\n", + phytium_jpeg_read(jpeg_dev, INT_STATUS_CTRL_REG)); + /* Disable interruption */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, STS_VE_JPEG_CODE_COMP_EN, 0); + /* clear all interruption of the hardware's buffers */ + phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, INT_JPEG_ENCODE_COMPLETE, 1); return IRQ_HANDLED; } @@ -1068,7 +1098,7 @@ static irqreturn_t phytium_jpeg_timer31_irq(int irq, void *arg) /* clear timer interrupt status */ writel(0x8, jpeg_dev->timer31_addr + 0x2c); - /* clear JPEG Engine's poweroff status */ + /* clear JPEG Engine's poweroff status */ clear_bit(VIDEO_POWEROFF, &jpeg_dev->status); dev_info(jpeg_dev->dev, "timer31 set jpeg status 0x%lx\n", jpeg_dev->status); @@ -1108,17 +1138,28 @@ static irqreturn_t phytium_jpeg_timer30_irq(int irq, void *arg) struct phytium_jpeg_dev *jpeg_dev = arg; struct arm_smccc_res res; + u32 disable_all_interrupt = 0; + u32 clear_all_interrupt = INT_FIFO_OVERFLOW | INT_OCM_BUF_OVERFLOW 
| + INT_JPEG_ENCODE_COMPLETE | INT_VIDEO_FORMAT_CHANGE; + /* disable timer interrupt */ writel(0, jpeg_dev->timer30_addr); /* clear timer interrupt status */ writel(0x8, jpeg_dev->timer30_addr + 0x2c); - /* Disable interruption */ - phytium_jpeg_update(jpeg_dev, INT_STATUS_CTRL_REG, STS_VE_JPEG_CODE_COMP_EN, 0); + /* disable all interrupts */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, disable_all_interrupt); + udelay(5); + /* clear all interrupts */ + phytium_jpeg_write(jpeg_dev, INT_STATUS_CTRL_REG, clear_all_interrupt); + + /* disable JPEG engine */ + phytium_jpeg_update(jpeg_dev, TRANSFORM_INFO_REG, 0, 0); /* call SE to poweroff JPEG Engine */ arm_smccc_smc(0xc300fff4, 0x9, 0x2, 0x80000020, 0, 0, 0, 0, &res); + set_bit(VIDEO_RES_CHANGE, &jpeg_dev->status); /* set JPEG Engine's status is poweroff */ set_bit(VIDEO_POWEROFF, &jpeg_dev->status); dev_info(jpeg_dev->dev, "timer30 set jpeg status 0x%lx\n", jpeg_dev->status); @@ -1203,12 +1244,44 @@ static int phytium_jpeg_init(struct phytium_jpeg_dev *jpeg_dev) } +/* The function is provided for user space adjusts the sampling mode. 
*/ +static int phytium_jpeg_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct phytium_jpeg_dev *jpeg_dev = container_of(ctrl->handler, + struct phytium_jpeg_dev, + ctrl_handler); + if (ctrl->id != V4L2_CID_JPEG_CHROMA_SUBSAMPLING) + return -EINVAL; + + switch (ctrl->val) { + case V4L2_JPEG_CHROMA_SUBSAMPLING_420: + strncpy(yuv_mode_str, "yuv420", sizeof(yuv_mode_str)); + break; + case V4L2_JPEG_CHROMA_SUBSAMPLING_422: + strncpy(yuv_mode_str, "yuv422", sizeof(yuv_mode_str)); + break; + default: + strncpy(yuv_mode_str, "yuv444", sizeof(yuv_mode_str)); + } + phytium_jpeg_set_yuv_mode(jpeg_dev); + dev_info(jpeg_dev->dev, "current sample mode is %s\n", yuv_mode_str); + return 0; +} + +static const struct v4l2_ctrl_ops phytium_jpeg_ctrl_ops = { + .s_ctrl = phytium_jpeg_set_ctrl, +}; + + static int phytium_jpeg_setup_video(struct phytium_jpeg_dev *jpeg_dev) { struct v4l2_device *v4l2_dev = &jpeg_dev->v4l2_dev; struct vb2_queue *dst_vq = &jpeg_dev->queue; struct video_device *vdev = &jpeg_dev->vdev; + const u64 mask = ~(BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_444) | + BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_422) | + BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_420)); int ret; jpeg_dev->pix_fmt.pixelformat = V4L2_PIX_FMT_JPEG; @@ -1224,6 +1297,20 @@ static int phytium_jpeg_setup_video(struct phytium_jpeg_dev *jpeg_dev) } /* Register how many v4l2 controls to a handler */ + v4l2_ctrl_handler_init(&jpeg_dev->ctrl_handler, 1); + v4l2_ctrl_new_std_menu(&jpeg_dev->ctrl_handler, &phytium_jpeg_ctrl_ops, + V4L2_CID_JPEG_CHROMA_SUBSAMPLING, + V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask, + V4L2_JPEG_CHROMA_SUBSAMPLING_444); + + if (jpeg_dev->ctrl_handler.error) { + v4l2_ctrl_handler_free(&jpeg_dev->ctrl_handler); + dev_err(jpeg_dev->dev, "Failed to init v4l2 controls:%d\n", + jpeg_dev->ctrl_handler.error); + goto err_v4l2_register; + } + v4l2_dev->ctrl_handler = &jpeg_dev->ctrl_handler; + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; dst_vq->dev = v4l2_dev->dev; @@ 
-1236,6 +1323,7 @@ static int phytium_jpeg_setup_video(struct phytium_jpeg_dev *jpeg_dev) dst_vq->min_buffers_needed = CAPTURE_BUF_NUMBER; ret = vb2_queue_init(dst_vq); if (ret) { + v4l2_ctrl_handler_free(&jpeg_dev->ctrl_handler); dev_err(jpeg_dev->dev, "Failed to init vb2 queue\n"); goto err_v4l2_register; } @@ -1265,7 +1353,7 @@ static int phytium_jpeg_setup_video(struct phytium_jpeg_dev *jpeg_dev) err_video_register: vb2_queue_release(dst_vq); - + v4l2_ctrl_handler_free(&jpeg_dev->ctrl_handler); err_v4l2_register: v4l2_device_unregister(v4l2_dev); return ret; @@ -1354,10 +1442,14 @@ static int phytium_jpeg_remove(struct platform_device *pdev) phytium_jpeg_off(jpeg_dev); + phytium_jpeg_write(jpeg_dev, TRANSFORM_INFO_REG, 0); + video_unregister_device(&jpeg_dev->vdev); vb2_queue_release(&jpeg_dev->queue); + v4l2_ctrl_handler_free(&jpeg_dev->ctrl_handler); + v4l2_device_unregister(v4l2_dev); of_reserved_mem_device_release(dev); @@ -1377,5 +1469,6 @@ static struct platform_driver phytium_jpeg_driver = { module_platform_driver(phytium_jpeg_driver); MODULE_DESCRIPTION("Phytium JPEG Encoder driver"); +MODULE_VERSION(JPEG_DRIVER_VERSION); MODULE_AUTHOR("Wang Min "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h index c2849278ce6011ed8afdb065cfbedd32a5a1a814..111cfefb979fb368bf82c07a038f40a24d07c401 100644 --- a/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h +++ b/drivers/media/platform/phytium-jpeg/phytium_jpeg_core.h @@ -36,6 +36,7 @@ #include #define PHYTIUM_JPEG_NAME "phytium-jpeg" +#define JPEG_DRIVER_VERSION "1.0.0" #define MAX_FRAME_RATE 60 #define MAX_HEIGHT 1080 #define MAX_WIDTH 1920 @@ -45,7 +46,7 @@ #define MAX_PIXEL_CLOCK (1920 * 1080 * 60) /* 1920 x 1080 x 60Hz */ #define SOURCE_RESOLUTION_DETECT_TIMEOUT msecs_to_jiffies(500) -#define RESOLUTION_CHANGE_DELAY msecs_to_jiffies(50) +#define RESOLUTION_CHANGE_DELAY msecs_to_jiffies(250) #define 
INVALID_RESOLUTION_DELAY msecs_to_jiffies(250) #define STOP_TIMEOUT msecs_to_jiffies(1000) @@ -128,6 +129,7 @@ struct phytium_jpeg_dev { unsigned int frame_rate; void __iomem *timer30_addr; void __iomem *timer31_addr; + struct v4l2_ctrl_handler ctrl_handler; }; struct phytium_jpeg_config { diff --git a/drivers/mmc/host/phytium-mci-pci.c b/drivers/mmc/host/phytium-mci-pci.c index 08b20ad57deb6ac8092f3aa7155a806498050d06..4d497eb2479a3cba11b7266bf3ef976abf54507b 100644 --- a/drivers/mmc/host/phytium-mci-pci.c +++ b/drivers/mmc/host/phytium-mci-pci.c @@ -14,9 +14,10 @@ #include #include "phytium-mci.h" +#define PHYTIUM_MMC_PCI_DRIVER_VERSION "1.0.0" + static u32 sd_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | - MMC_CAP_CMD23 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR50| - MMC_CAP_4_BIT_DATA; + MMC_CAP_CMD23 | MMC_CAP_4_BIT_DATA; static u32 sd_caps2 = MMC_CAP2_NO_MMC; static u32 emmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA | MMC_CAP_WAIT_WHILE_BUSY | @@ -177,3 +178,4 @@ module_pci_driver(phytium_mci_pci_driver); MODULE_DESCRIPTION("Phytium Multimedia Card Interface PCI driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Cheng Quan "); +MODULE_VERSION(PHYTIUM_MMC_PCI_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci-plat.c b/drivers/mmc/host/phytium-mci-plat.c index b4a7b070902c30912dab8db400499f6ae887ad22..e7c0d202152cf215de5d3b86b979ad7383c1ae88 100644 --- a/drivers/mmc/host/phytium-mci-plat.c +++ b/drivers/mmc/host/phytium-mci-plat.c @@ -14,7 +14,9 @@ #include #include "phytium-mci.h" -static u32 mci_caps = MMC_CAP_CMD23 | MMC_CAP_ERASE | MMC_CAP_WAIT_WHILE_BUSY; +#define PHYTIUM_MMC_PLAT_DRIVER_VERSION "1.0.0" + +static u32 mci_caps = MMC_CAP_CMD23; #if defined CONFIG_PM && defined CONFIG_PM_SLEEP @@ -45,6 +47,26 @@ static int phytium_mci_probe(struct platform_device *pdev) if (ret) goto host_free; + if (device_property_read_bool(dev, "use-hold")) + host->use_hold = 1; + + if (device_property_read_bool(dev, 
"clk-set")) + host->clk_set = 1; + + if (host->clk_set) { + host->clk_smpl_drv_25m = -1; + host->clk_smpl_drv_50m = -1; + host->clk_smpl_drv_66m = -1; + host->clk_smpl_drv_100m = -1; + device_property_read_u32(dev, "clk-smpl-drv-25m", &host->clk_smpl_drv_25m); + device_property_read_u32(dev, "clk-smpl-drv-50m", &host->clk_smpl_drv_50m); + device_property_read_u32(dev, "clk-smpl-drv-66m", &host->clk_smpl_drv_66m); + device_property_read_u32(dev, "clk-smpl-drv-100m", &host->clk_smpl_drv_100m); + } + dev_info(dev, "mci clk set %d %d 0x%x 0x%x 0x%x 0x%x\n", + host->use_hold, host->clk_set, host->clk_smpl_drv_25m, + host->clk_smpl_drv_50m, host->clk_smpl_drv_66m, host->clk_smpl_drv_100m); + if (dev->of_node) { host->src_clk = devm_clk_get(&pdev->dev, "phytium_mci_clk"); if (IS_ERR(host->src_clk)) { @@ -173,3 +195,4 @@ module_platform_driver(phytium_mci_driver); MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Cheng Quan "); +MODULE_VERSION(PHYTIUM_MMC_PLAT_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci.c b/drivers/mmc/host/phytium-mci.c index 943259e8780d86a7d02fd5d63682116e31e58faa..035b36492bd53ea72c5d2e1d985f039274ed8d01 100644 --- a/drivers/mmc/host/phytium-mci.c +++ b/drivers/mmc/host/phytium-mci.c @@ -37,6 +37,8 @@ #include #include "phytium-mci.h" +#define PHYTIUM_MMC_DRIVER_VERSION "1.0.0" + static const u32 cmd_ints_mask = MCI_INT_MASK_RE | MCI_INT_MASK_CMD | MCI_INT_MASK_RCRC | MCI_INT_MASK_RTO | MCI_INT_MASK_HTO | MCI_RAW_INTS_HLE; @@ -150,10 +152,17 @@ static void phytium_mci_send_cmd(struct phytium_mci_host *host, u32 cmd, u32 arg static void phytium_mci_update_cmd11(struct phytium_mci_host *host, u32 cmd) { + int rc; + u32 data; + writel(MCI_CMD_START | cmd, host->base + MCI_CMD); - while (readl(host->base + MCI_CMD) & MCI_CMD_START) - cpu_relax(); + rc = readl_relaxed_poll_timeout(host->base + MCI_CMD, + data, + !(data & MCI_CMD_START), + 0, 100 * 1000); + if (rc == -ETIMEDOUT) + 
pr_debug("%s %d, timeout mci_cmd: 0x%08x\n", __func__, __LINE__, data); } static void phytium_mci_set_clk(struct phytium_mci_host *host, struct mmc_ios *ios) @@ -195,16 +204,35 @@ static void phytium_mci_set_clk(struct phytium_mci_host *host, struct mmc_ios *i clk_rate = host->clk_rate; first_uhs_div = 1 + ((tmp_ext_reg >> 8)&0xFF); div = clk_rate / (2 * first_uhs_div * ios->clock); - if (div > 2) { - sample = div / 2 + 1; - drv = sample - 1; - writel((sample << 16) | (drv << 8) | (div & 0xff), - host->base + MCI_CLKDIV); - } else if (div == 2) { - drv = 0; - sample = 1; - writel((drv << 8) | (sample << 16) | (div & 0xff), - host->base + MCI_CLKDIV); + + if (host->clk_smpl_drv_25m >= 0 + && ios->clock == 25000000 && host->clk_set) { + writel((host->clk_smpl_drv_25m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (host->clk_smpl_drv_50m >= 0 + && ios->clock == 50000000 && host->clk_set){ + writel((host->clk_smpl_drv_50m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (host->clk_smpl_drv_66m >= 0 + && ios->clock == 66000000 && host->clk_set){ + writel((host->clk_smpl_drv_66m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (host->clk_smpl_drv_100m >= 0 + && ios->clock == 100000000 && host->clk_set){ + writel((host->clk_smpl_drv_100m << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else { + if (div > 2) { + sample = div / 2 + 1; + drv = sample - 1; + writel((sample << 16) | (drv << 8) | (div & 0xff), + host->base + MCI_CLKDIV); + } else if (div == 2) { + drv = 0; + sample = 1; + writel((drv << 8) | (sample << 16) | (div & 0xff), + host->base + MCI_CLKDIV); + } } dev_dbg(host->dev, "UHS_REG_EXT ext: %x, CLKDIV: %x\n", @@ -293,6 +321,9 @@ u32 phytium_mci_cmd_prepare_raw_cmd(struct phytium_mci_host *host, rawcmd |= (0x1 << 10); } + if (host->use_hold) + rawcmd |= (0x1 << 29); + return (rawcmd | (0x1 << 31)); } @@ -848,6 +879,13 @@ static void phytium_mci_ops_request(struct mmc_host *mmc, struct mmc_request *mr if (rc == 
-ETIMEDOUT) pr_debug("%s %d, timeout mci_status: 0x%08x\n", __func__, __LINE__, data); + rc = readl_relaxed_poll_timeout(host->base + MCI_CMD, + data, + !(data & MCI_CMD_START), + 0, 500 * 1000); + if (rc == -ETIMEDOUT) + pr_debug("%s %d, timeout mci_cmd.start_cmd bit clear: 0x%08x\n", __func__, __LINE__, data); + dev_dbg(host->dev, "%s %d: cmd:%d arg:0x%x\n", __func__, __LINE__, mrq->cmd->opcode, mrq->cmd->arg); @@ -1198,12 +1236,12 @@ static irqreturn_t phytium_mci_irq(int irq, void *dev_id) goto irq_out; } - if ((events & MCI_MASKED_INTS_DTO) && (events & MCI_MASKED_INTS_CMD)) { + if (cmd && (events & MCI_MASKED_INTS_DTO) && (events & MCI_MASKED_INTS_CMD)) { phytium_mci_cmd_done(host, events, mrq, cmd); phytium_mci_data_xfer_done(host, (events & data_ints_mask) | (dmac_events & dmac_ints_mask), mrq, data); - } else if (events & MCI_MASKED_INTS_CMD || - ((events & MCI_INT_MASK_HTO) && (cmd->opcode == SD_SWITCH_VOLTAGE))) { + } else if (cmd && (events & MCI_MASKED_INTS_CMD || + ((events & MCI_INT_MASK_HTO) && (cmd->opcode == SD_SWITCH_VOLTAGE)))) { phytium_mci_cmd_done(host, events, mrq, cmd); } else if (events & MCI_MASKED_INTS_DTO) { phytium_mci_data_xfer_done(host, (events & data_ints_mask) | @@ -1555,3 +1593,4 @@ EXPORT_SYMBOL(phytium_mci_common_probe); MODULE_DESCRIPTION("Phytium Multimedia Card Interface driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Cheng Quan "); +MODULE_VERSION(PHYTIUM_MMC_DRIVER_VERSION); diff --git a/drivers/mmc/host/phytium-mci.h b/drivers/mmc/host/phytium-mci.h index c35e6f3b3a859872f9c0702ff715004163c21f91..90f51b9ee135d7911d5beb100da51a896765dd87 100644 --- a/drivers/mmc/host/phytium-mci.h +++ b/drivers/mmc/host/phytium-mci.h @@ -344,7 +344,12 @@ struct phytium_mci_host { unsigned long irq_flags; unsigned long flags; #define MCI_CARD_NEED_INIT 1 - + bool use_hold; /*use hold reg*/ + bool clk_set; /*clock set function enable*/ + s32 clk_smpl_drv_25m; /*25M clk smpl & drv*/ + s32 clk_smpl_drv_50m; /*50M clk smpl & drv*/ + s32 
clk_smpl_drv_66m; /*66M clk smpl & drv*/ + s32 clk_smpl_drv_100m; /*100M clk smpl & drv*/ }; int phytium_mci_common_probe(struct phytium_mci_host *host); diff --git a/drivers/mmc/host/phytium-sdci.c b/drivers/mmc/host/phytium-sdci.c index 2c10bf3e234b5013d964dfd67abb00644e3f2c31..d38bc89953ab72e0e59087fdd98bafb3cd2a7f77 100644 --- a/drivers/mmc/host/phytium-sdci.c +++ b/drivers/mmc/host/phytium-sdci.c @@ -39,6 +39,8 @@ #include "phytium-sdci.h" +#define PHYTIUM_SDCI_DRIVER_VERSION "1.0.0" + static const u32 cmd_ints_mask = SDCI_SDCI_NORMAL_ISER_ECC_EN | SDCI_SDCI_NORMAL_ISER_EEI_EN; static const u32 data_ints_mask = SDCI_BD_ISER_ETRS_EN; static const u32 err_ints_mask = SDCI_ERROR_ISER_ECTE_EN | SDCI_ERROR_ISR_CCRCE_EN | @@ -1440,3 +1442,4 @@ MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Cheng Quan "); MODULE_AUTHOR("Chen Baozi "); MODULE_DESCRIPTION("Phytium SD Card Interface driver"); +MODULE_VERSION(PHYTIUM_SDCI_DRIVER_VERSION); diff --git a/drivers/mtd/nand/raw/phytium_nand_pci.c b/drivers/mtd/nand/raw/phytium_nand_pci.c index a8f054d669cc60765e2a0887030185f3d36bc6d6..0fe28bb58f235e087930c42e3dc2816d01f4c710 100644 --- a/drivers/mtd/nand/raw/phytium_nand_pci.c +++ b/drivers/mtd/nand/raw/phytium_nand_pci.c @@ -11,6 +11,7 @@ #include "phytium_nand.h" #define DRV_NAME "phytium_nand_pci" +#define DRV_VERSION "1.0.0" static struct mtd_partition partition_info[] = { { @@ -156,3 +157,4 @@ module_pci_driver(phytium_pci_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PCI driver for Phytium NAND controller"); MODULE_AUTHOR("Zhu Mingshuai "); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/mtd/nand/raw/phytium_nand_plat.c b/drivers/mtd/nand/raw/phytium_nand_plat.c index 65504785f33ba1b441e5fa06302a016f7be7d009..9679cf322fe2146f728d8241dcade0bd6000e89b 100644 --- a/drivers/mtd/nand/raw/phytium_nand_plat.c +++ b/drivers/mtd/nand/raw/phytium_nand_plat.c @@ -20,6 +20,7 @@ #include "phytium_nand.h" #define DRV_NAME "phytium_nand_plat" +#define DRV_VERSION "1.0.0" static 
int phytium_nfc_plat_probe(struct platform_device *pdev) { @@ -136,3 +137,4 @@ module_platform_driver(phytium_nfc_plat_driver) MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Phytium NAND controller Platform driver"); MODULE_AUTHOR("Zhu Mingshuai "); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/mtd/spi-nor/phytium-quadspi.c b/drivers/mtd/spi-nor/phytium-quadspi.c index 4a3c5e713cdba56f2bcb25e24683b9cfe1e67790..041057a3f1175fe78101c32dbfaf5190fa6fca8a 100644 --- a/drivers/mtd/spi-nor/phytium-quadspi.c +++ b/drivers/mtd/spi-nor/phytium-quadspi.c @@ -23,6 +23,8 @@ #include #include +#define DRIVER_VERSION "1.0.0" + #define QSPI_FLASH_CAP_REG 0x000 #define QSPI_RD_CFG_REG 0x004 #define QSPI_WR_CFG_REG 0x008 @@ -150,6 +152,8 @@ #define PHYTIUM_FMODE_MM 0x01 #define PHYTIUM_FMODE_IN 0x02 +#define WR_CFG_NODIRMAP_VALUE 0x5000000 + /* * the codes of the different commands */ @@ -655,6 +659,86 @@ static ssize_t phytium_qspi_write(struct spi_nor *nor, loff_t to, size_t len, return len; } +static ssize_t phytium_qspi_nodirmap_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *buf) +{ + struct phytium_qspi_flash *flash = nor->priv; + struct device *dev = flash->qspi->dev; + struct phytium_qspi *qspi = flash->qspi; + u32 cmd = nor->program_opcode; + u32 addr = (u32)to; + int i; + u_char tmp[8] = {0}; + size_t mask = 0x03; + size_t mask_p = 0x07; + u8 len_p; + + if (addr & 0x03) { + dev_err(dev, "Addr not four-byte aligned!\n"); + return -EINVAL; + } + + cmd = cmd << QSPI_CMD_PORT_CMD_SHIFT; + cmd |= flash->cs << QSPI_CMD_PORT_CS_SHIFT; + cmd |= BIT(QSPI_CMD_PORT_CMD_ADDR_SHIFT); + cmd |= BIT(QSPI_CMD_PORT_DATA_TRANSFER_SHIFT); + cmd |= flash->clk_div & QSPI_CMD_PORT_SCK_SEL_MASK; + cmd |= 0x07 << QSPI_CMD_PORT_RW_NUM_SHIFT; + + switch (nor->program_opcode) { + case CMD_PP: + case CMD_QPP: + cmd &= ~(0x1 << QSPI_CMD_PORT_SEL_SHIFT); + break; + case CMD_4PP: + case CMD_4QPP: + cmd |= BIT(QSPI_CMD_PORT_SEL_SHIFT); + break; + default: + dev_err(qspi->dev, "Not 
support program command:%#x\n", + nor->erase_opcode); + return -EINVAL; + } + + for (i = 0; i < len/8; i++) { + phytium_qspi_write_enable(qspi, flash); + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + writel_relaxed(*(u32 *)(buf + 4 + 8 * i), + qspi->io_base + QSPI_HD_PORT_REG); + writel_relaxed(*(u32 *)(buf + 8 * i), + qspi->io_base + QSPI_LD_PORT_REG); + phytium_qspi_wait_cmd(qspi, flash); + phytium_qspi_write_disable(qspi, flash); + addr += 8; + } + + len_p = (u8)(len & mask_p); + if (len_p) { + phytium_qspi_write_enable(qspi, flash); + cmd &= ~(0x07 << QSPI_CMD_PORT_RW_NUM_SHIFT); + cmd |= (len_p - 1) << QSPI_CMD_PORT_RW_NUM_SHIFT; + writel_relaxed(cmd, qspi->io_base + QSPI_CMD_PORT_REG); + writel_relaxed(addr, qspi->io_base + QSPI_ADDR_PORT_REG); + if ((len_p - 4) > 0) { + memcpy(tmp, buf + 4 + (len / 8) * 8, len_p - 4); + writel_relaxed(*(u32 *)tmp, qspi->io_base + QSPI_HD_PORT_REG); + memcpy(tmp, buf + (len / 8) * 8, 4); + writel_relaxed(*(u32 *)tmp, qspi->io_base + QSPI_LD_PORT_REG); + } else if (len_p == 4) { + memcpy(tmp, buf + (len / 8) * 8, 4); + writel_relaxed(*(u32 *)tmp, qspi->io_base + QSPI_LD_PORT_REG); + } else { + memcpy(tmp, buf + (len / 8) * 8, len & mask); + writel_relaxed(*(u32 *)tmp, qspi->io_base + QSPI_LD_PORT_REG); + } + } + + phytium_qspi_wait_cmd(qspi, flash); + + return len; +} + static int phytium_qspi_erase(struct spi_nor *nor, loff_t offs) { struct phytium_qspi_flash *flash = nor->priv; @@ -785,6 +869,7 @@ static int phytium_qspi_flash_setup(struct phytium_qspi *qspi, struct mtd_info *mtd; struct device_node *of_node; int ret; + bool dirmap_write = false; fwnode_property_read_u32(np, "reg", &cs_num); if (cs_num >= PHYTIUM_MAX_NORCHIP) @@ -816,6 +901,9 @@ static int phytium_qspi_flash_setup(struct phytium_qspi *qspi, } else if (width != 1) return -EINVAL; + if (fwnode_property_read_bool(np, "dirmap-write")) + dirmap_write = true; + flash = &qspi->flash[cs_num]; 
flash->qspi = qspi; flash->cs = cs_num; @@ -833,7 +921,10 @@ static int phytium_qspi_flash_setup(struct phytium_qspi *qspi, mtd = &flash->nor.mtd; flash->nor.read = phytium_qspi_read; - flash->nor.write = phytium_qspi_write; + if (dirmap_write) + flash->nor.write = phytium_qspi_write; + else + flash->nor.write = phytium_qspi_nodirmap_write; flash->nor.erase = phytium_qspi_erase; flash->nor.read_reg = phytium_qspi_read_reg; flash->nor.write_reg = phytium_qspi_write_reg; @@ -859,6 +950,9 @@ static int phytium_qspi_flash_setup(struct phytium_qspi *qspi, flash_cap |= ret; writel_relaxed(flash_cap, qspi->io_base + QSPI_FLASH_CAP_REG); + if (!dirmap_write) + writel_relaxed(WR_CFG_NODIRMAP_VALUE, qspi->io_base + QSPI_WR_CFG_REG); + flash->read_mode = PHYTIUM_FMODE_MM; ret = mtd_device_register(mtd, NULL, 0); @@ -1059,3 +1153,4 @@ module_platform_driver(phytium_qspi_driver); MODULE_AUTHOR("Mingshuai Zhu "); MODULE_DESCRIPTION("Phytium QuadSPI driver"); MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index f081ec4f70ab9ee3266e8b7eba4d3ff7ed1e05d1..9b8c52228b0ea8410dc924d9584e4c226659dc2c 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1053,6 +1053,11 @@ static const struct flash_info spi_nor_ids[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { + "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | @@ -1248,6 +1253,11 @@ static const struct flash_info spi_nor_ids[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { + "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, { "w25q80", 
INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, diff --git a/drivers/net/can/phytium/phytium_can.c b/drivers/net/can/phytium/phytium_can.c index 1b2859ac106751b9bb80293d0199cf7be30c3ca5..0ae117e2b0d305204fb07e8b24d2bfa108f1269d 100644 --- a/drivers/net/can/phytium/phytium_can.c +++ b/drivers/net/can/phytium/phytium_can.c @@ -1196,4 +1196,4 @@ MODULE_AUTHOR("Cheng Quan "); MODULE_AUTHOR("Chen Baozi "); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN bus driver for Phytium CAN controller"); - +MODULE_VERSION(CAN_PHYTIUM_DRV_VERSION); diff --git a/drivers/net/can/phytium/phytium_can.h b/drivers/net/can/phytium/phytium_can.h index 52bddeddc5cfb13b5bf7a96fcd16f00cdc118848..1374becbb08d5112972823f52a8931455ae4edf5 100644 --- a/drivers/net/can/phytium/phytium_can.h +++ b/drivers/net/can/phytium/phytium_can.h @@ -29,6 +29,8 @@ #define STOP_QUEUE_TRUE 1 #define STOP_QUEUE_FALSE 0 +#define CAN_PHYTIUM_DRV_VERSION "1.1.0" + enum phytium_can_ip_type { PHYTIUM_CAN = 0, PHYTIUM_CANFD, diff --git a/drivers/net/can/phytium/phytium_can_pci.c b/drivers/net/can/phytium/phytium_can_pci.c index a41833b7e8af67555142c6b2df54281e6983ccb4..751917fd9d2182468d765dab91c0d04cbbdb2efc 100644 --- a/drivers/net/can/phytium/phytium_can_pci.c +++ b/drivers/net/can/phytium/phytium_can_pci.c @@ -81,22 +81,18 @@ static int phytium_can_pci_probe(struct pci_dev *pdev, const struct pci_device_i pci_set_drvdata(pdev, cdev->net); - if (!pm_runtime_enabled(cdev->dev)) - pm_runtime_enable(cdev->dev); - ret = pm_runtime_get_sync(cdev->dev); - if (ret < 0) { - netdev_err(cdev->net, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); - goto err_pmdisable; - } + pm_runtime_get_noresume(cdev->dev); + pm_runtime_set_active(cdev->dev); + pm_runtime_enable(cdev->dev); ret = phytium_can_register(cdev); if (ret) - goto err; + goto err_pmdisable; return 0; err_pmdisable: pm_runtime_disable(&pdev->dev); 
+ phytium_can_free_dev(cdev->net); err: return ret; } @@ -106,9 +102,8 @@ static void phytium_can_pci_remove(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct phytium_can_dev *cdev = netdev_priv(dev); - pm_runtime_disable(cdev->dev); - phytium_can_unregister(cdev); + pm_runtime_disable(cdev->dev); phytium_can_free_dev(cdev->net); } @@ -146,3 +141,4 @@ module_pci_driver(phytium_can_pci_driver); MODULE_AUTHOR("Cheng Quan can.clock.freq = clk_get_rate(cdev->clk); + clk_prepare_enable(cdev->clk); of_id = of_match_device(phytium_can_of_ids, &pdev->dev); if (of_id && of_id->data) @@ -145,15 +146,10 @@ static int phytium_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cdev->net); - if (!pm_runtime_enabled(cdev->dev)) - pm_runtime_enable(cdev->dev); - ret = pm_runtime_get_sync(cdev->dev); - if (ret < 0) { - netdev_err(cdev->net, "%s: pm_runtime_get failed(%d)\n", - __func__, ret); - goto out_runtime_disable; - } - + pm_runtime_get_noresume(cdev->dev); + pm_runtime_set_active(cdev->dev); + pm_runtime_enable(cdev->dev); + /*This functiong call the pm_runtime_put_sync*/ ret = phytium_can_register(cdev); if (ret) goto out_runtime_disable; @@ -183,7 +179,7 @@ static int phytium_can_plat_remove(struct platform_device *pdev) struct phytium_can_dev *cdev = netdev_priv(dev); phytium_can_unregister(cdev); - + pm_runtime_disable(cdev->dev); phytium_can_free_dev(cdev->net); return 0; @@ -229,3 +225,4 @@ module_platform_driver(phytium_can_plat_driver); MODULE_AUTHOR("Cheng Quan "); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Phytium CAN driver for IO Mapped controllers"); +MODULE_VERSION(CAN_PHYTIUM_DRV_VERSION); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6fde68aa13a40de376f472df200f36752f6a247e..395861dd47b8367136eaaaa80b739d52424d045f 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -42,6 +42,7 @@ source "drivers/net/ethernet/chelsio/Kconfig" source 
"drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" source "drivers/net/ethernet/cortina/Kconfig" +source "drivers/net/ethernet/phytium/Kconfig" config CX_ECAT tristate "Beckhoff CX5020 EtherCAT master support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index b45d5f626b592356d222e9967c3b68a96dfb7c3f..81e974e7c19865cd95b7a013334105d5536d976d 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -70,6 +70,7 @@ obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ +obj-$(CONFIG_NET_VENDOR_PHYTIUM) += phytium/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index bbe50e920a8e72050d9a12ec0e88d859d6953258..f3ab1e9673aa0ee85f6b5d6ccb6f42fe7a8d663c 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -85,6 +85,7 @@ #define GEM_DMACFG 0x0010 /* DMA Configuration */ #define GEM_JML 0x0048 /* Jumbo Max Length */ #define GEM_HS_MAC_CONFIG 0x0050 /* GEM high speed config */ +#define GEM_AXI_PIPE 0x0054 /* Axi max pipeline register*/ #define GEM_HRB 0x0080 /* Hash Bottom */ #define GEM_HRT 0x0084 /* Hash Top */ #define GEM_SA1B 0x0088 /* Specific1 Bottom */ @@ -705,11 +706,11 @@ #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_SEL_CLK 0x00000800 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 #define MACB_CAPS_MACB_IS_GEM 0x80000000 -#define MACB_CAPS_SEL_CLK_HW 0x00001000 /* LSO settings */ #define MACB_LSO_UFO_ENABLE 0x01 diff --git a/drivers/net/ethernet/cadence/macb_main.c 
b/drivers/net/ethernet/cadence/macb_main.c index 5b20a14c6680039febd3b1164c71bf570c6f6333..364d99d887fc137cb0147fa6c4fc218fbcb5fde3 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -40,6 +40,9 @@ #include #include "macb.h" +#define MAX_RING_ADDR_ALLOC_TIMES 3 +#define RING_ADDR_INTERVAL 128 + #define MACB_RX_BUFFER_SIZE 128 #define RX_BUFFER_MULTIPLE 64 /* bytes */ @@ -422,11 +425,14 @@ static int phytium_mac_config(struct macb *bp) netdev_dbg(bp->dev, "phytium mac config"); - old_ncr = ncr = macb_readl(bp, NCR); - old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); + old_ncr = macb_readl(bp, NCR); + ncr = old_ncr; + old_ctrl = macb_or_gem_readl(bp, NCFGR); + ctrl = old_ctrl; ncr &= ~(GEM_BIT(ENABLE_HS_MAC) | MACB_BIT(2PT5G)); ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL) | MACB_BIT(SPD) | MACB_BIT(FD)); + if (macb_is_gem(bp)) ctrl &= ~GEM_BIT(GBE); @@ -473,6 +479,7 @@ static void phytium_usx_pcs_link_up(struct macb *bp) config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS)); // reset + config &= ~(GEM_BIT(SIGNAL_OK) | GEM_BIT(TX_EN)); config |= GEM_BIT(RX_SYNC_RESET); gem_writel(bp, USX_CONTROL, config); @@ -484,7 +491,7 @@ static void phytium_usx_pcs_link_up(struct macb *bp) gem_writel(bp, USX_CONTROL, config); } -static void phytium_pe220x_sel_clk(struct macb *bp) +static void phytium_gem1p0_sel_clk(struct macb *bp) { int speed = 0; dev_dbg(&bp->pdev->dev, "phytium pe220x sel clk config\n"); @@ -609,6 +616,32 @@ static void phytium_pe220x_sel_clk(struct macb *bp) return; } +static void phytium_gem2p0_sel_clk(struct macb *bp) +{ + if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { + if (bp->speed == SPEED_100 || bp->speed == SPEED_10) { + gem_writel(bp, SRC_SEL_LN, 0x1); /*0x1c04*/ + gem_writel(bp, DIV_SEL1_LN, 0x1); /*0x1c0c*/ + } + } + + if (bp->speed == SPEED_100 || bp->speed == SPEED_10) + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_100M, + gem_readl(bp, HS_MAC_CONFIG))); + else if 
(bp->speed == SPEED_1000) + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_1000M, + gem_readl(bp, HS_MAC_CONFIG))); + else if (bp->speed == SPEED_2500) + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_2500M, + gem_readl(bp, HS_MAC_CONFIG))); + else if (bp->speed == SPEED_5000) + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_5000M, + gem_readl(bp, HS_MAC_CONFIG))); + else if (bp->speed == SPEED_10000) + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M, + gem_readl(bp, HS_MAC_CONFIG))); +} + static void macb_handle_link_change(struct net_device *dev) { struct macb *bp = netdev_priv(dev); @@ -664,7 +697,7 @@ static void macb_handle_link_change(struct net_device *dev) macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); /* phytium need hwclock */ - if ((bp->caps & MACB_CAPS_SEL_CLK_HW) && bp->sel_clk_hw) + if ((bp->caps & MACB_CAPS_SEL_CLK) && bp->sel_clk_hw) bp->sel_clk_hw(bp); netif_carrier_on(dev); @@ -1154,6 +1187,7 @@ static void gem_rx_refill(struct macb_queue *queue) break; } + queue->rx_prepared_head++; queue->rx_skbuff[entry] = skb; if (entry == bp->rx_ring_size - 1) @@ -1172,7 +1206,6 @@ static void gem_rx_refill(struct macb_queue *queue) dma_wmb(); desc->addr &= ~MACB_BIT(RX_USED); } - queue->rx_prepared_head++; } /* Make descriptor updates visible to hardware */ @@ -1244,6 +1277,15 @@ static int gem_rx(struct macb_queue *queue, int budget) queue->stats.rx_dropped++; break; } + + len = ctrl & bp->rx_frm_len_mask; + if (unlikely(len <= 0 || len > bp->rx_buffer_size)) { + netdev_err(bp->dev, "illegal skb len: %d\n", len); + bp->dev->stats.rx_dropped++; + queue->stats.rx_dropped++; + break; + } + skb = queue->rx_skbuff[entry]; if (unlikely(!skb)) { netdev_err(bp->dev, @@ -1254,7 +1296,6 @@ static int gem_rx(struct macb_queue *queue, int budget) } /* now everything is ready for receiving packet */ queue->rx_skbuff[entry] = NULL; - len = ctrl & bp->rx_frm_len_mask; netdev_vdbg(bp->dev, "gem_rx %u 
(len %u)\n", entry, len); @@ -2174,27 +2215,48 @@ static void macb_free_rx_buffers(struct macb *bp) static void macb_free_consistent(struct macb *bp) { + struct macb_dma_desc *tx_ring_base = NULL; + struct macb_dma_desc *rx_ring_base = NULL; + dma_addr_t tx_ring_base_addr; + dma_addr_t rx_ring_base_addr; struct macb_queue *queue; unsigned int q; int size; bp->macbgem_ops.mog_free_rx_buffers(bp); + queue = bp->queues; + if (queue->tx_ring) { + tx_ring_base = queue->tx_ring; + tx_ring_base_addr = queue->tx_ring_dma; + } + if (queue->rx_ring) { + rx_ring_base = queue->rx_ring; + rx_ring_base_addr = queue->rx_ring_dma; + } + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { kfree(queue->tx_skb); queue->tx_skb = NULL; - if (queue->tx_ring) { - size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; - dma_free_coherent(&bp->pdev->dev, size, - queue->tx_ring, queue->tx_ring_dma); + if (queue->tx_ring) queue->tx_ring = NULL; - } - if (queue->rx_ring) { - size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; - dma_free_coherent(&bp->pdev->dev, size, - queue->rx_ring, queue->rx_ring_dma); + if (queue->rx_ring) queue->rx_ring = NULL; - } + } + + if (tx_ring_base) { + size = bp->num_queues * (TX_RING_BYTES(bp) + + bp->tx_bd_rd_prefetch + + RING_ADDR_INTERVAL); + dma_free_coherent(&bp->pdev->dev, size, tx_ring_base, + tx_ring_base_addr); + } + if (rx_ring_base) { + size = bp->num_queues * (RX_RING_BYTES(bp) + + bp->rx_bd_rd_prefetch + + RING_ADDR_INTERVAL); + dma_free_coherent(&bp->pdev->dev, size, rx_ring_base, + rx_ring_base_addr); } } @@ -2234,17 +2296,87 @@ static int macb_alloc_rx_buffers(struct macb *bp) return 0; } +static int macb_queue_phyaddr_check(struct macb *bp, dma_addr_t ring_base_addr, + int offset) +{ + u32 bus_addr_high; + int i; + + bus_addr_high = upper_32_bits(ring_base_addr); + for (i = 1; i < bp->num_queues; i++) { + ring_base_addr += offset; + if (bus_addr_high != upper_32_bits(ring_base_addr)) + return -1; + } + + return 0; +} + static int 
macb_alloc_consistent(struct macb *bp) { + struct macb_dma_desc *tx_ring_base, *rx_ring_base; + dma_addr_t tx_ring_base_addr, rx_ring_base_addr; struct macb_queue *queue; + int tx_offset, rx_offset; + int tx_size, rx_size; unsigned int q; + int ret, i; int size; + tx_offset = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch + + RING_ADDR_INTERVAL; + tx_size = bp->num_queues * tx_offset; + for (i = 0; i < MAX_RING_ADDR_ALLOC_TIMES + 1; i++) { + if (i == MAX_RING_ADDR_ALLOC_TIMES) + return -ENOMEM; + + tx_ring_base = dma_alloc_coherent(&bp->pdev->dev, tx_size, + &tx_ring_base_addr, + GFP_KERNEL); + if (!tx_ring_base) + continue; + + ret = macb_queue_phyaddr_check(bp, tx_ring_base_addr, + tx_offset); + if (ret) { + dma_free_coherent(&bp->pdev->dev, tx_size, tx_ring_base, + tx_ring_base_addr); + continue; + } else { + break; + } + } + + rx_offset = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch + + RING_ADDR_INTERVAL; + rx_size = bp->num_queues * rx_offset; + for (i = 0; i < MAX_RING_ADDR_ALLOC_TIMES + 1; i++) { + if (i == MAX_RING_ADDR_ALLOC_TIMES) { + dma_free_coherent(&bp->pdev->dev, tx_size, tx_ring_base, + tx_ring_base_addr); + return -ENOMEM; + } + + rx_ring_base = dma_alloc_coherent(&bp->pdev->dev, rx_size, + &rx_ring_base_addr, + GFP_KERNEL); + if (!rx_ring_base) + continue; + + ret = macb_queue_phyaddr_check(bp, rx_ring_base_addr, + rx_offset); + if (ret) { + dma_free_coherent(&bp->pdev->dev, rx_size, rx_ring_base, + rx_ring_base_addr); + continue; + } else { + break; + } + } + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; - queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, - &queue->tx_ring_dma, - GFP_KERNEL); + queue->tx_ring = (void *)tx_ring_base + q * tx_offset; + queue->tx_ring_dma = tx_ring_base_addr + q * tx_offset; if (!queue->tx_ring) goto out_err; netdev_dbg(bp->dev, @@ -2253,13 +2385,12 @@ static int macb_alloc_consistent(struct macb *bp) queue->tx_ring); size = bp->tx_ring_size 
* sizeof(struct macb_tx_skb); - queue->tx_skb = kmalloc(size, GFP_KERNEL); + queue->tx_skb = kzalloc(size, GFP_KERNEL); if (!queue->tx_skb) goto out_err; - size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; - queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, - &queue->rx_ring_dma, GFP_KERNEL); + queue->rx_ring = (void *)rx_ring_base + q * rx_offset; + queue->rx_ring_dma = rx_ring_base_addr + q * rx_offset; if (!queue->rx_ring) goto out_err; netdev_dbg(bp->dev, @@ -2293,6 +2424,15 @@ static void gem_init_rings(struct macb *bp) queue->tx_head = 0; queue->tx_tail = 0; + for (i = 0; i < bp->rx_ring_size; i++) { + desc = macb_rx_desc(queue, i); + desc->ctrl = 0; + /* make sure ctrl is cleared first, + * and bit RX_USED is set to avoid a race. + */ + dma_wmb(); + desc->addr |= MACB_BIT(RX_USED); + } queue->rx_tail = 0; queue->rx_prepared_head = 0; @@ -2496,11 +2636,12 @@ static void macb_init_hw(struct macb *bp) if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) gem_writel(bp, JML, bp->jumbo_max_len); + gem_writel(bp, AXI_PIPE, 0x1010); if (bp->phy_interface == PHY_INTERFACE_MODE_USXGMII || bp->phy_interface == PHY_INTERFACE_MODE_5GBASER || bp->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { /* phytium need hwclock */ - if (bp->caps & MACB_CAPS_SEL_CLK_HW) + if (bp->caps & MACB_CAPS_SEL_CLK) bp->sel_clk_hw(bp); phytium_mac_config(bp); if (bp->link) @@ -3409,6 +3550,72 @@ static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) return ret; } +static int macb_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *kset) +{ + int ret = 0; + struct macb *bp = netdev_priv(ndev); + u32 supported = 0; + u32 advertising = 0; + + if (!ndev->phydev) { + if (bp->phy_interface == PHY_INTERFACE_MODE_USXGMII || + bp->phy_interface == PHY_INTERFACE_MODE_XGMII) { + supported = SUPPORTED_10000baseT_Full + | SUPPORTED_FIBRE | SUPPORTED_Pause; + advertising = ADVERTISED_10000baseT_Full + | ADVERTISED_FIBRE | ADVERTISED_Pause; + 
kset->base.port = PORT_FIBRE; + kset->base.transceiver = XCVR_INTERNAL; + } else if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { + supported = SUPPORTED_2500baseX_Full | SUPPORTED_1000baseT_Full + | SUPPORTED_100baseT_Full | SUPPORTED_10baseT_Full + | SUPPORTED_FIBRE | SUPPORTED_Pause; + advertising = ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full + | ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Full + | ADVERTISED_FIBRE | ADVERTISED_Pause; + kset->base.port = PORT_FIBRE; + kset->base.transceiver = XCVR_INTERNAL; + } else if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) { + supported = SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + | SUPPORTED_10baseT_Full | SUPPORTED_TP; + advertising = ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full + | ADVERTISED_10baseT_Full | ADVERTISED_TP; + } else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) { + supported = SUPPORTED_100baseT_Full + | SUPPORTED_10baseT_Full | SUPPORTED_TP; + advertising = ADVERTISED_100baseT_Full + | ADVERTISED_10baseT_Full | ADVERTISED_TP; + } + + ethtool_convert_legacy_u32_to_link_mode(kset->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(kset->link_modes.advertising, + advertising); + kset->base.speed = bp->speed; + kset->base.duplex = bp->duplex; + } else { + phy_ethtool_get_link_ksettings(ndev, kset); + } + + return ret; +} + +static int macb_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *kset) +{ + int ret = 0; + + if (!ndev->phydev) { + netdev_err(ndev, "fixed link interface not supported set link\n"); + ret = -EOPNOTSUPP; + } else { + phy_ethtool_set_link_ksettings(ndev, kset); + } + + return ret; +} + static const struct ethtool_ops macb_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, @@ -3430,8 +3637,8 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = 
gem_get_sset_count, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link_ksettings = macb_get_link_ksettings, + .set_link_ksettings = macb_set_link_ksettings, .get_ringparam = macb_get_ringparam, .set_ringparam = macb_set_ringparam, .get_rxnfc = gem_get_rxnfc, @@ -4254,17 +4461,30 @@ static const struct macb_config zynq_config = { .init = macb_init, }; -static const struct macb_config phytium_pe220x_config = { +static const struct macb_config phytium_gem1p0_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | - MACB_CAPS_SEL_CLK_HW, + MACB_CAPS_SEL_CLK, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, .jumbo_max_len = 16360, - .sel_clk_hw = phytium_pe220x_sel_clk, + .sel_clk_hw = phytium_gem1p0_sel_clk, +}; + +static const struct macb_config phytium_gem2p0_config = { + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | + MACB_CAPS_JUMBO | + MACB_CAPS_GEM_HAS_PTP | + MACB_CAPS_BD_RD_PREFETCH | + MACB_CAPS_SEL_CLK, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .jumbo_max_len = 10240, + .sel_clk_hw = phytium_gem2p0_sel_clk, }; static const struct of_device_id macb_dt_ids[] = { @@ -4282,7 +4502,9 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,emac", .data = &emac_config }, { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "cdns,zynq-gem", .data = &zynq_config }, - { .compatible = "cdns,phytium-gem", .data = &phytium_pe220x_config }, + { .compatible = "cdns,phytium-gem", .data = &phytium_gem1p0_config }, /* old version */ + { .compatible = "cdns,phytium-gem-1.0", .data = &phytium_gem1p0_config }, + { .compatible = "cdns,phytium-gem-2.0", .data = &phytium_gem2p0_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); @@ -4290,7 +4512,7 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids); #ifdef CONFIG_ACPI static const struct 
acpi_device_id macb_acpi_ids[] = { - { .id = "PHYT0036", .driver_data = (kernel_ulong_t)&phytium_pe220x_config }, + { .id = "PHYT0036", .driver_data = (kernel_ulong_t)&phytium_gem1p0_config }, { } }; @@ -4421,6 +4643,7 @@ static int macb_probe(struct platform_device *pdev) bp->tsu_clk = tsu_clk; if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; + if (macb_config) bp->sel_clk_hw = macb_config->sel_clk_hw; diff --git a/drivers/net/ethernet/phytium/Kconfig b/drivers/net/ethernet/phytium/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..03d4afbd1a3b2e03146b14743d32b467df036ac7 --- /dev/null +++ b/drivers/net/ethernet/phytium/Kconfig @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Phytium device configuration +# + +config NET_VENDOR_PHYTIUM + bool "Phytium devices" + depends on HAS_IOMEM + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + remaining Cadence network card questions. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_PHYTIUM + +config PHYTMAC + tristate "Phytium GMAC support" + depends on HAS_DMA + #depends on PTP_1588_CLOCK_OPTIONAL + select PHYLINK + select CRC32 + help + If you have a network (Ethernet) controller of this type, say Y + or M here. + + To compile this driver as a module, choose M here: the module + will be phytmac. + +config PHYTMAC_ENABLE_PTP + bool "Enable IEEE 1588 hwstamp" + depends on PHYTMAC + depends on PTP_1588_CLOCK + default y + help + Enable IEEE 1588 PTP support for PHYTMAC. + +config PHYTMAC_PLATFORM + tristate "Phytmac Platform support" + depends on PHYTMAC + help + This is Platform driver. + + To compile this driver as a module, choose M here: the module + will be called phytmac_platform. 
+ +config PHYTMAC_PCI + tristate "Phytmac PCI support" + depends on PHYTMAC && PCI + help + This is PCI driver. + + To compile this driver as a module, choose M here: the module + will be called phytmac_pci. + +endif # NET_VENDOR_PHYTIUM diff --git a/drivers/net/ethernet/phytium/Makefile b/drivers/net/ethernet/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..6e710d4d54b6641bc1679405d6909827cfd09eeb --- /dev/null +++ b/drivers/net/ethernet/phytium/Makefile @@ -0,0 +1,17 @@ + +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Phytium network device drivers. +# +# + +obj-$(CONFIG_PHYTMAC) += phytmac.o + +phytmac-objs := phytmac_main.o phytmac_ethtool.o phytmac_v1.o phytmac_v2.o +phytmac-$(CONFIG_PHYTMAC_ENABLE_PTP) += phytmac_ptp.o + +obj-$(CONFIG_PHYTMAC_PLATFORM) += phytmac-platform.o +phytmac-platform-objs := phytmac_platform.o + +obj-$(CONFIG_PHYTMAC_PCI) += phytmac-pci.o +phytmac-pci-objs := phytmac_pci.o diff --git a/drivers/net/ethernet/phytium/phytmac.h b/drivers/net/ethernet/phytium/phytmac.h new file mode 100644 index 0000000000000000000000000000000000000000..ca8cd012d4af246de2ccc1ed1fa1c872eebdffb3 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac.h @@ -0,0 +1,606 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _PHYTMAC_H +#define _PHYTMAC_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define PHYTMAC_DRV_NAME "phytium-mac" +#define PHYTMAC_DRV_DESC "PHYTIUM Ethernet Driver" +#define PHYTMAC_DRIVER_VERSION "1.0.5" +#define PHYTMAC_DEFAULT_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_INTR | \ + NETIF_MSG_HW |\ + NETIF_MSG_PKTDATA) + +#define IRQ_TYPE_INT 0 +#define IRQ_TYPE_MSI 1 +#define IRQ_TYPE_INTX 2 + +#define PHYTMAC_MAX_QUEUES 8 +#define DEFAULT_DMA_BURST_LENGTH 16 +#define DEFAULT_JUMBO_MAX_LENGTH 10240 +#define PHYTMAC_MAX_TX_LEN 16320 +#define PHYTMAC_MIN_TX_LEN 64 +#define DEFAULT_TX_RING_SIZE 512 
+#define DEFAULT_RX_RING_SIZE 512 +#define MAX_TX_RING_SIZE 1024 +#define MAX_RX_RING_SIZE 4096 +#define MIN_TX_RING_SIZE 64 +#define MIN_RX_RING_SIZE 64 +#define DEFAULT_TX_DESC_MIN_FREE 64 + +#define MEMORY_SIZE 4096 +#define MHU_SIZE 0x20 + +#define PHYTMAC_POWEROFF 1 +#define PHYTMAC_POWERON 2 + +#define PHYTMAC_WOL_MAGIC_PACKET 1 + +#define DEFAULT_MSG_RING_SIZE 16 + +#define PHYTMAC_CAPS_JUMBO 0x00000001 +#define PHYTMAC_CAPS_PTP 0x00000002 +#define PHYTMAC_CAPS_BD_RD_PREFETCH 0x00000004 +#define PHYTMAC_CAPS_PCS 0x00000008 +#define PHYTMAC_CAPS_LSO 0x00000010 +#define PHYTMAC_CAPS_SG_DISABLED 0x00000020 +#define PHYTMAC_CAPS_TAILPTR 0x00000040 +#define PHYTMAC_CAPS_START 0x00000080 +#define PHYTMAC_CAPS_NO_WOL 0x0000100 +#define PHYTMAC_CAPS_LPI 0x0000400 +#define PHYTMAC_CAPS_MSG 0x0000800 + +#define PHYTMAC_TX 0x1 +#define PHYTMAC_RX 0x2 + +#define PHYTMAC_GREGS_LEN 16 + +#define PHYTMAC_MTU_MIN_SIZE ETH_MIN_MTU + +#define EQUAL(a, b) ((a) == (b)) + +#define TXD_USE_COUNT(_pdata, s) DIV_ROUND_UP((s), (_pdata)->max_tx_length) + +/* Bit manipulation macros */ +#define PHYTMAC_BIT(_field) \ + (1 << PHYTMAC_##_field##_INDEX) + +#define PHYTMAC_BITS(_field, value) \ + (((value) & ((1 << PHYTMAC_##_field##_WIDTH) - 1)) \ + << PHYTMAC_##_field##_INDEX) + +#define PHYTMAC_GET_BITS(_var, _field) \ + (((_var) >> (PHYTMAC_##_field##_INDEX)) \ + & ((0x1 << (PHYTMAC_##_field##_WIDTH)) - 1)) + +#define PHYTMAC_SET_BITS(_var, _field, _val) \ + (((_var) & ~(((1 << PHYTMAC_##_field##_WIDTH) - 1) \ + << PHYTMAC_##_field##_INDEX)) \ + | (((_val) & ((1 << PHYTMAC_##_field##_WIDTH) - 1)) \ + << PHYTMAC_##_field##_INDEX)) + +#define PHYTMAC_READ(_pdata, _reg) \ + __raw_readl((_pdata)->mac_regs + (_reg)) + +#define PHYTMAC_READ_BITS(_pdata, _reg, _field) \ + PHYTMAC_GET_BITS(PHYTMAC_READ((_pdata), _reg), _field) + +#define PHYTMAC_WRITE(_pdata, _reg, _val) \ + __raw_writel((_val), (_pdata)->mac_regs + (_reg)) + +#define PHYTMAC_MSG_READ(_pdata, _reg) \ + 
__raw_readl((_pdata)->mac_regs + (_reg)) + +#define PHYTMAC_WRITE(_pdata, _reg, _val) \ + __raw_writel((_val), (_pdata)->mac_regs + (_reg)) + +#define LSO_UFO 1 +#define LSO_TSO 2 + +#define PHYTMAC_INT_TX_COMPLETE 0x1 +#define PHYTMAC_INT_TX_ERR 0x2 +#define PHYTMAC_INT_RX_COMPLETE 0x4 +#define PHYTMAC_INT_RX_OVERRUN 0x8 +#define PHYTMAC_INT_RX_DESC_FULL 0x10 +#define PHYTMAC_RX_INT_FLAGS (PHYTMAC_INT_RX_COMPLETE) +#define PHYTMAC_TX_INT_FLAGS (PHYTMAC_INT_TX_COMPLETE \ + | PHYTMAC_INT_TX_ERR) + +#define PHYTMAC_WAKE_MAGIC 0x00000001 +#define PHYTMAC_WAKE_ARP 0x00000002 +#define PHYTMAC_WAKE_UCAST 0x00000004 +#define PHYTMAC_WAKE_MCAST 0x00000008 + +struct packet_info { + int lso; + int desc_cnt; + int hdrlen; + int nocrc; + u32 mss; + u32 seq; +}; + +#define DEV_TYPE_PLATFORM 0 +#define DEV_TYPE_PCI 1 + +struct phytmac_statistics { + char stat_string[ETH_GSTRING_LEN]; +}; + +#define STAT_TITLE(title) { \ + .stat_string = title, \ +} + +static const struct phytmac_statistics phytmac_statistics[] = { + STAT_TITLE("tx_octets"), + STAT_TITLE("tx_packets"), + STAT_TITLE("tx_bcast_packets"), + STAT_TITLE("tx_mcase_packets"), + STAT_TITLE("tx_pause_packets"), + STAT_TITLE("tx_64_byte_packets"), + STAT_TITLE("tx_65_127_byte_packets"), + STAT_TITLE("tx_128_255_byte_packets"), + STAT_TITLE("tx_256_511_byte_packets"), + STAT_TITLE("tx_512_1023_byte_packets"), + STAT_TITLE("tx_1024_1518_byte_packets"), + STAT_TITLE("tx_more_than_1518_byte_packets"), + STAT_TITLE("tx_underrun"), + STAT_TITLE("tx_single_collisions"), + STAT_TITLE("tx_multiple_collisions"), + STAT_TITLE("tx_excessive_collisions"), + STAT_TITLE("tx_late_collisions"), + STAT_TITLE("tx_deferred"), + STAT_TITLE("tx_carrier_sense_errors"), + STAT_TITLE("rx_octets"), + STAT_TITLE("rx_packets"), + STAT_TITLE("rx_bcast_packets"), + STAT_TITLE("rx_mcast_packets"), + STAT_TITLE("rx_pause_packets"), + STAT_TITLE("rx_64_byte_packets"), + STAT_TITLE("rx_65_127_byte_packets"), + STAT_TITLE("rx_128_255_byte_packets"), + 
STAT_TITLE("rx_256_511_byte_packets"), + STAT_TITLE("rx_512_1023_byte_packets"), + STAT_TITLE("rx_1024_1518_byte_packets"), + STAT_TITLE("rx_more_than_1518_byte_packets"), + STAT_TITLE("rx_undersized_packets"), + STAT_TITLE("rx_oversize_packets"), + STAT_TITLE("rx_jabbers"), + STAT_TITLE("rx_fcs_errors"), + STAT_TITLE("rx_length_errors"), + STAT_TITLE("rx_symbol_errors"), + STAT_TITLE("rx_alignment_errors"), + STAT_TITLE("rx_resource_over"), + STAT_TITLE("rx_overruns"), + STAT_TITLE("rx_iphdr_csum_errors"), + STAT_TITLE("rx_tcp_csum_errors"), + STAT_TITLE("rx_udp_csum_errors"), +}; + +#define PHYTMAC_STATS_LEN ARRAY_SIZE(phytmac_statistics) + +/* per queue statistics, each should be unsigned long type */ +struct phytmac_queue_stats { + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long rx_dropped; + unsigned long tx_packets; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +static const struct phytmac_statistics queue_statistics[] = { + STAT_TITLE("rx_packets"), + STAT_TITLE("rx_bytes"), + STAT_TITLE("rx_dropped"), + STAT_TITLE("tx_packets"), + STAT_TITLE("tx_bytes"), + STAT_TITLE("tx_dropped"), +}; + +#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics) + +struct phytmac_config { + struct phytmac_hw_if *hw_if; + u32 caps; + u32 tsu_rate; + u16 queue_num; +}; + +struct phytmac_stats { + u64 tx_octets; + u64 tx_packets; + u64 tx_bcast_packets; + u64 tx_mcase_packets; + u64 tx_pause_packets; + u64 tx_64_byte_packets; + u64 tx_65_127_byte_packets; + u64 tx_128_255_byte_packets; + u64 tx_256_511_byte_packets; + u64 tx_512_1023_byte_packets; + u64 tx_1024_1518_byte_packets; + u64 tx_more_than_1518_byte_packets; + u64 tx_underrun; + u64 tx_single_collisions; + u64 tx_multiple_collisions; + u64 tx_excessive_collisions; + u64 tx_late_collisions; + u64 tx_deferred; + u64 tx_carrier_sense_errors; + u64 rx_octets; + u64 rx_packets; + u64 rx_bcast_packets; + u64 rx_mcast_packets; + u64 rx_pause_packets; + u64 rx_64_byte_packets; + u64 
rx_65_127_byte_packets; + u64 rx_128_255_byte_packets; + u64 rx_256_511_byte_packets; + u64 rx_512_1023_byte_packets; + u64 rx_1024_1518_byte_packets; + u64 rx_more_than_1518_byte_packets; + u64 rx_undersized_packets; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_fcs_errors; + u64 rx_length_errors; + u64 rx_symbol_errors; + u64 rx_alignment_errors; + u64 rx_resource_over; + u64 rx_overruns; + u64 rx_iphdr_csum_errors; + u64 rx_tcp_csum_errors; + u64 rx_udp_csum_errors; +}; + +struct ts_incr { + u32 sub_ns; + u32 ns; +}; + +enum phytmac_bd_control { + TS_DISABLED, + TS_FRAME_PTP_EVENT_ONLY, + TS_ALL_PTP_FRAMES, + TS_ALL_FRAMES, +}; + +#ifdef CONFIG_PHYTMAC_ENABLE_PTP +struct phytmac_dma_desc { + u32 desc0; + u32 desc1; + u32 desc2; + u32 desc3; + u32 desc4; + u32 desc5; +}; +#else +struct phytmac_dma_desc { + u32 desc0; + u32 desc1; + u32 desc2; + u32 desc3; +}; +#endif + +struct phytmac_tx_skb { + struct sk_buff *skb; + dma_addr_t addr; + size_t length; + bool mapped_as_page; +}; + +struct phytmac_tx_ts { + struct sk_buff *skb; + u32 ts_1; + u32 ts_2; +}; + +struct phytmac_rx_buffer { + dma_addr_t addr; + struct page *page; + __u16 page_offset; + __u16 pagecnt_bias; +}; + +struct phytmac_queue { + struct phytmac *pdata; + int irq; + int index; + + /* tx queue info */ + unsigned int tx_head; + unsigned int tx_tail; + unsigned int tx_xmit_more; + dma_addr_t tx_ring_addr; + struct work_struct tx_error_task; + struct napi_struct tx_napi; + struct phytmac_dma_desc *tx_ring; + struct phytmac_tx_skb *tx_skb; + /* Lock to protect tx */ + spinlock_t tx_lock; + + /* rx queue info */ + dma_addr_t rx_ring_addr; + unsigned int rx_head; + unsigned int rx_tail; + unsigned int rx_next_to_alloc; + struct phytmac_dma_desc *rx_ring; + struct phytmac_rx_buffer *rx_buffer_info; + struct napi_struct rx_napi; + struct phytmac_queue_stats stats; + +#ifdef CONFIG_PHYTMAC_ENABLE_PTP + struct work_struct tx_ts_task; + unsigned int tx_ts_head; + unsigned int tx_ts_tail; + struct 
phytmac_tx_ts tx_timestamps[128]; +#endif +}; + +struct ethtool_rx_fs_item { + struct ethtool_rx_flow_spec fs; + struct list_head list; +}; + +struct ethtool_rx_fs_list { + struct list_head list; + unsigned int count; +}; + +struct phytmac_msg { + struct completion tx_msg_comp; + u32 tx_msg_ring_size; + u32 rx_msg_ring_size; + u32 tx_msg_head; + u32 tx_msg_tail; + u32 rx_msg_head; + u32 rx_msg_tail; + /* Lock to protect msg */ + spinlock_t msg_lock; +}; + +struct ts_ctrl { + int tx_control; + int rx_control; + int one_step; +}; + +struct phytmac { + void __iomem *mac_regs; + void __iomem *msg_regs; + void __iomem *mhu_regs; + struct pci_dev *pcidev; + struct platform_device *platdev; + struct net_device *ndev; + struct device *dev; + struct ncsi_dev *ncsidev; + struct fwnode_handle *fwnode; + struct phytmac_hw_if *hw_if; + struct phytmac_msg msg_ring; + int dev_type; + int sfp_irq; + int irq_type; + int queue_irq[PHYTMAC_MAX_QUEUES]; + u32 rx_irq_mask; + u32 tx_irq_mask; + u32 msg_enable; + u32 capacities; + u32 max_tx_length; + u32 min_tx_length; + u32 jumbo_len; + u32 wol; + u32 lpi; + u32 power_state; + struct work_struct restart_task; + /* Lock to protect mac config */ + spinlock_t lock; + /* Lock to protect msg tx */ + spinlock_t msg_lock; + u32 rx_ring_size; + u32 tx_ring_size; + u32 dma_data_width; + u32 dma_addr_width; + u32 dma_burst_length; + int rx_bd_prefetch; + int tx_bd_prefetch; + int rx_buffer_len; + u16 queues_max_num; + u16 queues_num; + struct phytmac_queue queues[PHYTMAC_MAX_QUEUES]; + struct phytmac_stats stats; + u64 ethtool_stats[PHYTMAC_STATS_LEN + + QUEUE_STATS_LEN * PHYTMAC_MAX_QUEUES]; + int use_ncsi; + int use_mii; + struct mii_bus *mii_bus; + struct phylink *phylink; + int pause; + phy_interface_t phy_interface; + int speed; + int duplex; + int autoneg; + /* 1588 */ + spinlock_t ts_clk_lock; /* clock locking */ + unsigned int ts_rate; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct ts_incr ts_incr; + 
struct hwtstamp_config ts_config; + struct ts_ctrl ts_ctrl; + /* RX queue filer rule set */ + struct ethtool_rx_fs_list rx_fs_list; + /* Lock to protect fs */ + spinlock_t rx_fs_lock; + unsigned int max_rx_fs; +}; + +struct phytmac_hw_if { + int (*init_msg_ring)(struct phytmac *pdata); + int (*init_hw)(struct phytmac *pdata); + void (*reset_hw)(struct phytmac *pdata); + int (*init_ring_hw)(struct phytmac *pdata); + int (*poweron)(struct phytmac *pdata, int on); + int (*set_wol)(struct phytmac *pdata, int wake); + int (*get_feature)(struct phytmac *pdata); + int (*set_mac_address)(struct phytmac *pdata, const u8 *addr); + int (*get_mac_address)(struct phytmac *pdata, u8 *addr); + int (*enable_promise)(struct phytmac *pdata, int enable); + int (*enable_multicast)(struct phytmac *pdata, int enable); + int (*set_hash_table)(struct phytmac *pdata, unsigned long *mc_filter); + int (*enable_rx_csum)(struct phytmac *pdata, int enable); + int (*enable_tx_csum)(struct phytmac *pdata, int enable); + int (*enable_pause)(struct phytmac *pdata, int enable); + int (*enable_autoneg)(struct phytmac *pdata, int enable); + int (*enable_network)(struct phytmac *pdata, int enable, int rx_tx); + void (*get_stats)(struct phytmac *pdata); + void (*get_regs)(struct phytmac *pdata, u32 *reg_buff); + int (*set_speed)(struct phytmac *pdata); + + void (*mac_config)(struct phytmac *pdata, u32 mode, + const struct phylink_link_state *state); + int (*mac_linkup)(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex); + int (*mac_linkdown)(struct phytmac *pdata); + int (*pcs_linkup)(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex); + int (*pcs_linkdown)(struct phytmac *pdata); + unsigned int (*get_link)(struct phytmac *pdata, phy_interface_t interface); + + /* For RX coalescing */ + int (*config_rx_coalesce)(struct phytmac *pdata); + int (*config_tx_coalesce)(struct phytmac *pdata); + + /* ethtool nfc */ + int (*add_fdir_entry)(struct phytmac *pdata, 
struct ethtool_rx_flow_spec *rx_flow); + int (*del_fdir_entry)(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow); + + /* mido ops */ + int (*enable_mdio_control)(struct phytmac *pdata, int enable); + int (*mdio_read)(struct phytmac *pdata, int mii_id, int regnum, int is_c45); + int (*mdio_write)(struct phytmac *pdata, int mii_id, + int regnum, int is_c45, u16 data); + void (*enable_irq)(struct phytmac *pdata, int queue_index, u32 mask); + void (*disable_irq)(struct phytmac *pdata, int queue_index, u32 mask); + void (*clear_irq)(struct phytmac *pdata, int queue_index, u32 mask); + void (*mask_irq)(struct phytmac *pdata, int queue_index, u32 mask); + unsigned int (*get_irq)(struct phytmac *pdata, int queue_index); + unsigned int (*get_intx_mask)(struct phytmac *pdata); + unsigned int (*tx_map)(struct phytmac_queue *pdata, u32 tx_tail, + struct packet_info *packet); + void (*init_rx_map)(struct phytmac_queue *queue, u32 index); + unsigned int (*rx_map)(struct phytmac_queue *queue, u32 index, dma_addr_t addr); + void (*transmit)(struct phytmac_queue *queue); + void (*restart)(struct phytmac *pdata); + int (*tx_complete)(const struct phytmac_dma_desc *desc); + bool (*rx_complete)(const struct phytmac_dma_desc *desc); + int (*get_rx_pkt_len)(struct phytmac *pdata, const struct phytmac_dma_desc *desc); + bool (*rx_checksum)(const struct phytmac_dma_desc *desc); + void (*set_desc_rxused)(struct phytmac_dma_desc *desc); + bool (*rx_single_buffer)(const struct phytmac_dma_desc *desc); + bool (*rx_pkt_start)(const struct phytmac_dma_desc *desc); + bool (*rx_pkt_end)(const struct phytmac_dma_desc *desc); + unsigned int (*zero_rx_desc_addr)(struct phytmac_dma_desc *desc); + void (*clear_rx_desc)(struct phytmac_queue *queue, int begin, int end); + void (*clear_tx_desc)(struct phytmac_queue *queue); + /* ptp */ + void (*init_ts_hw)(struct phytmac *pdata); + void (*get_time)(struct phytmac *pdata, struct timespec64 *ts); + void (*set_time)(struct phytmac *pdata, 
time64_t sec, long nsec); + int (*set_ts_config)(struct phytmac *pdata, struct ts_ctrl *ctrl); + void (*clear_time)(struct phytmac *pdata); + int (*set_incr)(struct phytmac *pdata, struct ts_incr *incr); + int (*adjust_fine)(struct phytmac *pdata, long ppm, bool negative); + int (*adjust_time)(struct phytmac *pdata, s64 delta, int neg); + int (*ts_valid)(struct phytmac *pdata, struct phytmac_dma_desc *desc, + int direction); + void (*get_timestamp)(struct phytmac *pdata, u32 ts_1, u32 ts_2, + struct timespec64 *ts); + unsigned int (*get_ts_rate)(struct phytmac *pdata); +}; + +/* mhu */ +#define PHYTMAC_MHU_AP_CPP_STAT 0x00 +#define PHYTMAC_MHU_AP_CPP_SET 0x04 +#define PHYTMAC_MHU_CPP_DATA0 0x18 +#define PHYTMAC_MHU_CPP_DATA1 0x1c + +#define PHYTMAC_MHU_STAT_BUSY_INDEX 0 +#define PHYTMAC_MHU_STAT_BUSY_WIDTH 1 + +#define PHYTMAC_MHU_SET_INDEX 0 +#define PHYTMAC_MHU_SET_WIDTH 1 + +#define PHYTMAC_DATA0_FREE_INDEX 0 +#define PHYTMAC_DATA0_FREE_WIDTH 1 +#define PHYTMAC_DATA0_DOMAIN_INDEX 1 +#define PHYTMAC_DATA0_DOMAIN_WIDTH 7 +#define PHYTMAC_DATA0_MSG_INDEX 8 +#define PHYTMAC_DATA0_MSG_WIDTH 8 +#define PHYTMAC_MSG_PM 0x04 +#define PHYTMAC_DATA0_PRO_INDEX 16 +#define PHYTMAC_DATA0_PRO_WIDTH 8 +#define PHYTMAC_PRO_ID 0x11 +#define PHYTMAC_DATA0_PAYLOAD_INDEX 24 +#define PHYTMAC_DATA0_PAYLOAD_WIDTH 8 + +#define PHYTMAC_DATA1_STAT_INDEX 0 +#define PHYTMAC_DATA1_STAT_WIDTH 28 +#define PHYTMAC_STATON 8 +#define PHYTMAC_STATOFF 0 +#define PHYTMAC_DATA1_MUST0_INDEX 28 +#define PHYTMAC_DATA1_MUST0_WIDTH 2 +#define PHYTMAC_DATA1_STATTYPE_INDEX 30 +#define PHYTMAC_DATA1_STATTYPE_WIDTH 1 +#define PHYTMAC_STATTYPE 0x1 +#define PHYTMAC_DATA1_MUST1_INDEX 31 +#define PHYTMAC_DATA1_MUST1_WIDTH 1 + +#define PHYTMAC_MHU_READ(_pdata, _reg) \ + __raw_readl((_pdata)->mhu_regs + (_reg)) +#define PHYTMAC_MHU_WRITE(_pdata, _reg, _val) \ + __raw_writel((_val), (_pdata)->mhu_regs + (_reg)) +#define PHYTMAC_READ_STAT(pdata) PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_AP_CPP_STAT) +#define 
PHYTMAC_READ_DATA0(pdata) PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA0) +#define PHYTMAC_TIMEOUT 1000000000 /* in usecs */ + +#define PHYTMAC_GFP_FLAGS \ + (GFP_ATOMIC | __GFP_NOWARN | GFP_DMA | __GFP_DMA32) +#define PHYTMAC_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#define PHYTMAC_SKB_PAD (NET_SKB_PAD) + +#define PHYTMAC_RXBUFFER_2048 2048 +#define PHYTMAC_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(PHYTMAC_RXBUFFER_2048) - PHYTMAC_SKB_PAD) + +#define PHYTMAC_RX_PAGE_ORDER 0 +#define PHYTMAC_RX_PAGE_SIZE (PAGE_SIZE << PHYTMAC_RX_PAGE_ORDER) + +struct phytmac_tx_skb *phytmac_get_tx_skb(struct phytmac_queue *queue, + unsigned int index); +struct phytmac_dma_desc *phytmac_get_tx_desc(struct phytmac_queue *queue, + unsigned int index); +struct phytmac_dma_desc *phytmac_get_rx_desc(struct phytmac_queue *queue, + unsigned int index); +void phytmac_set_ethtool_ops(struct net_device *netdev); +int phytmac_drv_probe(struct phytmac *pdata); +int phytmac_drv_remove(struct phytmac *pdata); +int phytmac_drv_suspend(struct phytmac *pdata); +int phytmac_drv_resume(struct phytmac *pdata); +struct phytmac *phytmac_alloc_pdata(struct device *dev); +void phytmac_free_pdata(struct phytmac *pdata); +int phytmac_reset_ringsize(struct phytmac *pdata, u32 rx_size, u32 tx_size); +void phytmac_set_bios_wol_enable(struct phytmac *pdata, u32 wol); +#endif diff --git a/drivers/net/ethernet/phytium/phytmac_ethtool.c b/drivers/net/ethernet/phytium/phytmac_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..354068ef98f42fdf02c637d87c256dfb8f08c3ba --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_ethtool.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_v1.h" +#include "phytmac_v2.h" +#include "phytmac_ptp.h" + +static void phytmac_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, + u64 *data) +{ + struct phytmac *pdata 
= netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + int i = PHYTMAC_STATS_LEN, j; + int q; + struct phytmac_queue *queue; + unsigned long *stat; + + hw_if->get_stats(pdata); + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) + for (j = 0, stat = &queue->stats.rx_packets; j < QUEUE_STATS_LEN; ++j, ++stat) + pdata->ethtool_stats[i++] = *stat; + + memcpy(data, &pdata->ethtool_stats, sizeof(u64) + * (PHYTMAC_STATS_LEN + QUEUE_STATS_LEN * pdata->queues_num)); +} + +static inline int phytmac_get_sset_count(struct net_device *ndev, int sset) +{ + struct phytmac *pdata = netdev_priv(ndev); + + switch (sset) { + case ETH_SS_STATS: + return PHYTMAC_STATS_LEN + QUEUE_STATS_LEN * pdata->queues_num; + default: + return -EOPNOTSUPP; + } +} + +static void phytmac_get_ethtool_strings(struct net_device *ndev, u32 sset, u8 *p) +{ + char stat_string[ETH_GSTRING_LEN]; + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_queue *queue; + unsigned int i; + unsigned int q; + + switch (sset) { + case ETH_SS_STATS: + for (i = 0; i < PHYTMAC_STATS_LEN; i++, p += ETH_GSTRING_LEN) + memcpy(p, phytmac_statistics[i].stat_string, + ETH_GSTRING_LEN); + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { + snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", + q, queue_statistics[i].stat_string); + memcpy(p, stat_string, ETH_GSTRING_LEN); + } + } + break; + } +} + +static inline int phytmac_get_regs_len(struct net_device *ndev) +{ + return PHYTMAC_GREGS_LEN; +} + +static void phytmac_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, + void *p) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 *regs_buff = p; + + memset(p, 0, PHYTMAC_GREGS_LEN * sizeof(u32)); + + hw_if->get_regs(pdata, regs_buff); +} + +static void phytmac_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct phytmac 
*pdata = netdev_priv(ndev); + + phylink_ethtool_get_wol(pdata->phylink, wol); + + if (pdata->wol & PHYTMAC_WAKE_MAGIC) { + wol->wolopts |= WAKE_MAGIC; + wol->supported |= WAKE_MAGIC; + } + if (pdata->wol & PHYTMAC_WAKE_ARP) { + wol->wolopts |= WAKE_ARP; + wol->supported |= WAKE_ARP; + } + if (pdata->wol & PHYTMAC_WAKE_UCAST) { + wol->wolopts |= WAKE_UCAST; + wol->supported |= WAKE_UCAST; + } + if (pdata->wol & PHYTMAC_WAKE_MCAST) { + wol->wolopts |= WAKE_MCAST; + wol->supported |= WAKE_MCAST; + } +} + +static int phytmac_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct phytmac *pdata = netdev_priv(ndev); + int ret; + + ret = phylink_ethtool_set_wol(pdata->phylink, wol); + + if (!ret || ret != -EOPNOTSUPP) + return ret; + + pdata->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + pdata->wol |= PHYTMAC_WAKE_MAGIC; + if (wol->wolopts & WAKE_ARP) + pdata->wol |= PHYTMAC_WAKE_ARP; + if (wol->wolopts & WAKE_UCAST) + pdata->wol |= PHYTMAC_WAKE_UCAST; + if (wol->wolopts & WAKE_MCAST) + pdata->wol |= PHYTMAC_WAKE_MCAST; + + device_set_wakeup_enable(pdata->dev, pdata->wol ? 1 : 0); + phytmac_set_bios_wol_enable(pdata, pdata->wol ? 
1 : 0); + + return 0; +} + +static void phytmac_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct phytmac *pdata = netdev_priv(ndev); + + ring->rx_max_pending = MAX_RX_RING_SIZE; + ring->tx_max_pending = MAX_TX_RING_SIZE; + + ring->rx_pending = pdata->rx_ring_size; + ring->tx_pending = pdata->tx_ring_size; +} + +static int phytmac_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct phytmac *pdata = netdev_priv(ndev); + u32 new_rx_size, new_tx_size; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_size = clamp_t(u32, ring->rx_pending, + MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); + new_rx_size = roundup_pow_of_two(new_rx_size); + + new_tx_size = clamp_t(u32, ring->tx_pending, + MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); + new_tx_size = roundup_pow_of_two(new_tx_size); + + if (EQUAL(new_tx_size, pdata->tx_ring_size) && + EQUAL(new_rx_size, pdata->rx_ring_size)) { + /* nothing to do */ + return 0; + } + + return phytmac_reset_ringsize(pdata, new_rx_size, new_tx_size); +} + +static int phytmac_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct phytmac *pdata = netdev_priv(ndev); + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) { + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = + (1 << HWTSTAMP_TX_ONESTEP_SYNC) | + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + info->phc_index = pdata->ptp_clock ? 
ptp_clock_index(pdata->ptp_clock) : -1; + + return 0; + } + + return ethtool_op_get_ts_info(ndev, info); +} + +static int phytmac_add_fdir_ethtool(struct net_device *ndev, + struct ethtool_rxnfc *cmd) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_flow_spec *fs = &cmd->fs; + struct ethtool_rx_fs_item *item, *newfs; + unsigned long flags; + int ret = -EINVAL; + bool added = false; + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (cmd->fs.location >= pdata->max_rx_fs || + cmd->fs.ring_cookie >= pdata->queues_num) { + return -EINVAL; + } + + newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); + if (!newfs) + return -ENOMEM; + memcpy(&newfs->fs, fs, sizeof(newfs->fs)); + + netdev_dbg(ndev, "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", + fs->flow_type, (int)fs->ring_cookie, fs->location, + htonl(fs->h_u.tcp_ip4_spec.ip4src), + htonl(fs->h_u.tcp_ip4_spec.ip4dst), + htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); + + spin_lock_irqsave(&pdata->rx_fs_lock, flags); + + /* find correct place to add in list */ + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (item->fs.location > newfs->fs.location) { + list_add_tail(&newfs->list, &item->list); + added = true; + break; + } else if (item->fs.location == fs->location) { + netdev_err(ndev, "Rule not added: location %d not free!\n", + fs->location); + ret = -EBUSY; + goto err; + } + } + if (!added) + list_add_tail(&newfs->list, &pdata->rx_fs_list.list); + + hw_if->add_fdir_entry(pdata, fs); + pdata->rx_fs_list.count++; + + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + return 0; + +err: + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + kfree(newfs); + return ret; +} + +static int phytmac_del_fdir_ethtool(struct net_device *ndev, + struct ethtool_rxnfc *cmd) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_fs_item *item; + struct ethtool_rx_flow_spec *fs; + unsigned long flags; + struct phytmac_hw_if *hw_if = 
pdata->hw_if; + + spin_lock_irqsave(&pdata->rx_fs_lock, flags); + + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (item->fs.location == cmd->fs.location) { + /* disable screener regs for the flow entry */ + fs = &item->fs; + netdev_dbg(ndev, "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", + fs->flow_type, (int)fs->ring_cookie, fs->location, + htonl(fs->h_u.tcp_ip4_spec.ip4src), + htonl(fs->h_u.tcp_ip4_spec.ip4dst), + htons(fs->h_u.tcp_ip4_spec.psrc), + htons(fs->h_u.tcp_ip4_spec.pdst)); + + hw_if->del_fdir_entry(pdata, fs); + + list_del(&item->list); + pdata->rx_fs_list.count--; + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + kfree(item); + return 0; + } + } + + spin_unlock_irqrestore(&pdata->rx_fs_lock, flags); + return -EINVAL; +} + +static int phytmac_get_fdir_entry(struct net_device *ndev, + struct ethtool_rxnfc *cmd) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_fs_item *item; + + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (item->fs.location == cmd->fs.location) { + memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); + return 0; + } + } + return -EINVAL; +} + +static int phytmac_get_all_fdir_entries(struct net_device *ndev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct ethtool_rx_fs_item *item; + u32 cnt = 0; + + list_for_each_entry(item, &pdata->rx_fs_list.list, list) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = item->fs.location; + cnt++; + } + cmd->data = pdata->max_rx_fs; + cmd->rule_cnt = cnt; + + return 0; +} + +static int phytmac_get_rxnfc(struct net_device *ndev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct phytmac *pdata = netdev_priv(ndev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = pdata->queues_num; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = pdata->rx_fs_list.count; + break; + case ETHTOOL_GRXCLSRULE: + ret = 
phytmac_get_fdir_entry(ndev, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = phytmac_get_all_fdir_entries(ndev, cmd, rule_locs); + break; + default: + netdev_err(ndev, "Command parameter %d is not supported\n", cmd->cmd); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int phytmac_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd) +{ + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + return phytmac_add_fdir_ethtool(ndev, cmd); + case ETHTOOL_SRXCLSRLDEL: + return phytmac_del_fdir_ethtool(ndev, cmd); + default: + netdev_err(ndev, "Command parameter %d is not supported\n", cmd->cmd); + return -EOPNOTSUPP; + } +} + +static int phytmac_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *kset) +{ + int ret = 0; + struct phytmac *pdata = netdev_priv(ndev); + u32 supported = 0; + u32 advertising = 0; + + if (!ndev->phydev) { + kset->base.port = PORT_FIBRE; + kset->base.transceiver = XCVR_INTERNAL; + kset->base.duplex = pdata->duplex; + kset->base.speed = pdata->speed; + + if (pdata->phy_interface == PHY_INTERFACE_MODE_USXGMII) { + supported = SUPPORTED_10000baseT_Full + | SUPPORTED_FIBRE | SUPPORTED_Pause; + advertising = ADVERTISED_10000baseT_Full + | ADVERTISED_FIBRE | ADVERTISED_Pause; + } else if (pdata->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + supported = SUPPORTED_2500baseX_Full + | SUPPORTED_FIBRE | SUPPORTED_Pause; + advertising = ADVERTISED_2500baseX_Full + | ADVERTISED_FIBRE | ADVERTISED_Pause; + } else if (pdata->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { + supported = SUPPORTED_1000baseT_Full + | SUPPORTED_FIBRE | SUPPORTED_Pause; + advertising = ADVERTISED_1000baseT_Full + | ADVERTISED_FIBRE | ADVERTISED_Pause; + } else if (pdata->phy_interface == PHY_INTERFACE_MODE_SGMII) { + supported = SUPPORTED_1000baseT_Full + | SUPPORTED_FIBRE | SUPPORTED_Pause; + advertising = ADVERTISED_1000baseT_Full + | ADVERTISED_FIBRE | ADVERTISED_Pause; + } + + 
ethtool_convert_legacy_u32_to_link_mode(kset->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(kset->link_modes.advertising, + advertising); + } else { + phy_ethtool_get_link_ksettings(ndev, kset); + } + + return ret; +} + +static int phytmac_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *kset) +{ + int ret = 0; + + if (!ndev->phydev) { + netdev_err(ndev, "Without a PHY, setting link is not supported\n"); + ret = -EOPNOTSUPP; + } else { + phy_ethtool_set_link_ksettings(ndev, kset); + } + + return ret; +} + +static inline void phytmac_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct phytmac *pdata = netdev_priv(ndev); + + pause->rx_pause = pdata->pause; + pause->tx_pause = pdata->pause; +} + +static int phytmac_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (pause->rx_pause != pdata->pause) + hw_if->enable_pause(pdata, pause->rx_pause); + + pdata->pause = pause->rx_pause; + + return 0; +} + +static inline void phytmac_get_channels(struct net_device *ndev, + struct ethtool_channels *ch) +{ + struct phytmac *pdata = netdev_priv(ndev); + + ch->max_combined = pdata->queues_max_num; + ch->combined_count = pdata->queues_num; +} + +static int phytmac_set_channels(struct net_device *ndev, + struct ethtool_channels *ch) +{ + struct phytmac *pdata = netdev_priv(ndev); + + if (netif_running(ndev)) + return -EBUSY; + + if (ch->combined_count > pdata->queues_max_num) { + netdev_err(ndev, "combined channel count cannot exceed %u\n", + ch->combined_count); + + return -EINVAL; + } + + pdata->queues_num = ch->combined_count; + + return 0; +} + +static inline u32 phytmac_get_msglevel(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + + return pdata->msg_enable; +} + +static inline void phytmac_set_msglevel(struct net_device *ndev, 
u32 level) +{ + struct phytmac *pdata = netdev_priv(ndev); + + pdata->msg_enable = level; +} + +static void phytmac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) +{ + struct phytmac *pdata = netdev_priv(ndev); + + strncpy(drvinfo->driver, PHYTMAC_DRV_NAME, sizeof(drvinfo->driver)); + strncpy(drvinfo->version, PHYTMAC_DRIVER_VERSION, sizeof(drvinfo->version)); + + if (pdata->platdev) + strncpy(drvinfo->bus_info, pdata->platdev->name, sizeof(drvinfo->bus_info)); + else if (pdata->pcidev) + strncpy(drvinfo->bus_info, pci_name(pdata->pcidev), sizeof(drvinfo->bus_info)); +} + +static const struct ethtool_ops phytmac_ethtool_ops = { + .get_regs_len = phytmac_get_regs_len, + .get_regs = phytmac_get_regs, + .get_msglevel = phytmac_get_msglevel, + .set_msglevel = phytmac_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ts_info = phytmac_get_ts_info, + .get_ethtool_stats = phytmac_get_ethtool_stats, + .get_strings = phytmac_get_ethtool_strings, + .get_sset_count = phytmac_get_sset_count, + .get_link_ksettings = phytmac_get_link_ksettings, + .set_link_ksettings = phytmac_set_link_ksettings, + .get_ringparam = phytmac_get_ringparam, + .set_ringparam = phytmac_set_ringparam, + .get_rxnfc = phytmac_get_rxnfc, + .set_rxnfc = phytmac_set_rxnfc, + .get_pauseparam = phytmac_get_pauseparam, + .set_pauseparam = phytmac_set_pauseparam, + .get_channels = phytmac_get_channels, + .set_channels = phytmac_set_channels, + .get_wol = phytmac_get_wol, + .set_wol = phytmac_set_wol, + .get_drvinfo = phytmac_get_drvinfo, +}; + +void phytmac_set_ethtool_ops(struct net_device *ndev) +{ + ndev->ethtool_ops = &phytmac_ethtool_ops; +} + diff --git a/drivers/net/ethernet/phytium/phytmac_main.c b/drivers/net/ethernet/phytium/phytmac_main.c new file mode 100644 index 0000000000000000000000000000000000000000..d89f6a105ead4f0f2c69a746c9711f73948e84d6 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_main.c @@ -0,0 +1,2475 @@ +// SPDX-License-Identifier: GPL-2.0-only 
+/* + * Phytium Ethernet Controller driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_ptp.h" + +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +#define RX_BUFFER_MULTIPLE 64 /* bytes */ +#define MAX_MTU 3072 +#define RING_ADDR_INTERVAL 128 +#define MAX_RING_ADDR_ALLOC_TIMES 3 + +#define RX_RING_BYTES(pdata) (sizeof(struct phytmac_dma_desc) \ + * (pdata)->rx_ring_size) + +#define TX_RING_BYTES(pdata) (sizeof(struct phytmac_dma_desc)\ + * (pdata)->tx_ring_size) + +/* Max length of transmit frame must be a multiple of 8 bytes */ +#define PHYTMAC_TX_LEN_ALIGN 8 +/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a + * false amba_error in TX path from the DMA assuming there is not enough + * space in the SRAM (16KB) even when there is. + */ + +static int phytmac_queue_phyaddr_check(struct phytmac *pdata, dma_addr_t ring_base_addr, + int offset) +{ + u32 bus_addr_high; + int i; + + /* Check the high address of the DMA ring. 
*/ + bus_addr_high = upper_32_bits(ring_base_addr); + for (i = 1; i < pdata->queues_num; i++) { + ring_base_addr += offset; + if (bus_addr_high != upper_32_bits(ring_base_addr)) + return -EFAULT; + } + + return 0; +} + +static int phytmac_change_mtu(struct net_device *ndev, int new_mtu) +{ + if (netif_running(ndev)) + return -EBUSY; + + if (new_mtu > MAX_MTU) { + netdev_info(ndev, "Can not set MTU over %d.\n", MAX_MTU); + return -EINVAL; + } + + ndev->mtu = new_mtu; + + return 0; +} + +static inline void phytmac_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + memcpy(dev->dev_addr, addr, ETH_ALEN); +} + +static int phytmac_set_mac_address(struct net_device *netdev, void *addr) +{ + struct phytmac *pdata = netdev_priv(netdev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct sockaddr *saddr = addr; + + if (netif_msg_drv(pdata)) + netdev_info(netdev, "phytmac set mac address"); + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + phytmac_hw_addr_set(netdev, saddr->sa_data); + + hw_if->set_mac_address(pdata, saddr->sa_data); + + phytmac_set_bios_wol_enable(pdata, pdata->wol ? 
1 : 0); + + return 0; +} + +static int phytmac_get_mac_address(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + u8 addr[6]; + + hw_if->get_mac_address(pdata, addr); + + if (is_valid_ether_addr(addr)) { + phytmac_hw_addr_set(pdata->ndev, addr); + return 0; + } + dev_info(pdata->dev, "invalid hw address, using random\n"); + eth_hw_addr_random(pdata->ndev); + + return 0; +} + +static int phytmac_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct phytmac *pdata = bus->priv; + struct phytmac_hw_if *hw_if = pdata->hw_if; + int data; + + data = hw_if->mdio_read(pdata, mii_id, regnum, regnum & MII_ADDR_C45); + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "mdio read mii_id:%d, regnum:%x, data:%x\n", + mii_id, regnum, data); + + return data; +} + +static int phytmac_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 data) +{ + struct phytmac *pdata = bus->priv; + struct phytmac_hw_if *hw_if = pdata->hw_if; + int ret; + + ret = hw_if->mdio_write(pdata, mii_id, regnum, regnum & MII_ADDR_C45, data); + if (ret) + netdev_err(pdata->ndev, "mdio %d, reg %x, data %x write failed!\n", + mii_id, regnum, data); + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "mdio write mii_id:%d, regnum:%x, data:%x\n", + mii_id, regnum, data); + + return 0; +} + +static inline int hash_bit_value(int bitnr, __u8 *addr) +{ + if (addr[bitnr / 8] & (1 << (bitnr % 8))) + return 1; + return 0; +} + +/* Return the hash index value for the specified address. 
*/ +static int phytmac_get_hash_index(__u8 *addr) +{ + int i, j, bitval; + int hash_index = 0; + + for (j = 0; j < 6; j++) { + for (i = 0, bitval = 0; i < 8; i++) + bitval ^= hash_bit_value(i * 6 + j, addr); + + hash_index |= (bitval << j); + } + + return hash_index; +} + +static void phytmac_set_mac_hash_table(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct netdev_hw_addr *ha; + unsigned long mc_filter[2]; + unsigned int bitnr; + + mc_filter[0] = 0; + mc_filter[1] = 0; + + netdev_for_each_mc_addr(ha, ndev) { + bitnr = phytmac_get_hash_index(ha->addr); + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); + } + + hw_if->set_hash_table(pdata, mc_filter); +} + +/* Enable/Disable promiscuous and multicast modes. */ +static void phytmac_set_rx_mode(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + + hw_if->enable_promise(pdata, ndev->flags & IFF_PROMISC); + + hw_if->enable_multicast(pdata, ndev->flags & IFF_ALLMULTI); + if (!netdev_mc_empty(ndev)) + phytmac_set_mac_hash_table(ndev); +} + +static struct net_device_stats *phytmac_get_stats(struct net_device *dev) +{ + struct phytmac *pdata = netdev_priv(dev); + struct net_device_stats *nstat = &pdata->ndev->stats; + struct phytmac_stats *stat = &pdata->stats; + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (pdata->power_state == PHYTMAC_POWEROFF) + return nstat; + + hw_if->get_stats(pdata); + + nstat->rx_errors = (stat->rx_fcs_errors + + stat->rx_alignment_errors + + stat->rx_overruns + + stat->rx_oversize_packets + + stat->rx_jabbers + + stat->rx_undersized_packets + + stat->rx_length_errors); + nstat->rx_dropped = stat->rx_resource_over; + nstat->tx_errors = (stat->tx_late_collisions + + stat->tx_excessive_collisions + + stat->tx_underrun + + stat->tx_carrier_sense_errors); + nstat->multicast = stat->rx_mcast_packets; + nstat->collisions = (stat->tx_single_collisions + + 
stat->tx_multiple_collisions +
+			     stat->tx_excessive_collisions +
+			     stat->tx_late_collisions);
+	nstat->rx_length_errors = (stat->rx_oversize_packets +
+				   stat->rx_jabbers +
+				   stat->rx_undersized_packets +
+				   stat->rx_length_errors);
+	nstat->rx_over_errors = stat->rx_resource_over;
+	nstat->rx_crc_errors = stat->rx_fcs_errors;
+	nstat->rx_frame_errors = stat->rx_alignment_errors;
+	nstat->rx_fifo_errors = stat->rx_overruns;
+	nstat->tx_aborted_errors = stat->tx_excessive_collisions;
+	nstat->tx_carrier_errors = stat->tx_carrier_sense_errors;
+	nstat->tx_fifo_errors = stat->tx_underrun;
+
+	return nstat;
+}
+
+/* Pick the RX buffer length: the smaller build_skb frame size on
+ * PAGE_SIZE < 8192 systems, otherwise 2048 bytes; either way rounded
+ * down to RX_BUFFER_MULTIPLE.
+ */
+static inline int phytmac_calc_rx_buf_len(void)
+{
+#if (PAGE_SIZE < 8192)
+	return rounddown(PHYTMAC_MAX_FRAME_BUILD_SKB, RX_BUFFER_MULTIPLE);
+#endif
+	return rounddown(PHYTMAC_RXBUFFER_2048, RX_BUFFER_MULTIPLE);
+}
+
+/* Ring accessors.  The index is wrapped with "& (ring_size - 1)",
+ * which assumes the ring sizes are powers of two — NOTE(review):
+ * confirm that against where rx/tx_ring_size are set.
+ */
+struct phytmac_dma_desc *phytmac_get_rx_desc(struct phytmac_queue *queue,
+					     unsigned int index)
+{
+	return &queue->rx_ring[index & (queue->pdata->rx_ring_size - 1)];
+}
+
+struct phytmac_tx_skb *phytmac_get_tx_skb(struct phytmac_queue *queue,
+					  unsigned int index)
+{
+	return &queue->tx_skb[index & (queue->pdata->tx_ring_size - 1)];
+}
+
+struct phytmac_dma_desc *phytmac_get_tx_desc(struct phytmac_queue *queue,
+					     unsigned int index)
+{
+	return &queue->tx_ring[index & (queue->pdata->tx_ring_size - 1)];
+}
+
+/* Unmap and free every RX page still owned by @queue (from rx_tail up
+ * to rx_next_to_alloc) and reset the ring indices to zero.
+ */
+static void phytmac_rx_unmap(struct phytmac_queue *queue)
+{
+	struct phytmac_rx_buffer *rx_buffer_info;
+	struct phytmac *pdata = queue->pdata;
+	int i;
+
+	if (queue->rx_buffer_info) {
+		/* Free all the Rx ring sk_buffs */
+		i = queue->rx_tail;
+
+		while (i != queue->rx_next_to_alloc) {
+			rx_buffer_info = &queue->rx_buffer_info[i];
+
+			/* Invalidate cache lines that may have been written to by
+			 * device so that we avoid corrupting memory.
+ */ + dma_sync_single_range_for_cpu(pdata->dev, + rx_buffer_info->addr, + rx_buffer_info->page_offset, + pdata->rx_buffer_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(pdata->dev, + rx_buffer_info->addr, + PHYTMAC_RX_PAGE_SIZE, + DMA_FROM_DEVICE, + PHYTMAC_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer_info->page, + rx_buffer_info->pagecnt_bias); + + i++; + if (i == pdata->rx_ring_size) + i = 0; + } + + queue->rx_tail = 0; + queue->rx_head = 0; + queue->rx_next_to_alloc = 0; + } +} + +static int phytmac_free_tx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct phytmac_dma_desc *tx_ring_base = NULL; + dma_addr_t tx_ring_base_addr; + unsigned int q; + int size; + + queue = pdata->queues; + if (queue->tx_ring) { + tx_ring_base = queue->tx_ring; + tx_ring_base_addr = queue->tx_ring_addr; + } + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + kfree(queue->tx_skb); + queue->tx_skb = NULL; + + if (queue->tx_ring) + queue->tx_ring = NULL; + } + + if (tx_ring_base) { + size = pdata->queues_num * (TX_RING_BYTES(pdata) + pdata->tx_bd_prefetch + + RING_ADDR_INTERVAL); + dma_free_coherent(pdata->dev, size, tx_ring_base, tx_ring_base_addr); + } + + return 0; +} + +static int phytmac_free_rx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct phytmac_dma_desc *rx_ring_base = NULL; + dma_addr_t rx_ring_base_addr; + unsigned int q; + int size; + + queue = pdata->queues; + if (queue->rx_ring) { + rx_ring_base = queue->rx_ring; + rx_ring_base_addr = queue->rx_ring_addr; + } + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + phytmac_rx_unmap(queue); + + if (queue->rx_ring) + queue->rx_ring = NULL; + + if (queue->rx_buffer_info) { + vfree(queue->rx_buffer_info); + queue->rx_buffer_info = NULL; + } + } + + if (rx_ring_base) { + size = pdata->queues_num * (RX_RING_BYTES(pdata) + pdata->rx_bd_prefetch + + RING_ADDR_INTERVAL); + 
dma_free_coherent(pdata->dev, size, rx_ring_base, rx_ring_base_addr); + } + + return 0; +} + +static int phytmac_alloc_tx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct phytmac_dma_desc *tx_ring_base; + dma_addr_t tx_ring_base_addr; + unsigned int q; + int tx_offset; + int tx_size; + int size = 0; + int ret, i; + + tx_offset = TX_RING_BYTES(pdata) + pdata->tx_bd_prefetch + RING_ADDR_INTERVAL; + tx_offset = ALIGN(tx_offset, 4096); + tx_size = pdata->queues_num * tx_offset; + for (i = 0; i < MAX_RING_ADDR_ALLOC_TIMES + 1; i++) { + if (i == MAX_RING_ADDR_ALLOC_TIMES) + goto err; + + tx_ring_base = dma_alloc_coherent(pdata->dev, tx_size, + &tx_ring_base_addr, GFP_KERNEL); + if (!tx_ring_base) + continue; + + ret = phytmac_queue_phyaddr_check(pdata, tx_ring_base_addr, + tx_offset); + if (ret) { + dma_free_coherent(pdata->dev, tx_size, tx_ring_base, + tx_ring_base_addr); + continue; + } else { + break; + } + } + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + queue->tx_ring = (void *)tx_ring_base + q * tx_offset; + queue->tx_ring_addr = tx_ring_base_addr + q * tx_offset; + if (!queue->tx_ring) + goto err; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, + "Allocated TX ring for queue %u of %d bytes at %08lx\n", + q, tx_offset, (unsigned long)queue->tx_ring_addr); + + size = pdata->tx_ring_size * sizeof(struct phytmac_tx_skb); + queue->tx_skb = kzalloc(size, GFP_KERNEL); + if (!queue->tx_skb) + goto err; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, + "Allocated %d TX struct tx_skb entries at %p\n", + pdata->tx_ring_size, queue->tx_skb); + } + + return 0; +err: + phytmac_free_tx_resource(pdata); + + return -ENOMEM; +} + +static int phytmac_alloc_rx_resource(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + struct phytmac_dma_desc *rx_ring_base; + dma_addr_t rx_ring_base_addr; + int rx_offset; + int rx_size; + unsigned int q; + int size = 0; + int ret, i; + + rx_offset = 
RX_RING_BYTES(pdata) + pdata->rx_bd_prefetch + RING_ADDR_INTERVAL; + rx_offset = ALIGN(rx_offset, 4096); + rx_size = pdata->queues_num * rx_offset; + for (i = 0; i < MAX_RING_ADDR_ALLOC_TIMES + 1; i++) { + if (i == MAX_RING_ADDR_ALLOC_TIMES) + goto err; + + rx_ring_base = dma_alloc_coherent(pdata->dev, rx_size, + &rx_ring_base_addr, GFP_KERNEL); + if (!rx_ring_base) + continue; + + ret = phytmac_queue_phyaddr_check(pdata, rx_ring_base_addr, + rx_offset); + if (ret) { + dma_free_coherent(pdata->dev, rx_size, rx_ring_base, + rx_ring_base_addr); + continue; + } else { + break; + } + } + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + queue->rx_ring = (void *)rx_ring_base + q * rx_offset; + queue->rx_ring_addr = rx_ring_base_addr + q * rx_offset; + if (!queue->rx_ring) + goto err; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, + "Allocated RX ring for queue %u of %d bytes at %08lx\n", + q, rx_offset, (unsigned long)queue->rx_ring_addr); + + size = pdata->rx_ring_size * sizeof(struct phytmac_rx_buffer); + queue->rx_buffer_info = vzalloc(size); + if (!queue->rx_buffer_info) + goto err; + } + + return 0; +err: + phytmac_free_rx_resource(pdata); + + return -ENOMEM; +} + +static int phytmac_alloc_resource(struct phytmac *pdata) +{ + int ret; + + pdata->rx_buffer_len = phytmac_calc_rx_buf_len(); + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "alloc resource, rx_buffer_len:%d\n", + pdata->rx_buffer_len); + + ret = phytmac_alloc_tx_resource(pdata); + if (ret) + return ret; + + ret = phytmac_alloc_rx_resource(pdata); + if (ret) { + phytmac_free_tx_resource(pdata); + return ret; + } + + return 0; +} + +static void phytmac_free_resource(struct phytmac *pdata) +{ + phytmac_free_tx_resource(pdata); + phytmac_free_rx_resource(pdata); +} + +static irqreturn_t phytmac_irq(int irq, void *data) +{ + struct phytmac_queue *queue = data; + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 status; + + 
status = hw_if->get_irq(pdata, queue->index); + + if (netif_msg_intr(pdata)) + netdev_info(pdata->ndev, "phymac irq status:%x\n", status); + + if (unlikely(!status)) + return IRQ_NONE; + + while (status) { + if (status & pdata->rx_irq_mask) { + /* Disable RX interrupts */ + hw_if->disable_irq(pdata, queue->index, pdata->rx_irq_mask); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_RX_COMPLETE); + + if (napi_schedule_prep(&queue->rx_napi)) + __napi_schedule(&queue->rx_napi); + } + + if (status & (PHYTMAC_INT_TX_COMPLETE)) { + /* Disable TX interrupts */ + hw_if->disable_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + + if (napi_schedule_prep(&queue->tx_napi)) + __napi_schedule(&queue->tx_napi); + } + + if (status & PHYTMAC_INT_TX_ERR) + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_TX_ERR); + + if (status & PHYTMAC_INT_RX_OVERRUN) { + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_RX_OVERRUN); + pdata->stats.rx_overruns++; + } + status = hw_if->get_irq(pdata, queue->index); + } + + return IRQ_HANDLED; +} + +static irqreturn_t phytmac_intx_irq(int irq, void *data) +{ + struct phytmac *pdata = data; + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 irq_mask; + int i; + + irq_mask = hw_if->get_intx_mask(pdata); + + if (unlikely(!irq_mask)) + return IRQ_NONE; + + for (i = 0; i < pdata->queues_num; i++) { + if (irq_mask & BIT(i)) + phytmac_irq(irq, &pdata->queues[i]); + } + + return IRQ_HANDLED; +} + +static void phytmac_dump_pkt(struct phytmac *pdata, struct sk_buff *skb, bool tx) +{ + struct net_device *ndev = pdata->ndev; + + if (tx) { + netdev_dbg(ndev, "start_xmit: queue %u len %u head %p data %p tail %p end %p\n", + skb->queue_mapping, skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb)); + } else { + netdev_dbg(ndev, "queue %u received skb of length %u, csum: %08x\n", + skb->queue_mapping, skb->len, skb->csum); + print_hex_dump(KERN_DEBUG, " mac: ", 
DUMP_PREFIX_ADDRESS, 16, 1, + skb_mac_header(skb), 16, true); + } + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, true); +} + +static bool phytmac_alloc_mapped_page(struct phytmac *pdata, + struct phytmac_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = __dev_alloc_pages(PHYTMAC_GFP_FLAGS, PHYTMAC_RX_PAGE_ORDER); + if (unlikely(!page)) { + netdev_err(pdata->ndev, "rx alloc page failed\n"); + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(pdata->dev, page, 0, + PHYTMAC_RX_PAGE_SIZE, + DMA_FROM_DEVICE, PHYTMAC_RX_DMA_ATTR); + if (dma_mapping_error(pdata->dev, dma)) { + __free_pages(page, PHYTMAC_RX_PAGE_ORDER); + return false; + } + + bi->addr = dma; + bi->page = page; + bi->page_offset = PHYTMAC_SKB_PAD; + bi->pagecnt_bias = 1; + + return true; +} + +static inline bool phytmac_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool phytmac_can_reuse_rx_page(struct phytmac_rx_buffer *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* avoid re-using remote pages */ + if (unlikely(phytmac_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + return false; +#else +#define PHYTMAC_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - PHYTMAC_RXBUFFER_2048) + + if (rx_buffer->page_offset > PHYTMAC_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
+	 */
+	if (unlikely(!pagecnt_bias)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
+/* Recycle @old_buff into the slot at rx_next_to_alloc and advance the
+ * next-to-alloc index, wrapping it back to 0 at the ring size.
+ */
+static void phytmac_reuse_rx_page(struct phytmac_queue *queue,
+				  struct phytmac_rx_buffer *old_buff)
+{
+	struct phytmac_rx_buffer *new_buff;
+	struct phytmac *pdata = queue->pdata;
+	u16 nta = queue->rx_next_to_alloc;
+
+	new_buff = &queue->rx_buffer_info[nta & (pdata->rx_ring_size - 1)];
+
+	/* update, and store next to alloc */
+	nta++;
+	queue->rx_next_to_alloc = (nta < pdata->rx_ring_size) ? nta : 0;
+
+	/* Transfer page from old buffer to new buffer.
+	 * Move each member individually to avoid possible store
+	 * forwarding stalls.
+	 */
+	new_buff->addr = old_buff->addr;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+/* Fetch the RX buffer for @index, sync @size bytes of it for CPU
+ * access and take a usage reference by decrementing pagecnt_bias.
+ */
+static struct phytmac_rx_buffer *phytmac_get_rx_buffer(struct phytmac_queue *queue,
+							unsigned int index,
+							const unsigned int size)
+{
+	struct phytmac_rx_buffer *rx_buffer;
+	struct phytmac *pdata = queue->pdata;
+
+	rx_buffer = &queue->rx_buffer_info[index & (pdata->rx_ring_size - 1)];
+	prefetchw(rx_buffer->page);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(pdata->dev,
+				      rx_buffer->addr,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+/* Return a buffer obtained from phytmac_get_rx_buffer(): either recycle
+ * the page back into the ring or, if it cannot be reused, unmap it and
+ * drop the remaining page references.
+ */
+static void phytmac_put_rx_buffer(struct phytmac_queue *queue,
+				  struct phytmac_rx_buffer *rx_buffer)
+{
+	struct phytmac *pdata = queue->pdata;
+
+	if (phytmac_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		phytmac_reuse_rx_page(queue, rx_buffer);
+	} else {
+		dma_unmap_page_attrs(pdata->dev, rx_buffer->addr,
+				     PHYTMAC_RX_PAGE_SIZE,
+				     DMA_FROM_DEVICE, PHYTMAC_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+static void
phytmac_add_rx_frag(struct phytmac_queue *queue, + struct phytmac_rx_buffer *rx_buffer, + struct sk_buff *skb, + unsigned int size) +{ + unsigned int truesize; + +#if (PAGE_SIZE < 8192) + truesize = PHYTMAC_RX_PAGE_SIZE / 2; +#else + truesize = SKB_DATA_ALIGN(PHYTMAC_SKB_PAD + size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *phytmac_build_skb(struct phytmac_rx_buffer *rx_buffer, + unsigned int size) +{ + struct sk_buff *skb; + unsigned int truesize; + void *va; + +#if (PAGE_SIZE < 8192) + truesize = PHYTMAC_RX_PAGE_SIZE / 2; +#else + truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(PHYTMAC_SKB_PAD + size); +#endif + + va = page_address(rx_buffer->page) + rx_buffer->page_offset; + /* prefetch first cache line of first page */ + prefetch(va); + + /* build an skb around the page buffer */ + skb = build_skb(va - PHYTMAC_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, PHYTMAC_SKB_PAD); + __skb_put(skb, size); + + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +static struct sk_buff *phytmac_rx_single(struct phytmac_queue *queue, struct phytmac_dma_desc *desc) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_rx_buffer *rx_buffer; + struct sk_buff *skb = NULL; + unsigned int len; + + len = hw_if->get_rx_pkt_len(pdata, desc); + rx_buffer = phytmac_get_rx_buffer(queue, queue->rx_tail, len); + hw_if->zero_rx_desc_addr(desc); + + skb = phytmac_build_skb(rx_buffer, len); + if (unlikely(!skb)) { + netdev_err(pdata->ndev, + "rx single build skb failed\n"); + 
pdata->ndev->stats.rx_dropped++; + queue->stats.rx_dropped++; + rx_buffer->pagecnt_bias++; + return NULL; + } + + phytmac_put_rx_buffer(queue, rx_buffer); + + skb->protocol = eth_type_trans(skb, pdata->ndev); + skb_checksum_none_assert(skb); + + if (pdata->ndev->features & NETIF_F_RXCSUM && + !(pdata->ndev->flags & IFF_PROMISC) && + hw_if->rx_checksum(desc)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (netif_msg_pktdata(pdata)) + phytmac_dump_pkt(pdata, skb, false); + + return skb; +} + +static struct sk_buff *phytmac_rx_frame(struct phytmac_queue *queue, + unsigned int first_frag, unsigned int last_frag, + unsigned int total_len) +{ + unsigned int frag = 0; + struct sk_buff *skb; + struct phytmac_dma_desc *desc; + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned int frag_len = pdata->rx_buffer_len; + unsigned int offset = frag_len; + struct phytmac_rx_buffer *rx_buffer; + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "rx frame %u - %u (len %u)\n", + first_frag, last_frag, total_len); + + desc = phytmac_get_rx_desc(queue, first_frag); + rx_buffer = phytmac_get_rx_buffer(queue, first_frag, frag_len); + hw_if->zero_rx_desc_addr(desc); + + skb = phytmac_build_skb(rx_buffer, frag_len); + if (unlikely(!skb)) { + netdev_err(pdata->ndev, "rx frame build skb failed\n"); + pdata->ndev->stats.rx_dropped++; + queue->stats.rx_dropped++; + rx_buffer->pagecnt_bias++; + return NULL; + } + + phytmac_put_rx_buffer(queue, rx_buffer); + + for (frag = first_frag + 1; ; frag++) { + desc = phytmac_get_rx_desc(queue, frag); + rx_buffer = phytmac_get_rx_buffer(queue, frag, frag_len); + hw_if->zero_rx_desc_addr(desc); + + if (offset + frag_len > total_len) { + if (unlikely(frag != last_frag)) { + dev_kfree_skb_any(skb); + phytmac_put_rx_buffer(queue, rx_buffer); + return NULL; + } + frag_len = total_len - offset; + } + + phytmac_add_rx_frag(queue, rx_buffer, skb, frag_len); + phytmac_put_rx_buffer(queue, rx_buffer); + + offset += 
frag_len; + + if (frag == last_frag) + break; + } + + skb_checksum_none_assert(skb); + + if (pdata->ndev->features & NETIF_F_RXCSUM && + !(pdata->ndev->flags & IFF_PROMISC) && + hw_if->rx_checksum(phytmac_get_rx_desc(queue, last_frag))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb->protocol = eth_type_trans(skb, pdata->ndev); + if (netif_msg_pktdata(pdata)) + phytmac_dump_pkt(pdata, skb, false); + + return skb; +} + +static struct sk_buff *phytmac_rx_mbuffer(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *desc; + struct sk_buff *skb = NULL; + unsigned int rx_tail = 0; + int first_frag = -1; + unsigned int len; + + for (rx_tail = queue->rx_tail; ; rx_tail++) { + desc = phytmac_get_rx_desc(queue, rx_tail); + if (!hw_if->rx_complete(desc)) + return NULL; + + if (hw_if->rx_pkt_start(desc)) { + if (first_frag != -1) + hw_if->clear_rx_desc(queue, first_frag, rx_tail); + first_frag = rx_tail; + continue; + } + + if (hw_if->rx_pkt_end(desc)) { + queue->rx_tail = rx_tail; + len = hw_if->get_rx_pkt_len(pdata, desc); + skb = phytmac_rx_frame(queue, first_frag, rx_tail, len); + first_frag = -1; + break; + } + } + return skb; +} + +static void phytmac_rx_clean(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned int index, space; + struct phytmac_rx_buffer *rx_buf_info; + + space = CIRC_SPACE(queue->rx_head, queue->rx_tail, + pdata->rx_ring_size); + + while (space > 0) { + index = queue->rx_head & (pdata->rx_ring_size - 1); + rx_buf_info = &queue->rx_buffer_info[index]; + + if (!phytmac_alloc_mapped_page(pdata, rx_buf_info)) + break; + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(pdata->dev, rx_buf_info->addr, + rx_buf_info->page_offset, + pdata->rx_buffer_len, + DMA_FROM_DEVICE); + + hw_if->rx_map(queue, index, rx_buf_info->addr + rx_buf_info->page_offset); + + 
queue->rx_head++; + if (queue->rx_head >= pdata->rx_ring_size) + queue->rx_head &= (pdata->rx_ring_size - 1); + + space--; + } + + queue->rx_next_to_alloc = queue->rx_head; + /* make newly descriptor to hardware */ + wmb(); +} + +static int phytmac_rx(struct phytmac_queue *queue, struct napi_struct *napi, + int budget) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct sk_buff *skb; + struct phytmac_dma_desc *desc; + int count = 0; + + while (count < budget) { + desc = phytmac_get_rx_desc(queue, queue->rx_tail); + /* make newly desc to cpu */ + rmb(); + + if (!hw_if->rx_complete(desc)) + break; + + /* Ensure ctrl is at least as up-to-date as rxused */ + dma_rmb(); + + if (hw_if->rx_single_buffer(desc)) + skb = phytmac_rx_single(queue, desc); + else + skb = phytmac_rx_mbuffer(queue); + + if (!skb) { + netdev_warn(pdata->ndev, "phytmac rx skb is NULL\n"); + break; + } + + pdata->ndev->stats.rx_packets++; + queue->stats.rx_packets++; + pdata->ndev->stats.rx_bytes += skb->len; + queue->stats.rx_bytes += skb->len; + queue->rx_tail = (queue->rx_tail + 1) & (pdata->rx_ring_size - 1); + + count++; + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + phytmac_ptp_rxstamp(pdata, skb, desc); + + napi_gro_receive(napi, skb); + } + + phytmac_rx_clean(queue); + + return count; +} + +static void phytmac_tx_unmap(struct phytmac *pdata, struct phytmac_tx_skb *tx_skb, int budget) +{ + if (tx_skb->addr) { + if (tx_skb->mapped_as_page) + dma_unmap_page(pdata->dev, tx_skb->addr, + tx_skb->length, DMA_TO_DEVICE); + else + dma_unmap_single(pdata->dev, tx_skb->addr, + tx_skb->length, DMA_TO_DEVICE); + tx_skb->addr = 0; + } + + if (tx_skb->skb) { + napi_consume_skb(tx_skb->skb, budget); + tx_skb->skb = NULL; + } +} + +static int phytmac_maybe_stop_tx_queue(struct phytmac_queue *queue, + unsigned int count) +{ + struct phytmac *pdata = queue->pdata; + int space = CIRC_SPACE(queue->tx_tail, queue->tx_head, + pdata->tx_ring_size); + + if (space < 
count) { + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "Tx queue %d stopped, not enough descriptors available\n", + queue->index); + + netif_stop_subqueue(pdata->ndev, queue->index); + + return NETDEV_TX_BUSY; + } + + return 0; +} + +static int phytmac_maybe_wake_tx_queue(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + int space = CIRC_CNT(queue->tx_tail, queue->tx_head, + pdata->tx_ring_size); + + return (space <= (3 * pdata->tx_ring_size / 4)) ? 1 : 0; +} + +static int phytmac_tx_clean(struct phytmac_queue *queue, int budget) +{ + struct phytmac *pdata = queue->pdata; + u16 queue_index = queue->index; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_tx_skb *tx_skb; + struct phytmac_dma_desc *desc; + int complete = 0; + int packet_count = 0; + unsigned int tail = queue->tx_tail; + unsigned int head; + + spin_lock(&pdata->lock); + + for (head = queue->tx_head; head != tail && packet_count < budget; ) { + struct sk_buff *skb; + + desc = phytmac_get_tx_desc(queue, head); + /* make newly desc to cpu */ + rmb(); + if (!hw_if->tx_complete(desc)) + break; + + /* Process all buffers of the current transmitted frame */ + for (;; head++) { + tx_skb = phytmac_get_tx_skb(queue, head); + skb = tx_skb->skb; + + if (skb) { + complete = 1; + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) { + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP) && + !phytmac_ptp_one_step(skb)) { + phytmac_ptp_txstamp(queue, skb, desc); + } + } + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "desc %u (data %p) tx complete\n", + head, tx_skb->skb->data); + + pdata->ndev->stats.tx_packets++; + queue->stats.tx_packets++; + pdata->ndev->stats.tx_bytes += tx_skb->skb->len; + queue->stats.tx_bytes += tx_skb->skb->len; + packet_count++; + } + + /* Now we can safely release resources */ + phytmac_tx_unmap(pdata, tx_skb, budget); + + if (complete) { + complete = 0; + break; + } + } + + head++; + if (head >= pdata->tx_ring_size) + head &= 
(pdata->tx_ring_size - 1); + } + + queue->tx_head = head; + if (__netif_subqueue_stopped(pdata->ndev, queue_index) && + (phytmac_maybe_wake_tx_queue(queue))) + netif_wake_subqueue(pdata->ndev, queue_index); + spin_unlock(&pdata->lock); + + return packet_count; +} + +static int phytmac_rx_poll(struct napi_struct *napi, int budget) +{ + struct phytmac_queue *queue = container_of(napi, struct phytmac_queue, rx_napi); + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *desc; + int work_done; + + work_done = phytmac_rx(queue, napi, budget); + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "RX poll: queue = %u, work_done = %d, budget = %d\n", + (unsigned int)(queue->index), work_done, budget); + if (work_done < budget && napi_complete_done(napi, work_done)) { + hw_if->enable_irq(pdata, queue->index, pdata->rx_irq_mask); + + desc = phytmac_get_rx_desc(queue, queue->rx_tail); + /* make newly desc to cpu */ + rmb(); + + if (hw_if->rx_complete(desc)) { + hw_if->disable_irq(pdata, queue->index, pdata->rx_irq_mask); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_RX_COMPLETE); + + napi_schedule(napi); + } + } + + return work_done; +} + +static int phytmac_tx_poll(struct napi_struct *napi, int budget) +{ + struct phytmac_queue *queue = container_of(napi, struct phytmac_queue, tx_napi); + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_dma_desc *desc; + int work_done; + + work_done = phytmac_tx_clean(queue, budget); + + if (netif_msg_drv(pdata)) + netdev_info(pdata->ndev, "TX poll: queue = %u, work_done = %d, budget = %d\n", + (unsigned int)(queue->index), work_done, budget); + if (work_done < budget && napi_complete_done(napi, work_done)) { + hw_if->enable_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + if (queue->tx_head != queue->tx_tail) { + desc = phytmac_get_tx_desc(queue, queue->tx_head); + /* make newly desc to cpu */ + rmb(); + + if 
(hw_if->tx_complete(desc)) { + hw_if->disable_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + hw_if->clear_irq(pdata, queue->index, PHYTMAC_INT_TX_COMPLETE); + + napi_schedule(napi); + } + } + } + + return work_done; +} + +static inline int phytmac_clear_csum(struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + /* make sure we can modify the header */ + if (unlikely(skb_cow_head(skb, 0))) + return -1; + + *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; + return 0; +} + +static int phytmac_add_fcs(struct sk_buff **skb, struct net_device *ndev) +{ + bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || + skb_is_nonlinear(*skb); + int padlen = ETH_ZLEN - (*skb)->len; + int headroom = skb_headroom(*skb); + int tailroom = skb_tailroom(*skb); + struct sk_buff *nskb; + u32 fcs; + int i; + + if ((ndev->features & NETIF_F_HW_CSUM) || + !((*skb)->ip_summed != CHECKSUM_PARTIAL) || + skb_shinfo(*skb)->gso_size || phytmac_ptp_one_step(*skb)) + return 0; + + if (padlen <= 0) { + if (tailroom >= ETH_FCS_LEN) + goto add_fcs; + else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) + padlen = 0; + else + padlen = ETH_FCS_LEN; + } else { + padlen += ETH_FCS_LEN; + } + + if (!cloned && headroom + tailroom >= padlen) { + (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); + skb_set_tail_pointer(*skb, (*skb)->len); + } else { + nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + + dev_consume_skb_any(*skb); + *skb = nskb; + } + + if (padlen > ETH_FCS_LEN) + skb_put_zero(*skb, padlen - ETH_FCS_LEN); + +add_fcs: + fcs = crc32_le(~0, (*skb)->data, (*skb)->len); + fcs = ~fcs; + + for (i = 0; i < 4; ++i) + skb_put_u8(*skb, (fcs >> (i * 8)) & 0xff); + return 0; +} + +static int phytmac_packet_info(struct phytmac *pdata, + struct phytmac_queue *queue, struct sk_buff *skb, + struct packet_info *packet) +{ + int is_lso; + unsigned int hdrlen, f; + int desc_cnt; + + memset(packet, 0, 
sizeof(struct packet_info)); + + is_lso = (skb_shinfo(skb)->gso_size != 0); + + if (is_lso) { + /* length of headers */ + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { + /* only queue eth + ip headers separately for UDP */ + hdrlen = skb_transport_offset(skb); + packet->lso = LSO_UFO; + packet->mss = skb_shinfo(skb)->gso_size + hdrlen + ETH_FCS_LEN; + } else { + hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + packet->lso = LSO_TSO; + packet->mss = skb_shinfo(skb)->gso_size; + } + + if (skb_headlen(skb) < hdrlen) { + dev_err(pdata->dev, "Error - LSO headers fragmented!!!\n"); + return NETDEV_TX_BUSY; + } + } else { + hdrlen = min(skb_headlen(skb), pdata->max_tx_length); + packet->lso = 0; + packet->mss = 0; + } + + packet->hdrlen = hdrlen; + + if (is_lso && (skb_headlen(skb) > hdrlen)) + desc_cnt = TXD_USE_COUNT(pdata, (skb_headlen(skb) - hdrlen)) + 1; + else + desc_cnt = TXD_USE_COUNT(pdata, hdrlen); + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + desc_cnt += TXD_USE_COUNT(pdata, skb_frag_size(&skb_shinfo(skb)->frags[f])); + packet->desc_cnt = desc_cnt; + + if ((!(pdata->ndev->features & NETIF_F_HW_CSUM)) && + skb->ip_summed != CHECKSUM_PARTIAL && + !is_lso && + !phytmac_ptp_one_step(skb)) + packet->nocrc = 1; + else + packet->nocrc = 0; + + if (netif_msg_pktdata(pdata)) { + netdev_info(pdata->ndev, "packet info: desc_cnt=%d, nocrc=%d,ip_summed=%d\n", + desc_cnt, packet->nocrc, skb->ip_summed); + netdev_info(pdata->ndev, "packet info: mss=%d, lso=%d,skb_len=%d, nr_frags=%d\n", + packet->mss, packet->lso, skb->len, skb_shinfo(skb)->nr_frags); + } + + return 0; +} + +static unsigned int phytmac_tx_map(struct phytmac *pdata, + struct phytmac_queue *queue, + struct sk_buff *skb, + struct packet_info *packet) +{ + dma_addr_t mapping; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned int len, i, tx_tail; + struct phytmac_tx_skb *tx_skb = NULL; + unsigned int offset, size, count = 0; + unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; + + len = 
skb_headlen(skb); + size = packet->hdrlen; + + offset = 0; + tx_tail = queue->tx_tail; + while (len) { + tx_skb = phytmac_get_tx_skb(queue, tx_tail); + + mapping = dma_map_single(pdata->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, mapping)) + goto dma_error; + + /* Save info to properly release resources */ + tx_skb->skb = NULL; + tx_skb->addr = mapping; + tx_skb->length = size; + tx_skb->mapped_as_page = false; + + len -= size; + offset += size; + count++; + tx_tail++; + + size = min(len, pdata->max_tx_length); + } + + /* Then, map paged data from fragments */ + for (f = 0; f < nr_frags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + len = skb_frag_size(frag); + offset = 0; + while (len) { + size = min(len, pdata->max_tx_length); + tx_skb = phytmac_get_tx_skb(queue, tx_tail); + mapping = skb_frag_dma_map(pdata->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, mapping)) + goto dma_error; + + /* Save info to properly release resources */ + tx_skb->skb = NULL; + tx_skb->addr = mapping; + tx_skb->length = size; + tx_skb->mapped_as_page = true; + + len -= size; + offset += size; + count++; + tx_tail++; + } + } + + /* Should never happen */ + if (unlikely(!tx_skb)) { + netdev_err(pdata->ndev, "BUG! 
empty skb!\n"); + return 0; + } + + /* This is the last buffer of the frame: save socket buffer */ + tx_skb->skb = skb; + + if (hw_if->tx_map(queue, tx_tail, packet)) { + netdev_err(pdata->ndev, "BUG!hw tx map failed!\n"); + return 0; + } + + queue->tx_tail = tx_tail & (pdata->tx_ring_size - 1); + + return count; + +dma_error: + netdev_err(pdata->ndev, "TX DMA map failed\n"); + + for (i = queue->tx_tail; i != tx_tail; i++) { + tx_skb = phytmac_get_tx_skb(queue, i); + phytmac_tx_unmap(pdata, tx_skb, 0); + } + + return 0; +} + +static inline void phytmac_init_ring(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_queue *queue; + unsigned int q = 0; + int i; + + for (queue = pdata->queues; q < pdata->queues_num; ++q) { + queue->tx_head = 0; + queue->tx_tail = 0; + hw_if->clear_tx_desc(queue); + + for (i = 0; i < pdata->rx_ring_size; i++) + hw_if->init_rx_map(queue, i); + queue->rx_head = 0; + queue->rx_tail = 0; + queue->rx_next_to_alloc = 0; + phytmac_rx_clean(queue); + ++queue; + } + + hw_if->init_ring_hw(pdata); +} + +static netdev_tx_t phytmac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + u16 queue_index = skb->queue_mapping; + struct phytmac_queue *queue = &pdata->queues[queue_index]; + netdev_tx_t ret = NETDEV_TX_OK; + struct packet_info packet; + unsigned long flags; + + if (phytmac_clear_csum(skb)) { + dev_kfree_skb_any(skb); + return ret; + } + + if (phytmac_add_fcs(&skb, ndev)) { + dev_kfree_skb_any(skb); + return ret; + } + + ret = phytmac_packet_info(pdata, queue, skb, &packet); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + if (netif_msg_pktdata(pdata)) + phytmac_dump_pkt(pdata, skb, true); + + spin_lock_irqsave(&pdata->lock, flags); + /* Check that there are enough descriptors available */ + ret = phytmac_maybe_stop_tx_queue(queue, packet.desc_cnt); + if (ret) + goto tx_return; + + /* Map socket 
buffer for DMA transfer */ + if (!phytmac_tx_map(pdata, queue, skb, &packet)) { + dev_kfree_skb_any(skb); + goto tx_return; + } + + skb_tx_timestamp(skb); + /* Make newly descriptor to hardware */ + wmb(); + + hw_if->transmit(queue); + +tx_return: + spin_unlock_irqrestore(&pdata->lock, flags); + return ret; +} + +static bool phytmac_phy_connect_need(struct phytmac *pdata) +{ + struct fwnode_handle *fwnode = dev_fwnode(pdata->dev); + struct fwnode_handle *dn; + u8 pl_cfg_link_an_mode = 0; + + /* Fixed links is handled without needing a PHY */ + dn = fwnode_get_named_child_node(fwnode, "fixed-link"); + if (dn || fwnode_property_present(fwnode, "fixed-link")) + pl_cfg_link_an_mode = MLO_AN_FIXED; + fwnode_handle_put(dn); + + if (pl_cfg_link_an_mode == MLO_AN_FIXED || + phy_interface_mode_is_8023z(pdata->phy_interface)) + return false; + return true; +} + +static int phytmac_phylink_connect(struct phytmac *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct phy_device *phydev; + struct fwnode_handle *fwnode = dev_fwnode(pdata->dev); + struct device_node *of_node = dev_of_node(pdata->dev); + int ret = 0; + + if (of_node) + ret = phylink_of_phy_connect(pdata->phylink, of_node, 0); + + if (!of_node && fwnode) + ret = phytmac_phy_connect_need(pdata); + + if (!fwnode || ret) { + if (pdata->mii_bus) { + phydev = phy_find_first(pdata->mii_bus); + if (!phydev) { + dev_err(pdata->dev, "no PHY found\n"); + return -ENXIO; + } + /* attach the mac to the phy */ + ret = phylink_connect_phy(pdata->phylink, phydev); + } else { + netdev_err(ndev, "Not mii register\n"); + return -ENXIO; + } + } + + if (ret) { + netdev_err(ndev, "Could not attach PHY (%d)\n", ret); + return ret; + } + + return 0; +} + +void phytmac_pcs_link_up(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "pcs link up, interface:%s, speed:%d, duplex:%d\n", + phy_modes(pdata->phy_interface), pdata->speed, pdata->duplex); + 
hw_if->pcs_linkup(pdata, pdata->phy_interface, pdata->speed, pdata->duplex); +} + +static void phytmac_mac_config(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned long flags; + bool rx_pause; + + if (netif_msg_link(pdata)) { + netdev_info(pdata->ndev, "mac config interface=%s, mode=%d\n", + phy_modes(state->interface), mode); + } + + pdata->speed = state->speed; + pdata->duplex = state->duplex; + rx_pause = !!(state->pause & MLO_PAUSE_RX); + + spin_lock_irqsave(&pdata->lock, flags); + hw_if->mac_config(pdata, mode, state); + + if (rx_pause != pdata->pause) { + hw_if->enable_pause(pdata, rx_pause); + pdata->pause = rx_pause; + } + spin_unlock_irqrestore(&pdata->lock, flags); +} + +static void phytmac_mac_an_restart(struct net_device *ndev) +{ +} + +static void phytmac_mac_link_down(struct net_device *ndev, unsigned int mode, + phy_interface_t interface) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_queue *queue; + unsigned int q; + unsigned long flags; + struct phytmac_tx_skb *tx_skb; + int i; + + if (netif_msg_link(pdata)) { + netdev_info(ndev, "link down interface:%s, mode=%d\n", + phy_modes(interface), mode); + } + + if (pdata->use_ncsi) + ncsi_stop_dev(pdata->ncsidev); + + spin_lock_irqsave(&pdata->lock, flags); + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + hw_if->disable_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); + hw_if->clear_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); + } + + /* Disable Rx and Tx */ + hw_if->enable_network(pdata, false, PHYTMAC_RX | PHYTMAC_TX); + + /* Tx clean */ + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + for (i = 0; i < pdata->tx_ring_size; i++) { + tx_skb = phytmac_get_tx_skb(queue, i); + if (tx_skb) + 
phytmac_tx_unmap(pdata, tx_skb, 0); + } + } + + spin_unlock_irqrestore(&pdata->lock, flags); + + netif_tx_stop_all_queues(ndev); +} + +static void phytmac_mac_link_up(struct net_device *ndev, + unsigned int mode, phy_interface_t interface, + struct phy_device *phy) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct phytmac_queue *queue; + unsigned long flags; + unsigned int q; + int ret; + + if (netif_msg_link(pdata)) + netdev_info(pdata->ndev, "link up interface:%s, speed:%d, duplex:%s\n", + phy_modes(interface), pdata->speed, pdata->duplex ? + "full-duplex" : "half-duplex"); + + spin_lock_irqsave(&pdata->lock, flags); + + hw_if->mac_linkup(pdata, interface, pdata->speed, pdata->duplex); + + phytmac_init_ring(pdata); + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) + hw_if->enable_irq(pdata, queue->index, pdata->rx_irq_mask | pdata->tx_irq_mask); + + /* Enable Rx and Tx */ + hw_if->enable_network(pdata, true, PHYTMAC_RX | PHYTMAC_TX); + spin_unlock_irqrestore(&pdata->lock, flags); + + if (pdata->use_ncsi) { + /* Start the NCSI device */ + ret = ncsi_start_dev(pdata->ncsidev); + if (ret) { + netdev_err(pdata->ndev, "Ncsi start dev failed (error %d)\n", ret); + return; + } + } + + netif_tx_wake_all_queues(ndev); +} + +int phytmac_mdio_register(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + int ret; + + pdata->mii_bus = mdiobus_alloc(); + if (!pdata->mii_bus) { + ret = -ENOMEM; + goto err_out; + } + + pdata->mii_bus->name = "phytmac_mii_bus"; + pdata->mii_bus->read = &phytmac_mdio_read; + pdata->mii_bus->write = &phytmac_mdio_write; + + if (pdata->platdev) { + snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%s", + pdata->mii_bus->name, pdata->platdev->name); + } else if (pdata->pcidev) { + snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%s", + pdata->mii_bus->name, pci_name(pdata->pcidev)); + } else { + ret = -ENOMEM; + goto free_mdio; + } + + 
pdata->mii_bus->priv = pdata; + pdata->mii_bus->parent = pdata->dev; + + hw_if->enable_mdio_control(pdata, 1); + + return mdiobus_register(pdata->mii_bus); +free_mdio: + mdiobus_free(pdata->mii_bus); + pdata->mii_bus = NULL; + +err_out: + return ret; +} + +static void phytmac_pcs_get_state(struct net_device *ndev, + struct phylink_link_state *state) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + + state->link = hw_if->get_link(pdata, state->interface); +} + +static void phytmac_validate(struct net_device *ndev, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct phytmac *pdata = netdev_priv(ndev); + + if (state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_1000BASEX && + state->interface != PHY_INTERFACE_MODE_2500BASEX && + state->interface != PHY_INTERFACE_MODE_USXGMII && + !phy_interface_mode_is_rgmii(state->interface)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + phylink_set(mask, Asym_Pause); + + if (state->interface == PHY_INTERFACE_MODE_USXGMII) { + pdata->speed = state->speed; + pdata->duplex = state->duplex; + if (pdata->speed == SPEED_5000) { + phylink_set(mask, 5000baseT_Full); + } else { + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseT_Full); + } + } + + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + phylink_set(mask, 2500baseX_Full); + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + phylink_set(mask, 1000baseX_Full); + + if (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_rgmii(state->interface)) { + phylink_set(mask, 1000baseT_Full); + 
phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + } + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static const struct phylink_mac_ops phytmac_phylink_ops = { + .validate = phytmac_validate, + .mac_an_restart = phytmac_mac_an_restart, + .mac_config = phytmac_mac_config, + .mac_link_down = phytmac_mac_link_down, + .mac_link_up = phytmac_mac_link_up, +}; + +static int phytmac_phylink_create(struct phytmac *pdata) +{ + struct fwnode_handle *fw_node = dev_fwnode(pdata->dev); + + pdata->phylink = phylink_create(pdata->ndev, fw_node, + pdata->phy_interface, &phytmac_phylink_ops); + if (IS_ERR(pdata->phylink)) { + dev_err(pdata->dev, "Could not create a phylink instance (%ld)\n", + PTR_ERR(pdata->phylink)); + return PTR_ERR(pdata->phylink); + } + + if (pdata->phy_interface == PHY_INTERFACE_MODE_SGMII || + pdata->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + pdata->phy_interface == PHY_INTERFACE_MODE_2500BASEX || + pdata->phy_interface == PHY_INTERFACE_MODE_USXGMII) + phylink_fixed_state_cb(pdata->phylink, &phytmac_pcs_get_state); + + return 0; +} + +static int phytmac_open(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_queue *queue; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned int q = 0; + int ret; + + if (netif_msg_probe(pdata)) + dev_dbg(pdata->dev, "open\n"); + + /* phytmac_powerup */ + if (pdata->power_state == PHYTMAC_POWEROFF) + hw_if->poweron(pdata, PHYTMAC_POWERON); + + if (hw_if->init_msg_ring) + hw_if->init_msg_ring(pdata); + + ret = hw_if->get_feature(pdata); + if (ret) { + netdev_err(ndev, "phytmac get features failed\n"); + return ret; + } + + hw_if->reset_hw(pdata); + + ret = netif_set_real_num_tx_queues(ndev, pdata->queues_num); + if (ret) { + 
netdev_err(ndev, "error setting real tx queue number\n"); + return ret; + } + ret = netif_set_real_num_rx_queues(ndev, pdata->queues_num); + if (ret) { + netdev_err(ndev, "error setting real tx queue number\n"); + return ret; + } + + /* RX buffers initialization */ + ret = phytmac_alloc_resource(pdata); + if (ret) { + netdev_err(ndev, "Unable to allocate DMA memory (error %d)\n", + ret); + goto reset_hw; + } + + for (queue = pdata->queues; q < pdata->queues_num; ++q) { + napi_enable(&queue->tx_napi); + napi_enable(&queue->rx_napi); + ++queue; + } + + hw_if->init_hw(pdata); + + ret = phytmac_phylink_connect(pdata); + if (ret) { + netdev_err(ndev, "phylink connect failed,(error %d)\n", + ret); + goto reset_hw; + } + + phylink_start(pdata->phylink); + phytmac_set_bios_wol_enable(pdata, pdata->wol ? 1 : 0); + + phytmac_pcs_link_up(pdata); + + netif_tx_start_all_queues(pdata->ndev); + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) { + ret = phytmac_ptp_register(pdata); + if (ret) { + netdev_err(ndev, "ptp register failed, (error %d)\n", + ret); + goto reset_hw; + } + + phytmac_ptp_init(pdata->ndev); + } + + return 0; + +reset_hw: + hw_if->reset_hw(pdata); + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q) { + napi_disable(&queue->tx_napi); + napi_disable(&queue->rx_napi); + ++queue; + } + phytmac_free_resource(pdata); + return ret; +} + +static int phytmac_close(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_queue *queue; + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned long flags; + unsigned int q; + + if (netif_msg_probe(pdata)) + dev_dbg(pdata->dev, "close"); + + netif_tx_stop_all_queues(ndev); + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + napi_disable(&queue->tx_napi); + napi_disable(&queue->rx_napi); + } + + phylink_stop(pdata->phylink); + phylink_disconnect_phy(pdata->phylink); + + netif_carrier_off(ndev); + + spin_lock_irqsave(&pdata->lock, flags); + 
hw_if->reset_hw(pdata); + spin_unlock_irqrestore(&pdata->lock, flags); + + phytmac_free_resource(pdata); + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + phytmac_ptp_unregister(pdata); + + /* phytmac_powerup */ + if (pdata->power_state == PHYTMAC_POWERON) + hw_if->poweron(pdata, PHYTMAC_POWEROFF); + + return 0; +} + +static int phytmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct phytmac *pdata = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + ret = phylink_mii_ioctl(pdata->phylink, rq, cmd); + break; +#ifdef CONFIG_PHYTMAC_ENABLE_PTP + case SIOCSHWTSTAMP: + ret = phytmac_ptp_set_ts_config(dev, rq, cmd); + break; + case SIOCGHWTSTAMP: + ret = phytmac_ptp_get_ts_config(dev, rq); + break; +#endif + default: + break; + } + + return ret; +} + +static inline int phytmac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct phytmac *pdata = netdev_priv(netdev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + netdev_features_t changed = features ^ netdev->features; + + /* TX checksum offload */ + if (changed & NETIF_F_HW_CSUM) { + if (features & NETIF_F_HW_CSUM) + hw_if->enable_tx_csum(pdata, 1); + else + hw_if->enable_tx_csum(pdata, 0); + } + + /* RX checksum offload */ + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM && + !(netdev->flags & IFF_PROMISC)) + hw_if->enable_rx_csum(pdata, 1); + else + hw_if->enable_rx_csum(pdata, 0); + } + return 0; +} + +static netdev_features_t phytmac_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + unsigned int nr_frags, f; + unsigned int hdrlen; + + /* there is only one buffer or protocol is not UDP */ + if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) + return features; + + /* length of header */ + hdrlen = skb_transport_offset(skb); + + if (!IS_ALIGNED(skb_headlen(skb) - 
hdrlen, PHYTMAC_TX_LEN_ALIGN)) + return features & ~NETIF_F_TSO; + + nr_frags = skb_shinfo(skb)->nr_frags; + /* No need to check last fragment */ + nr_frags--; + for (f = 0; f < nr_frags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + if (!IS_ALIGNED(skb_frag_size(frag), PHYTMAC_TX_LEN_ALIGN)) + return features & ~NETIF_F_TSO; + } + return features; +} + +void phytmac_set_bios_wol_enable(struct phytmac *pdata, u32 wol) +{ + struct net_device *ndev = pdata->ndev; + + if (ndev->phydev) { +#ifdef CONFIG_ACPI + if (has_acpi_companion(pdata->dev)) { + acpi_handle handle = ACPI_HANDLE(pdata->dev); + + if (acpi_has_method(handle, "PWOL")) { + union acpi_object args[] = { + { .type = ACPI_TYPE_INTEGER, }, + }; + struct acpi_object_list arg_input = { + .pointer = args, + .count = ARRAY_SIZE(args), + }; + acpi_status status; + + /* Set the input parameters */ + args[0].integer.value = wol; + + status = acpi_evaluate_object(handle, "PWOL", &arg_input, NULL); + if (ACPI_FAILURE(status)) + netdev_err(ndev, "The PWOL method failed to be executed.\n"); + } + } +#endif + } +} + +int phytmac_reset_ringsize(struct phytmac *pdata, u32 rx_size, u32 tx_size) +{ + int ret = 0; + int reset = 0; + + if (netif_running(pdata->ndev)) { + reset = 1; + phytmac_close(pdata->ndev); + } + + pdata->rx_ring_size = rx_size; + pdata->tx_ring_size = tx_size; + + if (reset) + phytmac_open(pdata->ndev); + + return ret; +} + +static const struct net_device_ops phytmac_netdev_ops = { + .ndo_open = phytmac_open, + .ndo_stop = phytmac_close, + .ndo_start_xmit = phytmac_start_xmit, + .ndo_set_rx_mode = phytmac_set_rx_mode, + .ndo_get_stats = phytmac_get_stats, + .ndo_do_ioctl = phytmac_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = phytmac_change_mtu, + .ndo_set_mac_address = phytmac_set_mac_address, + .ndo_set_features = phytmac_set_features, + .ndo_features_check = phytmac_features_check, + .ndo_vlan_rx_add_vid = ncsi_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = 
ncsi_vlan_rx_kill_vid, +}; + +static int phytmac_init(struct phytmac *pdata) +{ + struct net_device *ndev = pdata->ndev; + unsigned int q; + struct phytmac_queue *queue; + int ret; + + if (netif_msg_probe(pdata)) + dev_dbg(pdata->dev, "phytmac init !\n"); + + spin_lock_init(&pdata->lock); + + /* set the queue register mapping once for all: queue0 has a special + * register mapping but we don't want to test the queue index then + * compute the corresponding register offset at run time. + */ + for (q = 0; q < pdata->queues_num; ++q) { + queue = &pdata->queues[q]; + queue->pdata = pdata; + queue->index = q; + spin_lock_init(&queue->tx_lock); + + netif_napi_add(ndev, &queue->tx_napi, phytmac_tx_poll, NAPI_POLL_WEIGHT); + netif_napi_add(ndev, &queue->rx_napi, phytmac_rx_poll, NAPI_POLL_WEIGHT); + + if (pdata->irq_type == IRQ_TYPE_INT || pdata->irq_type == IRQ_TYPE_MSI) { + queue->irq = pdata->queue_irq[q]; + if (pdata->irq_type == IRQ_TYPE_INT) + ret = devm_request_irq(pdata->dev, queue->irq, phytmac_irq, + IRQF_SHARED, ndev->name, queue); + else + ret = devm_request_irq(pdata->dev, queue->irq, phytmac_irq, + 0, ndev->name, queue); + + if (ret) { + dev_err(pdata->dev, + "Unable to request IRQ %d (error %d)\n", + queue->irq, ret); + return ret; + } + } + } + + if (pdata->irq_type == IRQ_TYPE_INTX) { + ret = devm_request_irq(pdata->dev, pdata->queue_irq[0], phytmac_intx_irq, + IRQF_SHARED, ndev->name, pdata); + if (ret) { + dev_err(pdata->dev, + "Unable to request INTX IRQ %d (error %d)\n", + pdata->queue_irq[0], ret); + return ret; + } + } + + ndev->netdev_ops = &phytmac_netdev_ops; + phytmac_set_ethtool_ops(ndev); + + if (ndev->hw_features & NETIF_F_NTUPLE) { + INIT_LIST_HEAD(&pdata->rx_fs_list.list); + pdata->rx_fs_list.count = 0; + spin_lock_init(&pdata->rx_fs_lock); + } + + device_set_wakeup_enable(pdata->dev, pdata->wol ? 
1 : 0); + + return 0; +} + +void phytmac_default_config(struct phytmac *pdata) +{ + struct net_device *ndev = pdata->ndev; + + pdata->rx_irq_mask = PHYTMAC_RX_INT_FLAGS; + pdata->tx_irq_mask = PHYTMAC_TX_INT_FLAGS; + pdata->tx_ring_size = DEFAULT_TX_RING_SIZE; + pdata->rx_ring_size = DEFAULT_RX_RING_SIZE; + pdata->max_tx_length = PHYTMAC_MAX_TX_LEN; + pdata->min_tx_length = PHYTMAC_MIN_TX_LEN; + pdata->pause = true; + + ndev->hw_features = NETIF_F_SG; + + if (pdata->capacities & PHYTMAC_CAPS_LSO) + ndev->hw_features |= NETIF_F_TSO; + + if (pdata->use_ncsi) { + ndev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } else { + ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + } + + if (pdata->capacities & PHYTMAC_CAPS_SG_DISABLED) + ndev->hw_features &= ~NETIF_F_SG; + + ndev->hw_features |= NETIF_F_NTUPLE; + + ndev->min_mtu = ETH_MIN_MTU; + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + ndev->max_mtu = pdata->jumbo_len - ETH_HLEN - ETH_FCS_LEN; + else + ndev->max_mtu = ETH_DATA_LEN; + + ndev->features = ndev->hw_features; +} + +static void phytmac_ncsi_handler(struct ncsi_dev *nd) +{ + if (unlikely(nd->state != ncsi_dev_state_functional)) + return; + + netdev_dbg(nd->dev, "NCSI interface %s\n", + nd->link_up ? 
"up" : "down"); +} + +int phytmac_drv_probe(struct phytmac *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct device *dev = pdata->dev; + int ret = 0; + + if (netif_msg_probe(pdata)) + dev_dbg(pdata->dev, "phytmac drv probe start\n"); + + phytmac_default_config(pdata); + + if (dma_set_mask(dev, DMA_BIT_MASK(40)) || + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40))) { + dev_err(dev, "dma_set_mask or coherent failed\n"); + return 1; + } + + ret = phytmac_init(pdata); + if (ret) + goto err_out; + + if (pdata->use_ncsi) { + pdata->ncsidev = ncsi_register_dev(ndev, phytmac_ncsi_handler); + if (!pdata->ncsidev) + goto err_out; + } + + netif_carrier_off(ndev); + ret = register_netdev(ndev); + if (ret) { + dev_err(pdata->dev, "Cannot register net device, aborting.\n"); + goto err_out; + } + + if (pdata->use_mii && !pdata->mii_bus) { + ret = phytmac_mdio_register(pdata); + if (ret) { + netdev_err(ndev, "MDIO bus registration failed\n"); + goto err_out_free_mdiobus; + } + } + + ret = phytmac_phylink_create(pdata); + if (ret) { + netdev_err(ndev, "phytmac phylink create failed, error %d\n", ret); + goto err_phylink_init; + } + + ret = phytmac_get_mac_address(pdata); + if (ret) { + netdev_err(ndev, "phytmac get mac address failed\n"); + goto err_phylink_init; + } + + if (netif_msg_probe(pdata)) + dev_dbg(pdata->dev, "probe successfully! 
Phytium %s at 0x%08lx irq %d (%pM)\n", + "MAC", ndev->base_addr, ndev->irq, ndev->dev_addr); + + return 0; + +err_phylink_init: + if (pdata->mii_bus) + mdiobus_unregister(pdata->mii_bus); + +err_out_free_mdiobus: + if (pdata->mii_bus) + mdiobus_free(pdata->mii_bus); + + unregister_netdev(ndev); + +err_out: + return ret; +} +EXPORT_SYMBOL_GPL(phytmac_drv_probe); + +int phytmac_drv_remove(struct phytmac *pdata) +{ + struct net_device *ndev = pdata->ndev; + + if (ndev) { + if (pdata->use_ncsi && pdata->ncsidev) + ncsi_unregister_dev(pdata->ncsidev); + + unregister_netdev(ndev); + + if (pdata->use_mii && pdata->mii_bus) { + mdiobus_unregister(pdata->mii_bus); + mdiobus_free(pdata->mii_bus); + } + + if (pdata->phylink) + phylink_destroy(pdata->phylink); + } + + return 0; +} +EXPORT_SYMBOL_GPL(phytmac_drv_remove); + +int phytmac_drv_suspend(struct phytmac *pdata) +{ + int q; + unsigned long flags; + struct phytmac_queue *queue; + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (!netif_running(pdata->ndev)) + return 0; + + if (pdata->power_state == PHYTMAC_POWEROFF) + return 0; + + netif_carrier_off(pdata->ndev); + netif_device_detach(pdata->ndev); + + /* napi_disable */ + for (q = 0, queue = pdata->queues; q < pdata->queues_num; + ++q, ++queue) { + napi_disable(&queue->tx_napi); + napi_disable(&queue->rx_napi); + } + + if (pdata->wol) { + hw_if->set_wol(pdata, pdata->wol); + } else { + rtnl_lock(); + phylink_stop(pdata->phylink); + rtnl_unlock(); + spin_lock_irqsave(&pdata->lock, flags); + hw_if->reset_hw(pdata); + hw_if->poweron(pdata, PHYTMAC_POWEROFF); + spin_unlock_irqrestore(&pdata->lock, flags); + } + + return 0; +} +EXPORT_SYMBOL_GPL(phytmac_drv_suspend); + +int phytmac_drv_resume(struct phytmac *pdata) +{ + int q; + struct phytmac_queue *queue; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct ethtool_rx_fs_item *item; + + if (!netif_running(pdata->ndev)) + return 0; + + if (pdata->power_state == PHYTMAC_POWEROFF) + hw_if->poweron(pdata, PHYTMAC_POWERON); 
+ + if (hw_if->init_msg_ring) + hw_if->init_msg_ring(pdata); + + if (pdata->wol) { + hw_if->set_wol(pdata, 0); + rtnl_lock(); + phylink_stop(pdata->phylink); + rtnl_unlock(); + } + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; + ++q, ++queue) { + napi_enable(&queue->tx_napi); + napi_enable(&queue->rx_napi); + } + + hw_if->init_hw(pdata); + phytmac_set_rx_mode(pdata->ndev); + phytmac_set_features(pdata->ndev, pdata->ndev->features); + list_for_each_entry(item, &pdata->rx_fs_list.list, list) + hw_if->add_fdir_entry(pdata, &item->fs); + + rtnl_lock(); + phylink_start(pdata->phylink); + rtnl_unlock(); + + netif_device_attach(pdata->ndev); + + return 0; +} +EXPORT_SYMBOL_GPL(phytmac_drv_resume); + +struct phytmac *phytmac_alloc_pdata(struct device *dev) +{ + struct phytmac *pdata; + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct phytmac), + PHYTMAC_MAX_QUEUES); + if (!netdev) { + dev_err(dev, "alloc_etherdev_mq failed\n"); + return ERR_PTR(-ENOMEM); + } + SET_NETDEV_DEV(netdev, dev); + pdata = netdev_priv(netdev); + pdata->ndev = netdev; + pdata->dev = dev; + + spin_lock_init(&pdata->lock); + spin_lock_init(&pdata->msg_lock); + spin_lock_init(&pdata->ts_clk_lock); + pdata->msg_enable = netif_msg_init(debug, PHYTMAC_DEFAULT_MSG_ENABLE); + + return pdata; +} +EXPORT_SYMBOL_GPL(phytmac_alloc_pdata); + +void phytmac_free_pdata(struct phytmac *pdata) +{ + struct net_device *netdev = pdata->ndev; + + free_netdev(netdev); +} +EXPORT_SYMBOL_GPL(phytmac_free_pdata); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium Ethernet driver"); +MODULE_AUTHOR("Wenting Song"); +MODULE_ALIAS("platform:phytmac"); +MODULE_VERSION(PHYTMAC_DRIVER_VERSION); diff --git a/drivers/net/ethernet/phytium/phytmac_pci.c b/drivers/net/ethernet/phytium/phytmac_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..dd0774cb773883584acb996c5397cfd0a6178e04 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_pci.c @@ -0,0 +1,283 @@ +// 
SPDX-License-Identifier: GPL-2.0-only +/* + * Phytium GMAC PCI wrapper. + * + */ + +#include +#include +#include "phytmac.h" +#include "phytmac_v1.h" +#include "phytmac_v2.h" + +#define PCI_DEVICE_ID_GMAC 0xDC3B +#define PCI_SUBDEVICE_ID_SGMII 0x1000 +#define PCI_SUBDEVICE_ID_1000BASEX 0x1001 +#define PCI_SUBDEVICE_ID_2500BASEX 0x1002 +#define PCI_SUBDEVICE_ID_5GBASER 0x1003 +#define PCI_SUBDEVICE_ID_USXGMII 0x1004 +#define PCI_SUBDEVICE_ID_10GBASER 0x1005 + +struct phytmac_data { + struct phytmac_hw_if *hw_if; + u32 caps; + u32 tsu_rate; + u16 queue_num; + int speed; + bool duplex; + bool use_mii; + bool use_ncsi; + phy_interface_t interface; + const struct property_entry *properties; +}; + +static const u32 fixedlink[][5] = { + {0, 1, 1000, 1, 0}, + {0, 1, 2500, 1, 0}, + {0, 1, 5000, 1, 0}, + {0, 1, 10000, 1, 0}, +}; + +static const struct property_entry fl_properties[][2] = { + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[0]), {} }, + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[1]), {} }, + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[2]), {} }, + {PROPERTY_ENTRY_U32_ARRAY("fixed-link", fixedlink[3]), {} }, +}; + +static int phytmac_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct phytmac_data *data = (struct phytmac_data *)id->driver_data; + struct phytmac *pdata; + struct device *dev = &pdev->dev; + void __iomem * const *iomap_table; + int bar_mask; + int ret, i; + + pdata = phytmac_alloc_pdata(dev); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto err_alloc; + } + + pdata->pcidev = pdev; + pci_set_drvdata(pdev, pdata); + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "pcim_enable_device failed\n"); + goto err_pci_enable; + } + + /* Obtain the mmio areas for the device */ + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + ret = pcim_iomap_regions(pdev, bar_mask, PHYTMAC_DRV_NAME); + if (ret) { + dev_err(dev, "pcim_iomap_regions failed\n"); + goto err_pci_enable; + } + + iomap_table = 
pcim_iomap_table(pdev); + if (!iomap_table) { + dev_err(dev, "pcim_iomap_table failed\n"); + ret = -ENOMEM; + goto err_pci_enable; + } + + pdata->mac_regs = iomap_table[0]; + if (!pdata->mac_regs) { + dev_err(dev, "xgmac ioremap failed\n"); + ret = -ENOMEM; + goto err_pci_enable; + } + + pdata->msg_regs = iomap_table[1]; + if (!pdata->msg_regs) { + dev_err(dev, "xpcs ioremap failed\n"); + ret = -ENOMEM; + goto err_pci_enable; + } + + pci_set_master(pdev); + + /* para */ + pdata->dma_burst_length = DEFAULT_DMA_BURST_LENGTH; + pdata->jumbo_len = DEFAULT_DMA_BURST_LENGTH; + pdata->wol |= PHYTMAC_WAKE_MAGIC; + pdata->use_ncsi = data->use_ncsi; + pdata->use_mii = data->use_mii; + pdata->phy_interface = data->interface; + pdata->queues_num = data->queue_num; + pdata->capacities = data->caps; + pdata->hw_if = data->hw_if; + + /* irq */ + ret = pci_alloc_irq_vectors(pdev, pdata->queues_num, pdata->queues_num, PCI_IRQ_MSI); + if (ret < 0) { + pdata->irq_type = IRQ_TYPE_INTX; + pdata->queue_irq[0] = pdev->irq; + } else { + pdata->irq_type = IRQ_TYPE_MSI; + for (i = 0; i < pdata->queues_num; i++) + pdata->queue_irq[i] = pci_irq_vector(pdev, i); + } + + /* Configure the netdev resource */ + ret = phytmac_drv_probe(pdata); + if (ret) + goto err_irq_vectors; + + netdev_notice(pdata->ndev, "net device enabled\n"); + + return 0; + +err_irq_vectors: + pci_free_irq_vectors(pdata->pcidev); + +err_pci_enable: + phytmac_free_pdata(pdata); + +err_alloc: + dev_notice(dev, "net device not enabled\n"); + + return ret; +} + +static void phytmac_pci_remove(struct pci_dev *pdev) +{ + struct phytmac *pdata = pci_get_drvdata(pdev); + int i = 0; + int bar_mask; + + phytmac_drv_remove(pdata); + + for (i = 0; i < pdata->queues_num; i++) + free_irq(pci_irq_vector(pdev, i), &pdata->queues[i]); + pci_free_irq_vectors(pdev); + + phytmac_free_pdata(pdata); + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); + pcim_iounmap_regions(pdev, bar_mask); + + pci_disable_device(pdev); +} + +static int 
__maybe_unused phytmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytmac *pdata = pci_get_drvdata(pdev); + int ret; + + ret = phytmac_drv_suspend(pdata); + + return ret; +} + +static int __maybe_unused phytmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct phytmac *pdata = pci_get_drvdata(pdev); + int ret; + + ret = phytmac_drv_resume(pdata); + + return ret; +} + +struct phytmac_data phytmac_sgmii = { + .hw_if = &phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .use_ncsi = false, + .use_mii = true, + .interface = PHY_INTERFACE_MODE_SGMII, +}; + +struct phytmac_data phytmac_1000basex = { + .hw_if = &phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .use_ncsi = false, + .use_mii = false, + .speed = 1000, + .duplex = true, + .interface = PHY_INTERFACE_MODE_1000BASEX, + .properties = fl_properties[0], +}; + +struct phytmac_data phytmac_2500basex = { + .hw_if = &phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .use_ncsi = false, + .use_mii = false, + .speed = 2500, + .duplex = true, + .interface = PHY_INTERFACE_MODE_2500BASEX, + .properties = fl_properties[1], +}; + +struct phytmac_data phytmac_usxgmii = { + .hw_if = &phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .use_ncsi = false, + .use_mii = false, + .speed = 10000, + .duplex = true, + .interface = PHY_INTERFACE_MODE_USXGMII, + .properties = fl_properties[3], +}; + +static const struct pci_device_id phytmac_pci_table[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_SGMII), + .driver_data = (kernel_ulong_t)&phytmac_sgmii}, + { 
PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_1000BASEX), + .driver_data = (kernel_ulong_t)&phytmac_1000basex}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_2500BASEX), + .driver_data = (kernel_ulong_t)&phytmac_2500basex}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_USXGMII), + .driver_data = (kernel_ulong_t)&phytmac_usxgmii}, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_PHYTIUM, PCI_DEVICE_ID_GMAC, + PCI_VENDOR_ID_PHYTIUM, PCI_SUBDEVICE_ID_10GBASER), + .driver_data = (kernel_ulong_t)&phytmac_usxgmii}, + /* Last entry must be zero */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, phytmac_pci_table); + +static const struct dev_pm_ops phytmac_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytmac_pci_suspend, phytmac_pci_resume) +}; + +static struct pci_driver phytmac_driver = { + .name = PHYTMAC_DRV_NAME, + .id_table = phytmac_pci_table, + .probe = phytmac_pci_probe, + .remove = phytmac_pci_remove, + .driver = { + .pm = &phytmac_pci_pm_ops, + } +}; + +module_pci_driver(phytmac_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium NIC PCI wrapper"); +MODULE_VERSION(PHYTMAC_DRIVER_VERSION); diff --git a/drivers/net/ethernet/phytium/phytmac_platform.c b/drivers/net/ethernet/phytium/phytmac_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..de69c360c4ae3c4100ae75cecde60b10a1fae9ae --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_platform.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Phytium GMAC Platform wrapper. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_v1.h" +#include "phytmac_v2.h" + +static const struct phytmac_config phytium_1p0_config = { + .hw_if = &phytmac_1p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_START + | PHYTMAC_CAPS_JUMBO + | PHYTMAC_CAPS_LSO, + .queue_num = 4, + .tsu_rate = 300000000, +}; + +static const struct phytmac_config phytium_2p0_config = { + .hw_if = &phytmac_2p0_hw, + .caps = PHYTMAC_CAPS_TAILPTR + | PHYTMAC_CAPS_LPI + | PHYTMAC_CAPS_LSO + | PHYTMAC_CAPS_MSG + | PHYTMAC_CAPS_JUMBO, + .queue_num = 2, + .tsu_rate = 300000000, +}; + +#if defined(CONFIG_OF) +static const struct of_device_id phytmac_dt_ids[] = { + { .compatible = "phytium,gmac-1.0", .data = &phytium_1p0_config }, + { .compatible = "phytium,gmac-2.0", .data = &phytium_2p0_config }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, phytmac_dt_ids); +#endif /* CONFIG_OF */ + +#ifdef CONFIG_ACPI +static const struct acpi_device_id phytmac_acpi_ids[] = { + { .id = "PHYT0046", .driver_data = (kernel_ulong_t)&phytium_1p0_config }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, phytmac_acpi_ids); +#else +#define phytmac_acpi_ids NULL +#endif + +static int phytmac_get_phy_mode(struct platform_device *pdev) +{ + const char *pm; + int err, i; + + err = device_property_read_string(&pdev->dev, "phy-mode", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(pm, phy_modes(i))) + return i; + } + + return -ENODEV; +} + +static int phytmac_plat_probe(struct platform_device *pdev) +{ + const struct phytmac_config *phytmac_config = &phytium_1p0_config; + struct device_node *np = pdev->dev.of_node; + struct resource *regs; + struct phytmac *pdata; + int ret = 0; + int i; + u32 queue_num; + + pdata = phytmac_alloc_pdata(&pdev->dev); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto err_alloc; + } + + platform_set_drvdata(pdev, pdata); + + 
pdata->platdev = pdev; + + if (pdev->dev.of_node) { + const struct of_device_id *match; + + match = of_match_node(phytmac_dt_ids, np); + if (match && match->data) { + phytmac_config = match->data; + pdata->hw_if = phytmac_config->hw_if; + pdata->capacities = phytmac_config->caps; + pdata->queues_max_num = phytmac_config->queue_num; + } + } else if (has_acpi_companion(&pdev->dev)) { + const struct acpi_device_id *match; + + match = acpi_match_device(phytmac_acpi_ids, &pdev->dev); + if (match && match->driver_data) { + phytmac_config = (void *)match->driver_data; + pdata->hw_if = phytmac_config->hw_if; + pdata->capacities = phytmac_config->caps; + pdata->queues_max_num = phytmac_config->queue_num; + } + } + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdata->mac_regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(pdata->mac_regs)) { + dev_err(&pdev->dev, "mac_regs ioremap failed\n"); + ret = PTR_ERR(pdata->mac_regs); + goto err_mem; + } + pdata->ndev->base_addr = regs->start; + + i = 0; + if (pdata->capacities & PHYTMAC_CAPS_MSG) { + ++i; + regs = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (regs) { + pdata->msg_regs = ioremap_wt(regs->start, MEMORY_SIZE); + if (!pdata->msg_regs) { + dev_err(&pdev->dev, "msg_regs ioremap failed, i=%d\n", i); + goto err_mem; + } + } + } + + if (device_property_read_bool(&pdev->dev, "lpi")) + pdata->capacities |= PHYTMAC_CAPS_LPI; + + if (pdata->capacities & PHYTMAC_CAPS_LPI) { + /* lpi resource */ + ++i; + regs = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (regs) { + pdata->mhu_regs = ioremap(regs->start, MHU_SIZE); + if (!pdata->mhu_regs) + dev_err(&pdev->dev, "mhu regs ioremap failed, i=%d\n", i); + } + } + + if (device_property_read_u32(&pdev->dev, "dma-burst-length", &pdata->dma_burst_length)) + pdata->dma_burst_length = DEFAULT_DMA_BURST_LENGTH; + + if (device_property_read_u32(&pdev->dev, "jumbo-max-length", &pdata->jumbo_len)) + pdata->jumbo_len = DEFAULT_JUMBO_MAX_LENGTH; + + if 
(device_property_read_u32(&pdev->dev, "queue-number", &queue_num)) + pdata->queues_num = pdata->queues_max_num; + else + pdata->queues_num = queue_num; + + pdata->wol = 0; + if (device_property_read_bool(&pdev->dev, "magic-packet")) + pdata->wol |= PHYTMAC_WAKE_MAGIC; + + pdata->use_ncsi = device_property_read_bool(&pdev->dev, "use-ncsi"); + pdata->use_mii = device_property_read_bool(&pdev->dev, "use-mii"); + + pdata->power_state = PHYTMAC_POWEROFF; + + device_set_wakeup_capable(&pdev->dev, pdata->wol & PHYTMAC_WOL_MAGIC_PACKET); + + for (i = 0; i < pdata->queues_num; i++) { + pdata->irq_type = IRQ_TYPE_INT; + pdata->queue_irq[i] = platform_get_irq(pdev, i); + } + + ret = phytmac_get_phy_mode(pdev); + if (ret < 0) + pdata->phy_interface = PHY_INTERFACE_MODE_MII; + else + pdata->phy_interface = ret; + + ret = phytmac_drv_probe(pdata); + if (ret) + goto err_mem; + + if (netif_msg_probe(pdata)) { + dev_notice(&pdev->dev, "phytium net device enabled\n"); + dev_dbg(pdata->dev, "use_ncsi:%d, use_mii:%d, wol:%d, queues_num:%d\n", + pdata->use_ncsi, pdata->use_mii, pdata->wol, pdata->queues_num); + } + + return 0; + +err_mem: + phytmac_free_pdata(pdata); + +err_alloc: + dev_err(&pdev->dev, "phytium net device not enabled\n"); + + return ret; +} + +static int phytmac_plat_remove(struct platform_device *pdev) +{ + struct phytmac *pdata = platform_get_drvdata(pdev); + + phytmac_drv_remove(pdata); + phytmac_free_pdata(pdata); + + return 0; +} + +static int __maybe_unused phytmac_plat_suspend(struct device *dev) +{ + struct phytmac *pdata = dev_get_drvdata(dev); + int ret; + + ret = phytmac_drv_suspend(pdata); + + return ret; +} + +static int __maybe_unused phytmac_plat_resume(struct device *dev) +{ + struct phytmac *pdata = dev_get_drvdata(dev); + int ret; + + ret = phytmac_drv_resume(pdata); + + return ret; +} + +static const struct dev_pm_ops phytmac_plat_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytmac_plat_suspend, phytmac_plat_resume) +}; + +static struct platform_driver 
phytmac_driver = { + .probe = phytmac_plat_probe, + .remove = phytmac_plat_remove, + .driver = { + .name = PHYTMAC_DRV_NAME, + .of_match_table = of_match_ptr(phytmac_dt_ids), + .acpi_match_table = phytmac_acpi_ids, + .pm = &phytmac_plat_pm_ops, + }, +}; + +module_platform_driver(phytmac_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Phytium Ethernet driver"); +MODULE_AUTHOR("Wenting Song"); +MODULE_ALIAS("platform:phytmac"); +MODULE_VERSION(PHYTMAC_DRIVER_VERSION); diff --git a/drivers/net/ethernet/phytium/phytmac_ptp.c b/drivers/net/ethernet/phytium/phytmac_ptp.c new file mode 100644 index 0000000000000000000000000000000000000000..7a2803e1c4904bc92a82075658e396cfd379b070 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_ptp.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only +/** + * 1588 PTP support for Phytium GMAC device. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_ptp.h" + +static int phytmac_ptp_parse_header(struct sk_buff *skb, unsigned int type, + u8 *p_flag_field, u8 *p_msgtype) +{ + unsigned int offset = 0; + u8 *data = skb_mac_header(skb); + + switch (type & PTP_CLASS_VMASK) { + case PTP_CLASS_V1: + case PTP_CLASS_V2: + break; + default: + return -ERANGE; + } + + if (type & PTP_CLASS_VLAN) + offset += VLAN_HLEN; + + switch (type & PTP_CLASS_PMASK) { + case PTP_CLASS_IPV4: + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; + break; + case PTP_CLASS_IPV6: + offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; + break; + case PTP_CLASS_L2: + offset += ETH_HLEN; + break; + default: + return -ERANGE; + } + + /* Ensure that the entire header is present in this packet. */ + /* PTP header is 34 bytes. 
*/ + if (offset + 34 > skb->len) + return -EINVAL; + + *p_flag_field = data[offset + 6]; + *p_msgtype = data[offset] & 0x0f; + + return 0; +} + +bool phytmac_ptp_one_step(struct sk_buff *skb) +{ + unsigned int ptp_class; + u8 msgtype; + u8 flag_field; + int ret = 0; + + /* No need to parse packet if PTP TS is not involved */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + goto not_oss; + + /* Identify and return whether PTP one step sync is being processed */ + ptp_class = ptp_classify_raw(skb); + if (ptp_class == PTP_CLASS_NONE) + goto not_oss; + + ret = phytmac_ptp_parse_header(skb, ptp_class, &flag_field, &msgtype); + if (ret) + goto not_oss; + + if (flag_field & 0x2) + goto not_oss; + + if (msgtype == PTP_MSGTYPE_SYNC) + return true; + +not_oss: + return false; +} + +int phytmac_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned long flags; + + spin_lock_irqsave(&pdata->ts_clk_lock, flags); + hw_if->get_time(pdata, ts); + spin_unlock_irqrestore(&pdata->ts_clk_lock, flags); + + return 0; +} + +int phytmac_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + unsigned long flags; + + spin_lock_irqsave(&pdata->ts_clk_lock, flags); + hw_if->set_time(pdata, ts->tv_sec, ts->tv_nsec); + spin_unlock_irqrestore(&pdata->ts_clk_lock, flags); + + return 0; +} + +int phytmac_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + bool negative = false; + + if (scaled_ppm < 0) { + negative = true; + scaled_ppm = -scaled_ppm; + } + + hw_if->adjust_fine(pdata, scaled_ppm, negative); + return 0; +} + +int phytmac_ptp_adjtime(struct ptp_clock_info 
*ptp, s64 delta) +{ + struct phytmac *pdata = container_of(ptp, struct phytmac, ptp_clock_info); + struct phytmac_hw_if *hw_if = pdata->hw_if; + int negative = 0; + + if (delta < 0) { + negative = 1; + delta = -delta; + } + + spin_lock_irq(&pdata->ts_clk_lock); + hw_if->adjust_time(pdata, delta, negative); + spin_unlock_irq(&pdata->ts_clk_lock); + + return 0; +} + +int phytmac_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +void phytmac_ptp_init_timer(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + u32 rem = 0; + u64 adj; + + pdata->ts_rate = hw_if->get_ts_rate(pdata); + pdata->ts_incr.ns = div_u64_rem(NSEC_PER_SEC, pdata->ts_rate, &rem); + if (rem) { + adj = rem; + adj <<= 24; + pdata->ts_incr.sub_ns = div_u64(adj, pdata->ts_rate); + } else { + pdata->ts_incr.sub_ns = 0; + } +} + +void phytmac_ptp_rxstamp(struct phytmac *pdata, struct sk_buff *skb, + struct phytmac_dma_desc *desc) +{ + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct timespec64 ts; + + if (hw_if->ts_valid(pdata, desc, PHYTMAC_RX)) { + hw_if->get_timestamp(pdata, desc->desc4, desc->desc5, &ts); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + } +} + +int phytmac_ptp_txstamp(struct phytmac_queue *queue, struct sk_buff *skb, + struct phytmac_dma_desc *desc) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct timespec64 ts; + struct skb_shared_hwtstamps shhwtstamps; + + if (queue->pdata->ts_config.tx_type == TS_DISABLED) + return -EOPNOTSUPP; + + if (!hw_if->ts_valid(pdata, desc, PHYTMAC_TX)) + return -EINVAL; + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + hw_if->get_timestamp(pdata, desc->desc4, desc->desc5, &ts); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); 
+ skb_tstamp_tx(skb, &shhwtstamps); + + return 0; +} + +int phytmac_ptp_register(struct phytmac *pdata) +{ + pdata->ptp_clock_info.owner = THIS_MODULE; + snprintf(pdata->ptp_clock_info.name, 16, "%s", pdata->ndev->name); + pdata->ptp_clock_info.max_adj = 64000000; /* In PPB */ + pdata->ptp_clock_info.n_alarm = 0; + pdata->ptp_clock_info.n_ext_ts = 0; + pdata->ptp_clock_info.n_per_out = 0; + pdata->ptp_clock_info.pps = 1; + pdata->ptp_clock_info.adjfine = phytmac_ptp_adjfine; + pdata->ptp_clock_info.adjtime = phytmac_ptp_adjtime; + pdata->ptp_clock_info.gettime64 = phytmac_ptp_gettime; + pdata->ptp_clock_info.settime64 = phytmac_ptp_settime; + pdata->ptp_clock_info.enable = phytmac_ptp_enable; + pdata->ptp_clock = ptp_clock_register(&pdata->ptp_clock_info, pdata->dev); + if (IS_ERR_OR_NULL(pdata->ptp_clock)) { + dev_err(pdata->dev, "ptp_clock_register failed %lu\n", + PTR_ERR(pdata->ptp_clock)); + return -EINVAL; + } + + return 0; +} + +void phytmac_ptp_unregister(struct phytmac *pdata) +{ + struct phytmac_hw_if *hw_if = pdata->hw_if; + + if (pdata->ptp_clock) + ptp_clock_unregister(pdata->ptp_clock); + pdata->ptp_clock = NULL; + + hw_if->clear_time(pdata); + + dev_info(pdata->dev, "phytmac ptp clock unregistered.\n"); +} + +void phytmac_ptp_init(struct net_device *ndev) +{ + struct phytmac *pdata = netdev_priv(ndev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + + phytmac_ptp_init_timer(pdata); + + hw_if->init_ts_hw(pdata); + + dev_info(pdata->dev, "phytmac ptp clock init success.\n"); +} + +int phytmac_ptp_get_ts_config(struct net_device *dev, struct ifreq *rq) +{ + struct hwtstamp_config *tstamp_config; + struct phytmac *pdata = netdev_priv(dev); + + if (!IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + return -EOPNOTSUPP; + + tstamp_config = &pdata->ts_config; + + if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config))) + return -EFAULT; + else + return 0; +} + +int phytmac_ptp_set_ts_config(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + 
struct hwtstamp_config config; + struct phytmac *pdata = netdev_priv(dev); + struct phytmac_hw_if *hw_if = pdata->hw_if; + struct ts_ctrl tstamp_ctrl; + int ret; + + memset(&tstamp_ctrl, 0, sizeof(struct ts_ctrl)); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + tstamp_ctrl.one_step = 1; + tstamp_ctrl.tx_control = TS_ALL_FRAMES; + break; + case HWTSTAMP_TX_ON: + tstamp_ctrl.one_step = 0; + tstamp_ctrl.tx_control = TS_ALL_FRAMES; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tstamp_ctrl.rx_control = TS_ALL_PTP_FRAMES; + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + tstamp_ctrl.rx_control = TS_ALL_FRAMES; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + config.rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + ret = hw_if->set_ts_config(pdata, &tstamp_ctrl); + if (ret) + return ret; + + /* save these settings for future reference */ + pdata->ts_config = config; + memcpy(&pdata->ts_config, &config, sizeof(config)); + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + else + return 0; +} + diff --git a/drivers/net/ethernet/phytium/phytmac_ptp.h b/drivers/net/ethernet/phytium/phytmac_ptp.h new file mode 100644 index 0000000000000000000000000000000000000000..0ee385e5fe4d0a4e165e15b80afc50dfaff3ae3d --- 
/dev/null +++ b/drivers/net/ethernet/phytium/phytmac_ptp.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Phytium Ethernet Controller driver + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _PHYTMAC_PTP_H +#define _PHYTMAC_PTP_H + +#ifdef CONFIG_PHYTMAC_ENABLE_PTP + +enum ptp_msgtype { + PTP_MSGTYPE_SYNC, + PTP_MSGTYPE_DELAY_REQ, + PTP_MSGTYPE_PDELAY_REQ, + PTP_MSGTYPE_PDELAY_RESP, +}; + +bool phytmac_ptp_one_step(struct sk_buff *skb); +int phytmac_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); +int phytmac_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts); +int phytmac_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm); +int phytmac_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta); +int phytmac_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on); +void phytmac_ptp_init_timer(struct phytmac *pdata); +void phytmac_ptp_rxstamp(struct phytmac *pdata, struct sk_buff *skb, + struct phytmac_dma_desc *desc); +int phytmac_ptp_txstamp(struct phytmac_queue *queue, struct sk_buff *skb, + struct phytmac_dma_desc *desc); +int phytmac_ptp_register(struct phytmac *pdata); +void phytmac_ptp_unregister(struct phytmac *pdata); +void phytmac_ptp_init(struct net_device *ndev); +int phytmac_ptp_get_ts_config(struct net_device *dev, struct ifreq *rq); +int phytmac_ptp_set_ts_config(struct net_device *dev, struct ifreq *ifr, int cmd); +#else +static inline bool phytmac_ptp_one_step(struct sk_buff *skb) +{ + return 1; +} + +static inline void phytmac_ptp_rxstamp(struct phytmac *pdata, struct sk_buff *skb, + struct phytmac_dma_desc *desc) {} +static inline int phytmac_ptp_txstamp(struct phytmac_queue *queue, struct sk_buff *skb, + struct phytmac_dma_desc *desc) +{ + return -1; +} + +static inline int phytmac_ptp_register(struct phytmac 
*pdata) +{ + return 0; +} + +static inline void phytmac_ptp_unregister(struct phytmac *pdata) {} +static inline void phytmac_ptp_init(struct net_device *ndev) {} + +#endif +#endif diff --git a/drivers/net/ethernet/phytium/phytmac_v1.c b/drivers/net/ethernet/phytium/phytmac_v1.c new file mode 100644 index 0000000000000000000000000000000000000000..99314f33754b13c1e44ff3cd3701bbc3a0340a9a --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_v1.c @@ -0,0 +1,1400 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_v1.h" + +static int phytmac_enable_promise(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + + if (enable) + value |= PHYTMAC_BIT(PROMISC); + else + value &= ~PHYTMAC_BIT(PROMISC); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + + return 0; +} + +static int phytmac_enable_multicast(struct phytmac *pdata, int enable) +{ + u32 config; + + if (enable) { + PHYTMAC_WRITE(pdata, PHYTMAC_HASHB, -1); + PHYTMAC_WRITE(pdata, PHYTMAC_HASHT, -1); + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + config |= PHYTMAC_BIT(MH_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_HASHB, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_HASHT, 0); + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + config &= ~PHYTMAC_BIT(MH_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + } + + return 0; +} + +static int phytmac_set_mc_hash(struct phytmac *pdata, unsigned long *mc_filter) +{ + u32 config; + + PHYTMAC_WRITE(pdata, PHYTMAC_HASHB, mc_filter[0]); + PHYTMAC_WRITE(pdata, PHYTMAC_HASHT, mc_filter[1]); + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + config |= PHYTMAC_BIT(MH_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + return 0; +} + +static int phytmac_enable_rxcsum(struct phytmac *pdata, int enable) +{ + 
u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + + if (enable) + value |= PHYTMAC_BIT(RCO_EN); + else + value &= ~PHYTMAC_BIT(RCO_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + + return 0; +} + +static int phytmac_enable_txcsum(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_DCONFIG); + + if (enable) + value |= PHYTMAC_BIT(TCO_EN); + else + value &= ~PHYTMAC_BIT(TCO_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_DCONFIG, value); + + return 0; +} + +static int phytmac_enable_mdio(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + + if (enable) + value |= PHYTMAC_BIT(MPE); + else + value &= ~PHYTMAC_BIT(MPE); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, value); + + return 0; +} + +static int phytmac_enable_pause(struct phytmac *pdata, int enable) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + + if (enable) + value |= PHYTMAC_BIT(PAUSE_EN); + else + value &= ~PHYTMAC_BIT(PAUSE_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + return 0; +} + +static int phytmac_enable_network(struct phytmac *pdata, int enable, int rx_tx) +{ + u32 old_ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + u32 ctrl; + + ctrl = old_ctrl; + + if (rx_tx & PHYTMAC_TX) { + if (enable) + ctrl |= PHYTMAC_BIT(TE); + else + ctrl &= ~PHYTMAC_BIT(TE); + } + + if (rx_tx & PHYTMAC_RX) { + if (enable) + ctrl |= PHYTMAC_BIT(RE); + else + ctrl &= ~PHYTMAC_BIT(RE); + } + + if (ctrl ^ old_ctrl) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + + return 0; +} + +static int phytmac_enable_autoneg(struct phytmac *pdata, int enable) +{ + u32 ctrl = 0; + u32 old_ctrl; + + if (enable) + ctrl |= PHYTMAC_BIT(AUTONEG); + else + ctrl &= ~PHYTMAC_BIT(AUTONEG); + + old_ctrl = PHYTMAC_READ(pdata, PHYTMAC_PCSCTRL); + if (old_ctrl != ctrl) + PHYTMAC_WRITE(pdata, PHYTMAC_PCSCTRL, ctrl); + + return 0; +} + +static int phytmac_pcs_software_reset(struct phytmac *pdata, int reset) +{ + u32 value = PHYTMAC_READ(pdata, PHYTMAC_PCSCTRL); + + if (reset) + 
value |= PHYTMAC_BIT(PCS_RESET); + else + value &= ~PHYTMAC_BIT(PCS_RESET); + + PHYTMAC_WRITE(pdata, PHYTMAC_PCSCTRL, value); + + return 0; +} + +static int phytmac_mac_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + u32 ctrl, config; + + config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + + config &= ~(PHYTMAC_BIT(SPEED) | PHYTMAC_BIT(FD) | PHYTMAC_BIT(GM_EN)); + + if (speed == SPEED_100) + config |= PHYTMAC_BIT(SPEED); + else if (speed == SPEED_1000 || speed == SPEED_2500) + config |= PHYTMAC_BIT(GM_EN); + + if (duplex) + config |= PHYTMAC_BIT(FD); + + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + if (speed == SPEED_2500) { + ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + ctrl |= PHYTMAC_BIT(2PT5G); + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + } + + if (speed == SPEED_10000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_10000M); + else if (speed == SPEED_5000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_5000M); + else if (speed == SPEED_2500) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_2500M); + else if (speed == SPEED_1000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_1000M); + else + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_100M); + + return 0; +} + +static int phytmac_mac_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static int phytmac_pcs_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + u32 config; + + if (interface == PHY_INTERFACE_MODE_USXGMII) { + config = PHYTMAC_READ(pdata, PHYTMAC_USXCTRL); + if (speed == SPEED_10000) { + config = PHYTMAC_SET_BITS(config, SERDES_RATE, PHYTMAC_SERDES_RATE_10G); + config = PHYTMAC_SET_BITS(config, USX_SPEED, PHYTMAC_SPEED_10000M); + } else if (speed == SPEED_5000) { + config = PHYTMAC_SET_BITS(config, SERDES_RATE, PHYTMAC_SERDES_RATE_5G); + config = PHYTMAC_SET_BITS(config, USX_SPEED, PHYTMAC_SPEED_5000M); + } + + /* reset */ + config &= ~(PHYTMAC_BIT(RX_EN) | PHYTMAC_BIT(TX_EN)); + config 
|= PHYTMAC_BIT(RX_SYNC_RESET); + + PHYTMAC_WRITE(pdata, PHYTMAC_USXCTRL, config); + + /* enable rx and tx */ + config &= ~(PHYTMAC_BIT(RX_SYNC_RESET)); + config |= PHYTMAC_BIT(RX_EN) | PHYTMAC_BIT(TX_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_USXCTRL, config); + } + + return 0; +} + +static int phytmac_pcs_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static int phytmac_get_mac_addr(struct phytmac *pdata, u8 *addr) +{ + u32 bottom; + u16 top; + + bottom = PHYTMAC_READ(pdata, PHYTMAC_MAC1B); + top = PHYTMAC_READ(pdata, PHYTMAC_MAC1T); + + addr[0] = bottom & 0xff; + addr[1] = (bottom >> 8) & 0xff; + addr[2] = (bottom >> 16) & 0xff; + addr[3] = (bottom >> 24) & 0xff; + addr[4] = top & 0xff; + addr[5] = (top >> 8) & 0xff; + + return 0; +} + +static int phytmac_set_mac_addr(struct phytmac *pdata, const u8 *addr) +{ + u32 bottom; + u16 top; + + bottom = cpu_to_le32(*((u32 *)addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_MAC1B, bottom); + top = cpu_to_le16(*((u16 *)(addr + 4))); + PHYTMAC_WRITE(pdata, PHYTMAC_MAC1T, top); + + return 0; +} + +static void phytmac_reset_hw(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + unsigned int q; + u32 ctrl; + + ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + + ctrl &= ~(PHYTMAC_BIT(RE) | PHYTMAC_BIT(TE)); + ctrl |= PHYTMAC_BIT(CLEARSTAT); + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + + /* Disable and clear all interrupts and disable queues */ + for (q = 0, queue = pdata->queues; q < pdata->queues_max_num; ++q, ++queue) { + if (q == 0) { + PHYTMAC_WRITE(pdata, PHYTMAC_ID, -1); + PHYTMAC_WRITE(pdata, PHYTMAC_IS, -1); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR_Q0, 1); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR_Q0, 1); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_IDR(q - 1), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_ISR(q - 1), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR(q - 1), 1); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR(q - 1), 1); + } + + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTRH(q), 0); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTRH(q), 0); + + if (pdata->capacities & 
PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(q), 0); + } +} + +static void phytmac_get_regs(struct phytmac *pdata, u32 *reg_buff) +{ + reg_buff[0] = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + reg_buff[1] = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + reg_buff[2] = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); + reg_buff[3] = PHYTMAC_READ(pdata, PHYTMAC_DCONFIG); + reg_buff[4] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR_Q0); + reg_buff[5] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR_Q0); + reg_buff[6] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR(1)); + reg_buff[7] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR(1)); + reg_buff[8] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR(2)); + reg_buff[9] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR(2)); + reg_buff[10] = PHYTMAC_READ(pdata, PHYTMAC_TXPTR(3)); + reg_buff[11] = PHYTMAC_READ(pdata, PHYTMAC_RXPTR(3)); + reg_buff[12] = PHYTMAC_READ(pdata, PHYTMAC_HCONFIG); + reg_buff[13] = PHYTMAC_READ(pdata, PHYTMAC_IM); + if (pdata->phy_interface == PHY_INTERFACE_MODE_USXGMII) { + reg_buff[14] = PHYTMAC_READ(pdata, PHYTMAC_USXCTRL); + reg_buff[15] = PHYTMAC_READ(pdata, PHYTMAC_USXSTATUS); + } else { + reg_buff[14] = PHYTMAC_READ(pdata, PHYTMAC_PCSCTRL); + reg_buff[15] = PHYTMAC_READ(pdata, PHYTMAC_PCSSTATUS); + } +} + +static int phytmac_init_hw(struct phytmac *pdata) +{ + u32 config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + u32 dmaconfig; + u32 nctrlconfig; + + nctrlconfig = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + nctrlconfig |= PHYTMAC_BIT(MPE); + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, nctrlconfig); + + phytmac_set_mac_addr(pdata, pdata->ndev->dev_addr); + + PHYTMAC_WRITE(pdata, PHYTMAC_AXICTRL, 0x1010); + + /* jumbo */ + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + config |= PHYTMAC_BIT(JUMBO_EN); + else + config |= PHYTMAC_BIT(RCV_BIG); + /* promisc */ + if (pdata->ndev->flags & IFF_PROMISC) + config |= PHYTMAC_BIT(PROMISC); + if (pdata->ndev->features & NETIF_F_RXCSUM) + config |= PHYTMAC_BIT(RCO_EN); + if (pdata->ndev->flags & IFF_BROADCAST) + config &= ~PHYTMAC_BIT(NO_BCAST); + else + 
config |= PHYTMAC_BIT(NO_BCAST); + /* pause enable */ + config |= PHYTMAC_BIT(PAUSE_EN); + /* Rx Fcs remove */ + config |= PHYTMAC_BIT(FCS_REMOVE); + if (pdata->dma_data_width == PHYTMAC_DBW_64) + config |= PHYTMAC_BIT(DBW64); + if (pdata->dma_data_width == PHYTMAC_DBW_128) + config |= PHYTMAC_BIT(DBW128); + /* mdc div */ + config = PHYTMAC_SET_BITS(config, MCD, 6); + netdev_dbg(pdata->ndev, "phytmac configure NetConfig with 0x%08x\n", + config); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + /* init dma */ + dmaconfig = PHYTMAC_READ(pdata, PHYTMAC_DCONFIG); + if (pdata->dma_burst_length) + dmaconfig = PHYTMAC_SET_BITS(dmaconfig, BURST, pdata->dma_burst_length); + /* default in small endian */ + dmaconfig &= ~(PHYTMAC_BIT(ENDIA_PKT) | PHYTMAC_BIT(ENDIA_DESC)); + + if (pdata->ndev->features & NETIF_F_HW_CSUM) + dmaconfig |= PHYTMAC_BIT(TCO_EN); + else + dmaconfig &= ~PHYTMAC_BIT(TCO_EN); + + if (pdata->dma_addr_width) + dmaconfig |= PHYTMAC_BIT(ABW); + + /* fdir ethtype -- ipv4 */ + PHYTMAC_WRITE(pdata, PHYTMAC_ETHT(0), (uint16_t)ETH_P_IP); + + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + dmaconfig |= PHYTMAC_BIT(RX_EXBD_EN) | PHYTMAC_BIT(TX_EXBD_EN); + + PHYTMAC_WRITE(pdata, PHYTMAC_DCONFIG, dmaconfig); + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_ENABLE, PHYTMAC_BIT(TXTAIL_ENABLE)); + + if (phy_interface_mode_is_8023z(pdata->phy_interface)) + phytmac_pcs_software_reset(pdata, 1); + + return 0; +} + +static int phytmac_powerup_hw(struct phytmac *pdata, int on) +{ + u32 status, data0, data1, rdata1; + int ret; + + if (pdata->capacities & PHYTMAC_CAPS_LPI) { + ret = readx_poll_timeout(PHYTMAC_READ_STAT, pdata, status, !status, + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh status is busy, status=%x\n", status); + + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy, data0=%x\n", 
data0); + + data0 = 0; + data0 = PHYTMAC_SET_BITS(data0, DATA0_MSG, PHYTMAC_MSG_PM); + data0 = PHYTMAC_SET_BITS(data0, DATA0_PRO, PHYTMAC_PRO_ID); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA0, data0); + data1 = 0; + + if (on == PHYTMAC_POWERON) { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATON); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } else { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATOFF); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } + + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_AP_CPP_SET, 1); + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy"); + + rdata1 = PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA1); + if (rdata1 == data1) + netdev_err(pdata->ndev, "gmac power %s success, data1 = %x, rdata1=%x\n", + on ? "up" : "down", data1, rdata1); + else + netdev_err(pdata->ndev, "gmac power %s failed, data1 = %x, rdata1=%x\n", + on ? 
"up" : "down", data1, rdata1); + } + pdata->power_state = on; + + return 0; +} + +static int phytmac_set_wake(struct phytmac *pdata, int wake) +{ + u32 value = 0; + + if (wake & PHYTMAC_WAKE_MAGIC) + value |= PHYTMAC_BIT(MAGIC); + if (wake & PHYTMAC_WAKE_ARP) + value |= PHYTMAC_BIT(ARP); + if (wake & PHYTMAC_WAKE_UCAST) + value |= PHYTMAC_BIT(UCAST); + if (wake & PHYTMAC_WAKE_MCAST) + value |= PHYTMAC_BIT(MCAST); + + PHYTMAC_WRITE(pdata, PHYTMAC_WOL, value); + if (wake) { + PHYTMAC_WRITE(pdata, PHYTMAC_IE, PHYTMAC_BIT(WOL_RECEIVE_ENABLE)); + value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG) | PHYTMAC_BIT(IGNORE_RX_FCS); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_ID, PHYTMAC_BIT(WOL_RECEIVE_DISABLE)); + value = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG) & ~PHYTMAC_BIT(IGNORE_RX_FCS); + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, value); + } + + return 0; +} + +static void phytmac_mdio_idle(struct phytmac *pdata) +{ + u32 val; + + /* wait for end of transfer */ + val = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); + while (!(val & PHYTMAC_BIT(MDIO_IDLE))) { + cpu_relax(); + val = PHYTMAC_READ(pdata, PHYTMAC_NSTATUS); + } +} + +static int phytmac_mdio_data_read(struct phytmac *pdata, int mii_id, int regnum, int is_c45) +{ + u16 data; + + if (is_c45) { + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_ADDR) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(DATA, regnum & 0xFFFF) + | PHYTMAC_BITS(MUST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_READ) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(DATA, regnum & 0xFFFF) + | PHYTMAC_BITS(MUST, 2))); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C22) + | PHYTMAC_BITS(OPS, 
PHYTMAC_OPS_C22_READ) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, regnum) + | PHYTMAC_BITS(MUST, 2))); + } + + phytmac_mdio_idle(pdata); + data = PHYTMAC_READ(pdata, PHYTMAC_MDATA) & 0xffff; + phytmac_mdio_idle(pdata); + + return data; +} + +static int phytmac_mdio_data_write(struct phytmac *pdata, int mii_id, + int regnum, int is_c45, u16 data) +{ + if (is_c45) { + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_ADDR) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(DATA, regnum & 0xFFFF) + | PHYTMAC_BITS(MUST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C45) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C45_WRITE) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(DATA, data) + | PHYTMAC_BITS(MUST, 2))); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_MDATA, (PHYTMAC_BITS(CLAUSE_SEL, PHYTMAC_CLAUSE_C22) + | PHYTMAC_BITS(OPS, PHYTMAC_OPS_C22_WRITE) + | PHYTMAC_BITS(PHY_ADDR, mii_id) + | PHYTMAC_BITS(REG_ADDR, regnum) + | PHYTMAC_BITS(DATA, data) + | PHYTMAC_BITS(MUST, 2))); + } + phytmac_mdio_idle(pdata); + + return 0; +} + +static int phytmac_get_feature_all(struct phytmac *pdata) +{ + unsigned int queue_mask; + unsigned int num_queues; + int val; + + /* get max queues */ + queue_mask = 0x1; + queue_mask |= PHYTMAC_READ(pdata, PHYTMAC_DEFAULT2) & 0xff; + num_queues = hweight32(queue_mask); + pdata->queues_max_num = num_queues; + + /* get dma desc prefetch number */ + val = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT4, TXDESCRD); + if (val) + pdata->tx_bd_prefetch = (2 << (val - 1)) * + sizeof(struct phytmac_dma_desc); + + val = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT4, RXDESCRD); + if (val) + pdata->rx_bd_prefetch = (2 << (val - 1)) * + sizeof(struct phytmac_dma_desc); + + /* dma bus width */ + pdata->dma_data_width = 
PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT1, DBW); + + /* dma addr width */ + if (PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT2, DAW64)) + pdata->dma_addr_width = 64; + else + pdata->dma_addr_width = 32; + + /* max rx fs */ + pdata->max_rx_fs = PHYTMAC_READ_BITS(pdata, PHYTMAC_DEFAULT3, SCR2CMP); + + if (netif_msg_hw(pdata)) + netdev_info(pdata->ndev, "get feature queue_num:%d, daw:%d, dbw:%d, rx_fs:%d, rx_bd:%d, tx_bd:%d\n", + pdata->queues_num, pdata->dma_addr_width, pdata->dma_data_width, + pdata->max_rx_fs, pdata->rx_bd_prefetch, pdata->tx_bd_prefetch); + return 0; +} + +static int phytmac_add_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; + u16 index = rx_flow->location; + u32 tmp, fdir_ctrl; + bool ipsrc = false; + bool ipdst = false; + bool port = false; + + tp4sp_v = &rx_flow->h_u.tcp_ip4_spec; + tp4sp_m = &rx_flow->m_u.tcp_ip4_spec; + + if (tp4sp_m->ip4src == 0xFFFFFFFF) { + tmp = 0; + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, ETHTYPE_SIP_OFFSET); + tmp = PHYTMAC_SET_BITS(tmp, OFFSET_TYPE, PHYTMAC_OFFSET_AFTER_L2HEAD); + tmp = PHYTMAC_SET_BITS(tmp, DIS_MASK, 1); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index), tp4sp_v->ip4src); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index), tmp); + ipsrc = true; + } + + if (tp4sp_m->ip4dst == 0xFFFFFFFF) { + /* 2nd compare reg - IP destination address */ + tmp = 0; + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, ETHTYPE_DIP_OFFSET); + tmp = PHYTMAC_SET_BITS(tmp, OFFSET_TYPE, PHYTMAC_OFFSET_AFTER_L2HEAD); + tmp = PHYTMAC_SET_BITS(tmp, DIS_MASK, 1); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 1), tp4sp_v->ip4dst); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index + 1), tmp); + ipdst = true; + } + + if (tp4sp_m->psrc == 0xFFFF || tp4sp_m->pdst == 0xFFFF) { + tmp = 0; + tmp = PHYTMAC_SET_BITS(tmp, OFFSET_TYPE, PHYTMAC_OFFSET_AFTER_L3HEAD); + if (tp4sp_m->psrc == tp4sp_m->pdst) { + tmp = PHYTMAC_SET_BITS(tmp, DIS_MASK, 1); + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, 
IPHEAD_SPORT_OFFSET); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 2), + tp4sp_v->psrc | (u32)(tp4sp_v->pdst << 16)); + } else { + tmp = PHYTMAC_SET_BITS(tmp, DIS_MASK, 0); + if (tp4sp_m->psrc == 0xFFFF) { /* src port */ + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, IPHEAD_SPORT_OFFSET); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 2), + tp4sp_m->psrc | (u32)(tp4sp_v->psrc << 16)); + } else { /* dst port */ + tmp = PHYTMAC_SET_BITS(tmp, OFFSET, IPHEAD_DPORT_OFFSET); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + 2), + tp4sp_m->pdst | (u32)(tp4sp_v->pdst << 16)); + } + } + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index + 2), tmp); + port = true; + } + + fdir_ctrl = PHYTMAC_READ(pdata, PHYTMAC_FDIR(rx_flow->location)); + + if (ipsrc) { + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CA, 3 * index); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CA_EN, 1); + } + + if (ipdst) { + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CB, 3 * index + 1); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CB_EN, 1); + } + + if (port) { + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CC, 3 * index + 2); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, CC_EN, 1); + } + + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, QUEUE_NUM, (rx_flow->ring_cookie) & 0xFF); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, ETH_TYPE, 0); + fdir_ctrl = PHYTMAC_SET_BITS(fdir_ctrl, ETH_EN, 1); + + PHYTMAC_WRITE(pdata, PHYTMAC_FDIR(rx_flow->location), fdir_ctrl); + + return 0; +} + +static int phytmac_del_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + int i; + int index = rx_flow->location; + + PHYTMAC_WRITE(pdata, PHYTMAC_FDIR(index), 0); + for (i = 0; i < 3; i++) { + PHYTMAC_WRITE(pdata, PHYTMAC_COMP1(3 * index + i), 0); + PHYTMAC_WRITE(pdata, PHYTMAC_COMP2(3 * index + i), 0); + } + return 0; +} + +static int phytmac_init_ring_hw(struct phytmac *pdata) +{ + struct phytmac_queue *queue; + unsigned int q = 0; + u32 buffer_size = pdata->rx_buffer_len / 64; + + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, 
++queue) { + if (q == 0) { + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR_Q0, + lower_32_bits(queue->rx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR_Q0, + lower_32_bits(queue->tx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_DCONFIG, + PHYTMAC_SET_BITS(PHYTMAC_READ(pdata, PHYTMAC_DCONFIG), + RX_BUF_LEN, buffer_size)); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTR(q - 1), + lower_32_bits(queue->rx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTR(q - 1), + lower_32_bits(queue->tx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_RBQS(q - 1), buffer_size); + } + + PHYTMAC_WRITE(pdata, PHYTMAC_TXPTRH(q), upper_32_bits(queue->tx_ring_addr)); + PHYTMAC_WRITE(pdata, PHYTMAC_RXPTRH(q), upper_32_bits(queue->rx_ring_addr)); + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(q), queue->tx_tail); + } + + return 0; +} + +static u32 phytmac_get_irq_mask(u32 mask) +{ + u32 value = 0; + + value |= (mask & PHYTMAC_INT_TX_COMPLETE) ? PHYTMAC_BIT(TXCOMP) : 0; + value |= (mask & PHYTMAC_INT_TX_ERR) ? PHYTMAC_BIT(BUS_ERR) : 0; + value |= (mask & PHYTMAC_INT_RX_COMPLETE) ? PHYTMAC_BIT(RXCOMP) : 0; + value |= (mask & PHYTMAC_INT_RX_OVERRUN) ? PHYTMAC_BIT(RXOVERRUN) : 0; + value |= (mask & PHYTMAC_INT_RX_DESC_FULL) ? PHYTMAC_BIT(RUB) : 0; + + return value; +} + +static u32 phytmac_get_irq_status(u32 value) +{ + u32 status = 0; + + status |= (value & PHYTMAC_BIT(TXCOMP)) ? PHYTMAC_INT_TX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(BUS_ERR)) ? PHYTMAC_INT_TX_ERR : 0; + status |= (value & PHYTMAC_BIT(RXCOMP)) ? PHYTMAC_INT_RX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(RXOVERRUN)) ? PHYTMAC_INT_RX_OVERRUN : 0; + status |= (value & PHYTMAC_BIT(RUB)) ? 
PHYTMAC_INT_RX_DESC_FULL : 0; + + return status; +} + +static void phytmac_enable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + + if (queue_index == 0) + PHYTMAC_WRITE(pdata, PHYTMAC_IE, value); + else + PHYTMAC_WRITE(pdata, PHYTMAC_IER(queue_index - 1), value); +} + +static void phytmac_disable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + + if (queue_index == 0) + PHYTMAC_WRITE(pdata, PHYTMAC_ID, value); + else + PHYTMAC_WRITE(pdata, PHYTMAC_IDR(queue_index - 1), value); +} + +static void phytmac_clear_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + + if (queue_index == 0) + PHYTMAC_WRITE(pdata, PHYTMAC_IS, value); + else + PHYTMAC_WRITE(pdata, PHYTMAC_ISR(queue_index - 1), value); +} + +static unsigned int phytmac_get_intx_mask(struct phytmac *pdata) +{ + u32 value; + + value = PHYTMAC_READ(pdata, PHYTMAC_INTX_IRQ_MASK); + PHYTMAC_WRITE(pdata, PHYTMAC_INTX_IRQ_MASK, value); + + return value; +} + +static unsigned int phytmac_get_irq(struct phytmac *pdata, int queue_index) +{ + u32 status, value; + + if (queue_index == 0) + value = PHYTMAC_READ(pdata, PHYTMAC_IS); + else + value = PHYTMAC_READ(pdata, PHYTMAC_ISR(queue_index - 1)); + + status = phytmac_get_irq_status(value); + + return status; +} + +static unsigned int phytmac_tx_map_desc(struct phytmac_queue *queue, + u32 tx_tail, struct packet_info *packet) +{ + unsigned int i, ctrl; + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc; + struct phytmac_tx_skb *tx_skb; + unsigned int eof = 1; + + i = tx_tail; + + if (!(pdata->capacities & PHYTMAC_CAPS_TAILPTR)) { + ctrl = PHYTMAC_BIT(TX_USED); + desc = phytmac_get_tx_desc(queue, i); + desc->desc1 = ctrl; + } + + do { + i--; + tx_skb = phytmac_get_tx_skb(queue, i); + desc = phytmac_get_tx_desc(queue, i); + + ctrl = (u32)tx_skb->length; + if (eof) 
{ + ctrl |= PHYTMAC_BIT(TX_LAST); + eof = 0; + } + + if (unlikely(i == (pdata->tx_ring_size - 1))) + ctrl |= PHYTMAC_BIT(TX_WRAP); + + if (i == queue->tx_tail) { + ctrl |= PHYTMAC_BITS(TX_LSO, packet->lso); + ctrl |= PHYTMAC_BITS(TX_TCP_SEQ_SRC, packet->seq); + if (packet->nocrc) + ctrl |= PHYTMAC_BIT(TX_NOCRC); + } else { + ctrl |= PHYTMAC_BITS(MSS_MFS, packet->mss); + } + + desc->desc2 = upper_32_bits(tx_skb->addr); + desc->desc0 = lower_32_bits(tx_skb->addr); + /* Make newly descriptor visible to hardware */ + wmb(); + desc->desc1 = ctrl; + } while (i != queue->tx_tail); + + return 0; +} + +static void phytmac_init_rx_map_desc(struct phytmac_queue *queue, + u32 index) +{ + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + desc->desc1 = 0; + /* Make newly descriptor to hardware */ + dma_wmb(); + desc->desc0 |= PHYTMAC_BIT(RX_USED); +} + +static unsigned int phytmac_rx_map_desc(struct phytmac_queue *queue, + u32 index, dma_addr_t addr) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + if (addr) { + if (unlikely(index == (pdata->rx_ring_size - 1))) + addr |= PHYTMAC_BIT(RX_WRAP); + desc->desc1 = 0; + desc->desc2 = upper_32_bits(addr); + /* Make newly descriptor to hardware */ + dma_wmb(); + desc->desc0 = lower_32_bits(addr); + } else { + desc->desc1 = 0; + /* Make newly descriptor to hardware */ + dma_wmb(); + desc->desc0 &= ~PHYTMAC_BIT(RX_USED); + } + + return 0; +} + +static unsigned int phytmac_zero_rx_desc_addr(struct phytmac_dma_desc *desc) +{ + desc->desc2 = 0; + desc->desc0 = PHYTMAC_BIT(RX_USED); + + return 0; +} + +static void phytmac_tx_start(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(queue->index), + PHYTMAC_BIT(TXTAIL_UPDATE) | queue->tx_tail); + + if (pdata->capacities & PHYTMAC_CAPS_START) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, + 
PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(TSTART)); +} + +static void phytmac_restart(struct phytmac *pdata) +{ + int q; + struct phytmac_queue *queue; + + for (q = 0; q < pdata->queues_num; q++) { + queue = &pdata->queues[q]; + if (queue->tx_head != queue->tx_tail) { + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, + PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(TSTART)); + break; + } + } +} + +static int phytmac_tx_complete(const struct phytmac_dma_desc *desc) +{ + return PHYTMAC_GET_BITS(desc->desc1, TX_USED); +} + +static bool phytmac_rx_complete(const struct phytmac_dma_desc *desc) +{ + dma_addr_t addr; + bool used; + + used = desc->desc0 & PHYTMAC_BIT(RX_USED); + addr = ((u64)(desc->desc2) << 32); + addr |= desc->desc0 & 0xfffffff8; + + if (used != 0 && addr != 0) + return true; + else + return false; +} + +static int phytmac_rx_pkt_len(struct phytmac *pdata, const struct phytmac_dma_desc *desc) +{ + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + return desc->desc1 & PHYTMAC_JUMBO_FRAME_MASK; + else + return desc->desc1 & PHYTMAC_FRAME_MASK; +} + +static bool phytmac_rx_checksum(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + u32 check = value >> PHYTMAC_RX_CSUM_INDEX & 0x3; + + return (check == PHYTMAC_RX_CSUM_IP_TCP || check == PHYTMAC_RX_CSUM_IP_UDP); +} + +static bool phytmac_rx_single_buffer(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return ((value & PHYTMAC_BIT(RX_SOF)) && (value & PHYTMAC_BIT(RX_EOF))); +} + +static bool phytmac_rx_sof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RX_SOF)); +} + +static bool phytmac_rx_eof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RX_EOF)); +} + +static void phytmac_clear_rx_desc(struct phytmac_queue *queue, int begin, int end) +{ + unsigned int frag; + unsigned int tmp = end; + struct phytmac_dma_desc *desc; + + if (begin > end) + tmp = end + 
queue->pdata->rx_ring_size; + + for (frag = begin; frag != tmp; frag++) { + desc = phytmac_get_rx_desc(queue, frag); + desc->desc0 &= ~PHYTMAC_BIT(RX_USED); + } +} + +static void phytmac_mac_interface_config(struct phytmac *pdata, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 old_ctrl, old_config; + u32 ctrl, config, usxctl; + + old_ctrl = PHYTMAC_READ(pdata, PHYTMAC_NCTRL); + old_config = PHYTMAC_READ(pdata, PHYTMAC_NCONFIG); + ctrl = old_ctrl; + config = old_config; + + if (state->speed == SPEED_10000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_10000M); + else if (state->speed == SPEED_5000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_5000M); + else if (state->speed == SPEED_2500) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_2500M); + else if (state->speed == SPEED_1000) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_1000M); + else if (state->speed == SPEED_100 || state->speed == SPEED_10) + PHYTMAC_WRITE(pdata, PHYTMAC_HCONFIG, PHYTMAC_SPEED_100M); + + config &= ~(PHYTMAC_BIT(SGMII_EN) | PHYTMAC_BIT(PCS_EN) + | PHYTMAC_BIT(SPEED) | PHYTMAC_BIT(FD) | PHYTMAC_BIT(GM_EN)); + ctrl &= ~(PHYTMAC_BIT(HIGHSPEED) | PHYTMAC_BIT(2PT5G)); + + if (state->interface == PHY_INTERFACE_MODE_SGMII) { + config |= PHYTMAC_BIT(SGMII_EN) | PHYTMAC_BIT(PCS_EN); + if (state->speed == SPEED_1000) + config |= PHYTMAC_BIT(GM_EN); + else if (state->speed == SPEED_2500) { + ctrl |= PHYTMAC_BIT(2PT5G); + config |= PHYTMAC_BIT(GM_EN); + } + } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { + config |= PHYTMAC_BIT(PCS_EN) | PHYTMAC_BIT(GM_EN); + } else if (state->interface == PHY_INTERFACE_MODE_2500BASEX) { + ctrl |= PHYTMAC_BIT(2PT5G); + config |= PHYTMAC_BIT(PCS_EN) | PHYTMAC_BIT(GM_EN); + } else if (state->interface == PHY_INTERFACE_MODE_USXGMII) { + usxctl = PHYTMAC_READ(pdata, PHYTMAC_USXCTRL); + if (state->speed == SPEED_10000) { + usxctl = PHYTMAC_SET_BITS(usxctl, SERDES_RATE, PHYTMAC_SERDES_RATE_10G); + usxctl = 
PHYTMAC_SET_BITS(usxctl, USX_SPEED, PHYTMAC_SPEED_10000M); + } else if (state->speed == SPEED_5000) { + usxctl = PHYTMAC_SET_BITS(usxctl, SERDES_RATE, PHYTMAC_SERDES_RATE_5G); + usxctl = PHYTMAC_SET_BITS(usxctl, USX_SPEED, PHYTMAC_SPEED_5000M); + } + usxctl |= PHYTMAC_BIT(RX_EN) | PHYTMAC_BIT(TX_EN); + PHYTMAC_WRITE(pdata, PHYTMAC_USXCTRL, usxctl); + + config |= PHYTMAC_BIT(PCS_EN); + ctrl |= PHYTMAC_BIT(HIGHSPEED); + } + + if (state->duplex) + config |= PHYTMAC_BIT(FD); + + if (old_ctrl ^ ctrl) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, ctrl); + + if (old_config ^ config) + PHYTMAC_WRITE(pdata, PHYTMAC_NCONFIG, config); + + /* Disable AN for SGMII fixed link or speed equal to 2.5G, enable otherwise.*/ + if (state->interface == PHY_INTERFACE_MODE_SGMII) { + if (state->speed == SPEED_2500 || mode == MLO_AN_FIXED) + phytmac_enable_autoneg(pdata, 0); + else + phytmac_enable_autoneg(pdata, 1); + } + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + phytmac_enable_autoneg(pdata, 1); + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) + phytmac_enable_autoneg(pdata, 0); +} + +static unsigned int phytmac_pcs_get_link(struct phytmac *pdata, + phy_interface_t interface) +{ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_NSTATUS, PCS_LINK); + else if (interface == PHY_INTERFACE_MODE_USXGMII) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_USXSTATUS, USX_PCS_LINK); + + return 0; +} + +static void phytmac_clear_tx_desc(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc = NULL; + int i; + + for (i = 0; i < queue->pdata->tx_ring_size; i++) { + desc = phytmac_get_tx_desc(queue, i); + desc->desc2 = 0; + desc->desc0 = 0; + /* make newly desc1 to hardware */ + wmb(); + desc->desc1 = PHYTMAC_BIT(TX_USED); + } + desc->desc1 |= PHYTMAC_BIT(TX_WRAP); + + if (pdata->capacities & PHYTMAC_CAPS_TAILPTR) + 
PHYTMAC_WRITE(pdata, PHYTMAC_TAILPTR(queue->index), queue->tx_tail); +} + +static void phytmac_get_hw_stats(struct phytmac *pdata) +{ + u32 stats[45]; + int i, j; + u64 val; + u64 *p = &pdata->stats.tx_octets; + + for (i = 0 ; i < 45; i++) + stats[i] = PHYTMAC_READ(pdata, PHYTMAC_OCTTX + i * 4); + + for (i = 0, j = 0; i < 45; i++) { + if (i == 0 || i == 20) { + val = (u64)stats[i + 1] << 32 | stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } else { + if (i != 1 && i != 21) { + val = stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } + } + } +} + +static void phytmac_get_time(struct phytmac *pdata, struct timespec64 *ts) +{ + u32 ns, secl, sech; + + ns = PHYTMAC_READ(pdata, PHYTMAC_TN); + secl = PHYTMAC_READ(pdata, PHYTMAC_TSL); + sech = PHYTMAC_READ(pdata, PHYTMAC_TSH); + + ts->tv_nsec = ns; + ts->tv_sec = (((u64)sech << 32) | secl) & SEC_MAX_VAL; +} + +static void phytmac_set_time(struct phytmac *pdata, time64_t sec, long nsec) +{ + u32 secl, sech; + + secl = (u32)sec; + sech = (sec >> 32) & (0xffff); + + PHYTMAC_WRITE(pdata, PHYTMAC_TN, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TSH, sech); + PHYTMAC_WRITE(pdata, PHYTMAC_TSL, secl); + PHYTMAC_WRITE(pdata, PHYTMAC_TN, nsec); +} + +static void phytmac_clear_time(struct phytmac *pdata) +{ + u32 value; + + pdata->ts_incr.sub_ns = 0; + pdata->ts_incr.ns = 0; + + value = PHYTMAC_READ(pdata, PHYTMAC_TISN); + value = PHYTMAC_SET_BITS(value, SUB_NSECL, 0); + value = PHYTMAC_SET_BITS(value, SUB_NSECH, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TISN, value); + + value = PHYTMAC_READ(pdata, PHYTMAC_TI); + value = PHYTMAC_SET_BITS(value, INCR_NS, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TI, value); + + PHYTMAC_WRITE(pdata, PHYTMAC_TA, 0); +} + +static int phytmac_set_tsmode(struct phytmac *pdata, struct ts_ctrl *ctrl) +{ + if (ctrl->rx_control == TS_ALL_PTP_FRAMES) + PHYTMAC_WRITE(pdata, PHYTMAC_NCTRL, + PHYTMAC_READ(pdata, PHYTMAC_NCTRL) | PHYTMAC_BIT(STORE_RX_TS)); + + PHYTMAC_WRITE(pdata, 
PHYTMAC_TXBDCTRL, PHYTMAC_BITS(TX_TSMODE, ctrl->tx_control)); + PHYTMAC_WRITE(pdata, PHYTMAC_RXBDCTRL, PHYTMAC_BITS(RX_TSMODE, ctrl->rx_control)); + + return 0; +} + +static int phytmac_set_tsincr(struct phytmac *pdata, struct ts_incr *incr) +{ + u32 value; + + value = PHYTMAC_BITS(SUB_NSECL, incr->sub_ns) | + PHYTMAC_BITS(SUB_NSECH, incr->sub_ns >> 8); + PHYTMAC_WRITE(pdata, PHYTMAC_TISN, value); + PHYTMAC_WRITE(pdata, PHYTMAC_TI, incr->ns); + + return 0; +} + +static void phytmac_ptp_init_hw(struct phytmac *pdata) +{ + struct timespec64 ts; + + ts = ns_to_timespec64(ktime_to_ns(ktime_get_real())); + phytmac_set_time(pdata, ts.tv_sec, ts.tv_nsec); + + phytmac_set_tsincr(pdata, &pdata->ts_incr); +} + +static int phytmac_adjust_fine(struct phytmac *pdata, long ppm, bool negative) +{ + struct ts_incr ts_incr; + u32 tmp; + u64 adj; + + ts_incr.ns = pdata->ts_incr.ns; + ts_incr.sub_ns = pdata->ts_incr.sub_ns; + + tmp = ((u64)ts_incr.ns << PHYTMAC_SUB_NSECL_INDEX) + ts_incr.sub_ns; + adj = ((u64)ppm * tmp + (USEC_PER_SEC >> 1)) >> PHYTMAC_SUB_NSECL_INDEX; + + adj = div_u64(adj, USEC_PER_SEC); + adj = negative ? 
(tmp - adj) : (tmp + adj); + + ts_incr.ns = (adj >> PHYTMAC_SUB_NSEC_WIDTH) + & ((1 << PHYTMAC_SUB_NSECL_WIDTH) - 1); + ts_incr.sub_ns = adj & ((1 << PHYTMAC_SUB_NSEC_WIDTH) - 1); + + phytmac_set_tsincr(pdata, &ts_incr); + + return 0; +} + +static int phytmac_adjust_time(struct phytmac *pdata, s64 delta, int neg) +{ + u32 adj; + + if (delta > PHYTMAC_ADJUST_SEC_MAX) { + struct timespec64 now, then; + + if (neg) + then = ns_to_timespec64(-delta); + else + then = ns_to_timespec64(delta); + phytmac_get_time(pdata, &now); + now = timespec64_add(now, then); + phytmac_set_time(pdata, now.tv_sec, now.tv_nsec); + } else { + adj = (neg << PHYTMAC_INCR_ADD_INDEX) | delta; + PHYTMAC_WRITE(pdata, PHYTMAC_TA, adj); + } + + return 0; +} + +static int phytmac_ts_valid(struct phytmac *pdata, struct phytmac_dma_desc *desc, int direction) +{ + int ts_valid = 0; + + if (direction == PHYTMAC_TX) + ts_valid = desc->desc1 & PHYTMAC_BIT(TX_TS_VALID); + else if (direction == PHYTMAC_RX) + ts_valid = desc->desc0 & PHYTMAC_BIT(RX_TS_VALID); + return ts_valid; +} + +static void phytmac_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 ts_2, struct timespec64 *ts) +{ + struct timespec64 ts2; + + ts->tv_sec = (PHYTMAC_GET_BITS(ts_2, DMA_SECH) << 2) | + PHYTMAC_GET_BITS(ts_1, DMA_SECL); + ts->tv_nsec = PHYTMAC_GET_BITS(ts_1, DMA_NSEC); + + phytmac_get_time(pdata, &ts2); + + if (((ts->tv_sec ^ ts2.tv_sec) & (PHYTMAC_DMA_SEC_TOP >> 1)) != 0) + ts->tv_sec -= PHYTMAC_DMA_SEC_TOP; + + ts->tv_sec += (ts2.tv_sec & (~PHYTMAC_DMA_SEC_MASK)); +} + +static unsigned int phytmac_get_ts_rate(struct phytmac *pdata) +{ + return 300000000; +} + +struct phytmac_hw_if phytmac_1p0_hw = { + .reset_hw = phytmac_reset_hw, + .init_hw = phytmac_init_hw, + .init_ring_hw = phytmac_init_ring_hw, + .get_feature = phytmac_get_feature_all, + .get_regs = phytmac_get_regs, + .get_stats = phytmac_get_hw_stats, + .set_mac_address = phytmac_set_mac_addr, + .get_mac_address = phytmac_get_mac_addr, + .mdio_read = 
phytmac_mdio_data_read, + .mdio_write = phytmac_mdio_data_write, + .poweron = phytmac_powerup_hw, + .set_wol = phytmac_set_wake, + .enable_promise = phytmac_enable_promise, + .enable_multicast = phytmac_enable_multicast, + .set_hash_table = phytmac_set_mc_hash, + .enable_rx_csum = phytmac_enable_rxcsum, + .enable_tx_csum = phytmac_enable_txcsum, + .enable_mdio_control = phytmac_enable_mdio, + .enable_autoneg = phytmac_enable_autoneg, + .enable_pause = phytmac_enable_pause, + .enable_network = phytmac_enable_network, + .add_fdir_entry = phytmac_add_fdir_entry, + .del_fdir_entry = phytmac_del_fdir_entry, + /* mac config */ + .mac_config = phytmac_mac_interface_config, + .mac_linkup = phytmac_mac_linkup, + .mac_linkdown = phytmac_mac_linkdown, + .pcs_linkup = phytmac_pcs_linkup, + .pcs_linkdown = phytmac_pcs_linkdown, + .get_link = phytmac_pcs_get_link, + /* irq */ + .enable_irq = phytmac_enable_irq, + .disable_irq = phytmac_disable_irq, + .clear_irq = phytmac_clear_irq, + .get_irq = phytmac_get_irq, + .get_intx_mask = phytmac_get_intx_mask, + /* tx and rx */ + .tx_map = phytmac_tx_map_desc, + .transmit = phytmac_tx_start, + .restart = phytmac_restart, + .tx_complete = phytmac_tx_complete, + .rx_complete = phytmac_rx_complete, + .get_rx_pkt_len = phytmac_rx_pkt_len, + .init_rx_map = phytmac_init_rx_map_desc, + .rx_map = phytmac_rx_map_desc, + .rx_checksum = phytmac_rx_checksum, + .rx_single_buffer = phytmac_rx_single_buffer, + .rx_pkt_start = phytmac_rx_sof, + .rx_pkt_end = phytmac_rx_eof, + .clear_rx_desc = phytmac_clear_rx_desc, + .clear_tx_desc = phytmac_clear_tx_desc, + .zero_rx_desc_addr = phytmac_zero_rx_desc_addr, + /* ptp */ + .init_ts_hw = phytmac_ptp_init_hw, + .set_time = phytmac_set_time, + .clear_time = phytmac_clear_time, + .get_time = phytmac_get_time, + .set_ts_config = phytmac_set_tsmode, + .set_incr = phytmac_set_tsincr, + .adjust_fine = phytmac_adjust_fine, + .adjust_time = phytmac_adjust_time, + .ts_valid = phytmac_ts_valid, + .get_timestamp = 
phytmac_get_dma_ts, + .get_ts_rate = phytmac_get_ts_rate, +}; +EXPORT_SYMBOL_GPL(phytmac_1p0_hw); + diff --git a/drivers/net/ethernet/phytium/phytmac_v1.h b/drivers/net/ethernet/phytium/phytmac_v1.h new file mode 100644 index 0000000000000000000000000000000000000000..6936cb0906bff0a7ec55a1fa5585ad0848fa0051 --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_v1.h @@ -0,0 +1,473 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _PHYTMAC_V1_H +#define _PHYTMAC_V1_H + +extern struct phytmac_hw_if phytmac_1p0_hw; + +#define PHYTMAC_FRAME_MASK 0x1fff +#define PHYTMAC_JUMBO_FRAME_MASK 0x3fff + +#define PHYTMAC_SPEED_100M 0 +#define PHYTMAC_SPEED_1000M 1 +#define PHYTMAC_SPEED_2500M 2 +#define PHYTMAC_SPEED_5000M 3 +#define PHYTMAC_SPEED_10000M 4 +#define PHYTMAC_SERDES_RATE_5G 0 +#define PHYTMAC_SERDES_RATE_10G 1 + +#define PHYTMAC_NCTRL 0x0000 +#define PHYTMAC_NCONFIG 0x0004 +#define PHYTMAC_NSTATUS 0x0008 +#define PHYTMAC_DCONFIG 0x0010 +#define PHYTMAC_RXPTR_Q0 0x0018 +#define PHYTMAC_TXPTR_Q0 0x001C +#define PHYTMAC_IS 0x0024 +#define PHYTMAC_IE 0x0028 +#define PHYTMAC_ID 0x002C +#define PHYTMAC_IM 0x0030 +#define PHYTMAC_MDATA 0x0034 +#define PHYTMAC_HCONFIG 0x0050 +#define PHYTMAC_AXICTRL 0x0054 +#define PHYTMAC_INT_MODERATION 0x005C +#define PHYTMAC_HASHB 0x0080 +#define PHYTMAC_HASHT 0x0084 +#define PHYTMAC_MAC1B 0x0088 +#define PHYTMAC_MAC1T 0x008C +#define PHYTMAC_WOL 0x00B8 +#define PHYTMAC_OCTTX 0x0100 +#define PHYTMAC_TISN 0x01BC +#define PHYTMAC_TSH 0x01C0 +#define PHYTMAC_TSL 0x01D0 +#define PHYTMAC_TN 0x01D4 +#define PHYTMAC_TA 0x01D8 +#define PHYTMAC_TI 0x01DC +#define PHYTMAC_PCSCTRL 0x0200 +#define PHYTMAC_PCSSTATUS 0x0204 +#define PHYTMAC_PCSANLPBASE 0x0214 +#define PHYTMAC_DEFAULT1 0x0280 /* Default HW Config 1 */ +#define PHYTMAC_DEFAULT2 0x0294 /* Default HW Config 2 */ +#define PHYTMAC_DEFAULT3 0x029C /* Default HW Config 3 */ +#define PHYTMAC_DEFAULT4 0x02A4 /* Default HW Config 4 */ +#define PHYTMAC_DEFAULT5 0x02AC /* Default HW 
Config 5 */ +#define PHYTMAC_USXCTRL 0x0A80 +#define PHYTMAC_USXSTATUS 0x0A88 +#define PHYTMAC_TXBDCTRL 0x04CC +#define PHYTMAC_RXBDCTRL 0x04D0 + +/* Fdir match registers */ +#define PHYTMAC_FDIR(i) (0x0540 + ((i) << 2)) + +/* EtherType registers */ +#define PHYTMAC_ETHT(i) (0x06E0 + ((i) << 2)) + +/* Fdir compare registers */ +#define PHYTMAC_COMP1(i) (0x0700 + ((i) << 3)) +#define PHYTMAC_COMP2(i) (0x0704 + ((i) << 3)) + +#define PHYTMAC_ISR(i) (0x0400 + ((i) << 2)) +#define PHYTMAC_TXPTR(i) (0x0440 + ((i) << 2)) +#define PHYTMAC_RXPTR(i) (0x0480 + ((i) << 2)) +#define PHYTMAC_RBQS(i) (0x04A0 + ((i) << 2)) +#define PHYTMAC_TXPTRH(i) (0x04c8) +#define PHYTMAC_RXPTRH(i) (0x04d4) + +#define PHYTMAC_IER(i) (0x0600 + ((i) << 2)) +#define PHYTMAC_IDR(i) (0x0620 + ((i) << 2)) +#define PHYTMAC_IMR(i) (0x0640 + ((i) << 2)) +#define PHYTMAC_TAIL_ENABLE (0x0e7c) +#define PHYTMAC_TAILPTR(i) (0x0e80 + ((i) << 2)) + +#define PHYTMAC_PHY_INT_ENABLE 0x1C88 +#define PHYTMAC_PHY_INT_CLEAR 0x1C8C +#define PHYTMAC_PHY_INT_STATE 0x1C90 +#define PHYTMAC_INTX_IRQ_MASK 0x1C7C + +#define PHYTMAC_READ_NSTATUS(pdata) PHYTMAC_READ(pdata, PHYTMAC_NSTATUS) + +/* Ethernet Network Control Register */ +#define PHYTMAC_RE_INDEX 2 /* Receive enable */ +#define PHYTMAC_RE_WIDTH 1 +#define PHYTMAC_TE_INDEX 3 /* Transmit enable */ +#define PHYTMAC_TE_WIDTH 1 +#define PHYTMAC_MPE_INDEX 4 /* Management port enable */ +#define PHYTMAC_MPE_WIDTH 1 +#define PHYTMAC_CLEARSTAT_INDEX 5 /* Clear stats regs */ +#define PHYTMAC_CLEARSTAT_WIDTH 1 +#define PHYTMAC_TSTART_INDEX 9 /* Start transmission */ +#define PHYTMAC_TSTART_WIDTH 1 +#define PHYTMAC_THALT_INDEX 10 /* Transmit halt */ +#define PHYTMAC_THALT_WIDTH 1 +#define PHYTMAC_STORE_RX_TS_INDEX 15 +#define PHYTMAC_STORE_RX_TS_WIDTH 1 +#define PHYTMAC_OSSMODE_INDEX 24 /* Enable One Step Synchro Mode */ +#define PHYTMAC_OSSMODE_WIDTH 1 +#define PHYTMAC_2PT5G_INDEX 29 /* 2.5G operation selected */ +#define PHYTMAC_2PT5G_WIDTH 1 +#define PHYTMAC_HIGHSPEED_INDEX 
31 /* High speed enable */ +#define PHYTMAC_HIGHSPEED_WIDTH 1 + +/* Ethernet Network Config Register */ +#define PHYTMAC_SPEED_INDEX 0 /* Speed */ +#define PHYTMAC_SPEED_WIDTH 1 +#define PHYTMAC_FD_INDEX 1 /* Full duplex */ +#define PHYTMAC_FD_WIDTH 1 +#define PHYTMAC_JUMBO_EN_INDEX 3 /* Transmit enable */ +#define PHYTMAC_JUMBO_EN_WIDTH 1 +#define PHYTMAC_PROMISC_INDEX 4 /* Copy all frames */ +#define PHYTMAC_PROMISC_WIDTH 1 +#define PHYTMAC_NO_BCAST_INDEX 5 /* No broadcast */ +#define PHYTMAC_NO_BCAST_WIDTH 1 +#define PHYTMAC_MH_EN_INDEX 6 /* Multicast hash enable */ +#define PHYTMAC_MH_EN_WIDTH 1 +#define PHYTMAC_RCV_BIG_INDEX 8 +#define PHYTMAC_RCV_BIG_WIDTH 1 +#define PHYTMAC_GM_EN_INDEX 10 /* Gigabit mode enable */ +#define PHYTMAC_GM_EN_WIDTH 1 +#define PHYTMAC_PCS_EN_INDEX 11 /* PCS select */ +#define PHYTMAC_PCS_EN_WIDTH 1 +#define PHYTMAC_PAUSE_EN_INDEX 13 /* Pause enable */ +#define PHYTMAC_PAUSE_EN_WIDTH 1 +#define PHYTMAC_FCS_REMOVE_INDEX 17 /* FCS remove */ +#define PHYTMAC_FCS_REMOVE_WIDTH 1 +#define PHYTMAC_MCD_INDEX 18 /* MDC clock division */ +#define PHYTMAC_MCD_WIDTH 3 +#define PHYTMAC_DBW64_INDEX 21 /* Data bus width */ +#define PHYTMAC_DBW64_WIDTH 1 +#define PHYTMAC_DBW128_INDEX 22 /* Data bus width */ +#define PHYTMAC_DBW128_WIDTH 1 +#define PHYTMAC_DBW_32 1 +#define PHYTMAC_DBW_64 2 +#define PHYTMAC_DBW_128 4 +#define PHYTMAC_RCO_EN_INDEX 24 /* Receive checksum offload enable */ +#define PHYTMAC_RCO_EN_WIDTH 1 +#define PHYTMAC_IGNORE_RX_FCS_INDEX 26 +#define PHYTMAC_IGNORE_RX_FCS_WIDTH 1 +#define PHYTMAC_SGMII_EN_INDEX 27 /* Sgmii mode enable */ +#define PHYTMAC_SGMII_EN_WIDTH 1 + +/* Ethernet Network Status Register */ +#define PHYTMAC_PCS_LINK_INDEX 0 /* PCS link status */ +#define PHYTMAC_PCS_LINK_WIDTH 1 +#define PHYTMAC_MDIO_IDLE_INDEX 2 /* Mdio idle */ +#define PHYTMAC_MDIO_IDLE_WIDTH 1 +#define PHYTMAC_PCS_FD_INDEX 3 /* PCS auto negotiation duplex resolution */ +#define PHYTMAC_PCS_FD__WIDTH 1 + +/* Ethernet Network Dma config 
Register */ +#define PHYTMAC_BURST_INDEX 0 /* Amba burst length */ +#define PHYTMAC_BURST_WIDTH 5 +#define PHYTMAC_ENDIA_PKT_INDEX 6 +#define PHYTMAC_ENDIA_PKT_WIDTH 1 +#define PHYTMAC_ENDIA_DESC_INDEX 7 +#define PHYTMAC_ENDIA_DESC_WIDTH 1 +#define PHYTMAC_TCO_EN_INDEX 11 /* Tx Checksum Offload en */ +#define PHYTMAC_TCO_EN_WIDTH 1 +#define PHYTMAC_RX_BUF_LEN_INDEX 16 /* DMA receive buffer size */ +#define PHYTMAC_RX_BUF_LEN_WIDTH 8 +#define PHYTMAC_RX_EXBD_EN_INDEX 28 /* Enable RX extended BD mode */ +#define PHYTMAC_RX_EXBD_EN_WIDTH 1 +#define PHYTMAC_TX_EXBD_EN_INDEX 29 /* Enable TX extended BD mode */ +#define PHYTMAC_TX_EXBD_EN_WIDTH 1 +#define PHYTMAC_ABW_INDEX 30 /* DMA address bus width */ +#define PHYTMAC_ABW_WIDTH 1 + +/* Int status/Enable/Disable/Mask Register */ +#define PHYTMAC_RXCOMP_INDEX 1 /* Rx complete */ +#define PHYTMAC_RXCOMP_WIDTH 1 +#define PHYTMAC_RUB_INDEX 2 /* Rx used bit read */ +#define PHYTMAC_RUB_WIDTH 1 +#define PHYTMAC_BUS_ERR_INDEX 6 /* AMBA error */ +#define PHYTMAC_BUS_ERR_WIDTH 1 +#define PHYTMAC_TXCOMP_INDEX 7 /* Tx complete */ +#define PHYTMAC_TXCOMP_WIDTH 1 +#define PHYTMAC_RXOVERRUN_INDEX 10 /* Rx overrun */ +#define PHYTMAC_RXOVERRUN_WIDTH 1 +#define PHYTMAC_RESP_ERR_INDEX 11 /* Resp not ok */ +#define PHYTMAC_RESP_ERR_WIDTH 1 + +/* Mdio read/write Register */ +#define PHYTMAC_DATA_INDEX 0 /* Data */ +#define PHYTMAC_DATA_WIDTH 16 +#define PHYTMAC_MUST_INDEX 16 /* Must Be 10 */ +#define PHYTMAC_MUST_WIDTH 2 +#define PHYTMAC_REG_ADDR_INDEX 18 /* Register address */ +#define PHYTMAC_REG_ADDR_WIDTH 5 +#define PHYTMAC_PHY_ADDR_INDEX 23 /* Phy address */ +#define PHYTMAC_PHY_ADDR_WIDTH 5 +#define PHYTMAC_OPS_INDEX 28 +#define PHYTMAC_OPS_WIDTH 2 +#define PHYTMAC_CLAUSE_SEL_INDEX 30 +#define PHYTMAC_CLAUSE_SEL_WIDTH 1 +#define PHYTMAC_CLAUSE_C22 1 +#define PHYTMAC_CLAUSE_C45 0 +#define PHYTMAC_OPS_C45_ADDR 0 +#define PHYTMAC_OPS_C45_WRITE 1 +#define PHYTMAC_OPS_C45_READ 3 +#define PHYTMAC_OPS_C22_WRITE 1 +#define 
PHYTMAC_OPS_C22_READ 2 + +/* hs mac config register */ +#define PHYTMAC_HS_SPEED_INDEX 0 +#define PHYTMAC_HS_SPEED_WIDTH 3 +#define PHYTMAC_HS_SPEED_100M 0 +#define PHYTMAC_HS_SPEED_1000M 1 +#define PHYTMAC_HS_SPEED_2500M 2 +#define PHYTMAC_HS_SPEED_5000M 3 +#define PHYTMAC_HS_SPEED_10G 4 + +/* WOL register */ +#define PHYTMAC_ARP_IP_INDEX 0 +#define PHYTMAC_ARP_IP_WIDTH 16 +#define PHYTMAC_MAGIC_INDEX 16 +#define PHYTMAC_MAGIC_WIDTH 1 +#define PHYTMAC_ARP_INDEX 17 +#define PHYTMAC_ARP_WIDTH 1 +#define PHYTMAC_UCAST_INDEX 18 +#define PHYTMAC_UCAST_WIDTH 1 +#define PHYTMAC_MCAST_INDEX 19 +#define PHYTMAC_MCAST_WIDTH 1 + +/* PCSCTRL register */ +#define PHYTMAC_AUTONEG_INDEX 12 +#define PHYTMAC_AUTONEG_WIDTH 1 +#define PHYTMAC_PCS_RESET_INDEX 15 +#define PHYTMAC_PCS_RESET_WIDTH 1 + +/* DEFAULT1 register */ +#define PHYTMAC_DBW_INDEX 25 +#define PHYTMAC_DBW_WIDTH 3 + +/* DEFAULT2 register */ +#define PHYTMAC_DAW64_INDEX 23 +#define PHYTMAC_DAW64_WIDTH 1 + +/* DEFAULT3 register */ +#define PHYTMAC_SCR2CMP_INDEX 0 +#define PHYTMAC_SCR2CMP_WIDTH 8 +#define PHYTMAC_SCR2ETH_INDEX 8 +#define PHYTMAC_SCR2ETH_WIDTH 8 + +/* DEFAULT4 register */ +#define PHYTMAC_TXDESCRD_INDEX 12 +#define PHYTMAC_TXDESCRD_WIDTH 4 +#define PHYTMAC_RXDESCRD_INDEX 8 +#define PHYTMAC_RXDESCRD_WIDTH 4 + +/* USXCTRL register */ +#define PHYTMAC_RX_EN_INDEX 0 +#define PHYTMAC_RX_EN_WIDTH 1 +#define PHYTMAC_TX_EN_INDEX 1 +#define PHYTMAC_TX_EN_WIDTH 1 +#define PHYTMAC_RX_SYNC_RESET_INDEX 2 +#define PHYTMAC_RX_SYNC_RESET_WIDTH 1 +#define PHYTMAC_SERDES_RATE_INDEX 12 +#define PHYTMAC_SERDES_RATE_WIDTH 2 +#define PHYTMAC_USX_SPEED_INDEX 14 +#define PHYTMAC_USX_SPEED_WIDTH 3 + +/* Bitfields in USX_STATUS. 
*/ +#define PHYTMAC_USX_PCS_LINK_INDEX 0 +#define PHYTMAC_USX_PCS_LINK_WIDTH 1 + +/* Bitfields in PHYTMAC_TISN */ +#define PHYTMAC_SUB_NSECH_INDEX 0 +#define PHYTMAC_SUB_NSECH_WIDTH 16 +#define PHYTMAC_SUB_NSECL_INDEX 24 +#define PHYTMAC_SUB_NSECL_WIDTH 8 +#define PHYTMAC_SUB_NSEC_WIDTH 24 + +/* Bitfields in PHYTMAC_TSH */ +#define PHYTMAC_SECH_INDEX 0 +#define PHYTMAC_SECH_WIDTH 16 + +/* Bitfields in PHYTMAC_TSL */ +#define PHYTMAC_SECL_INDEX 0 +#define PHYTMAC_SECL_WIDTH 32 + +/* Bitfields in PHYTMAC_TN */ +#define PHYTMAC_NSEC_INDEX 0 +#define PHYTMAC_NSEC_WIDTH 30 + +/* Bitfields in PHYTMAC_TA */ +#define PHYTMAC_INCR_SEC_INDEX 0 +#define PHYTMAC_INCR_SEC_WIDTH 30 +#define PHYTMAC_INCR_ADD_INDEX 31 +#define PHYTMAC_INCR_ADD_WIDTH 1 +#define PHYTMAC_ADJUST_SEC_MAX ((1 << PHYTMAC_INCR_SEC_WIDTH) - 1) + +/* Bitfields in PHYTMAC_TI */ +#define PHYTMAC_INCR_NS_INDEX 0 +#define PHYTMAC_INCR_NS_WIDTH 8 + +/* PHYTMAC_TXBDCTRL register */ +#define PHYTMAC_TX_TSMODE_INDEX 4 +#define PHYTMAC_TX_TSMODE_WIDTH 2 + +#define PHYTMAC_RX_TSMODE_INDEX 4 +#define PHYTMAC_RX_TSMODE_WIDTH 2 + +/* Bitfields in PHYTMAC_FDIR */ +#define PHYTMAC_QUEUE_NUM_INDEX 0 +#define PHYTMAC_QUEUE_NUM_WIDTH 4 +#define PHYTMAC_VLAN_PRI_INDEX 4 +#define PHYTMAC_VLAN_PRI_WIDTH 3 +#define PHYTMAC_VLAN_EN_INDEX 8 +#define PHYTMAC_VLAN_EN_WIDTH 1 +#define PHYTMAC_ETH_TYPE_INDEX 9 +#define PHYTMAC_ETH_TYPE_WIDTH 3 +#define PHYTMAC_ETH_EN_INDEX 12 +#define PHYTMAC_ETH_EN_WIDTH 1 +#define PHYTMAC_CA_INDEX 13 +#define PHYTMAC_CA_WIDTH 5 +#define PHYTMAC_CA_EN_INDEX 18 +#define PHYTMAC_CA_EN_WIDTH 1 +#define PHYTMAC_CB_INDEX 19 +#define PHYTMAC_CB_WIDTH 5 +#define PHYTMAC_CB_EN_INDEX 24 +#define PHYTMAC_CB_EN_WIDTH 1 +#define PHYTMAC_CC_INDEX 25 +#define PHYTMAC_CC_WIDTH 5 +#define PHYTMAC_CC_EN_INDEX 30 +#define PHYTMAC_CC_EN_WIDTH 1 + +/* Bitfields in PHYTMAC_ETHERTYPE */ +#define PHYTMAC_ETHTYPE_VALUE_INDEX 0 +#define PHYTMAC_ETHTYPE_VALUE_WIDTH 16 + +/* Bitfields in PHYTMAC_COMP1 */ +#define 
PHYTMAC_SPORT_INDEX 0 +#define PHYTMAC_SPORT_WIDTH 16 +#define PHYTMAC_DPORT_INDEX 16 +#define PHYTMAC_DPORT_WIDTH 16 + +/* Bitfields in PHYTMAC_COMP2 */ +#define PHYTMAC_OFFSET_INDEX 0 +#define PHYTMAC_OFFSET_WIDTH 7 +#define ETHTYPE_SIP_OFFSET 12 +#define ETHTYPE_DIP_OFFSET 16 +#define IPHEAD_SPORT_OFFSET 0 +#define IPHEAD_DPORT_OFFSET 2 +#define PHYTMAC_OFFSET_TYPE_INDEX 7 +#define PHYTMAC_OFFSET_TYPE_WIDTH 2 +#define PHYTMAC_OFFSET_BEGIN 0 +#define PHYTMAC_OFFSET_AFTER_L2HEAD 1 +#define PHYTMAC_OFFSET_AFTER_L3HEAD 2 +#define PHYTMAC_OFFSET_AFTER_L4HEAD 3 +#define PHYTMAC_DIS_MASK_INDEX 9 +#define PHYTMAC_DIS_MASK_WIDTH 1 +#define PHYTMAC_VLAN_ID_INDEX 10 +#define PHYTMAC_VLAN_ID_WIDTH 1 + +/* Bitfields in TAILPTR */ +#define PHYTMAC_TXTAIL_UPDATE_INDEX 31 /* Update tx tail */ +#define PHYTMAC_TXTAIL_UPDATE_WIDTH 1 + +/* Bitfields in TAIL_ENABLE */ +#define PHYTMAC_TXTAIL_ENABLE_INDEX 0 /* Enable tx tail */ +#define PHYTMAC_TXTAIL_ENABLE_WIDTH 1 + +/* Bitfields in INT ENABLE */ +#define PHYTMAC_WOL_RECEIVE_ENABLE_INDEX 28 /* Enable wol_event_receive */ +#define PHYTMAC_WOL_RECEIVE_ENABLE_WIDTH 1 + +/* Bitfields in INT DISABLE */ +#define PHYTMAC_WOL_RECEIVE_DISABLE_INDEX 28 /* Disable wol_event_receive */ +#define PHYTMAC_WOL_RECEIVE_DISABLE_WIDTH 1 + +#define PHYTMAC_TSEC_WIDTH (PHYTMAC_SECH_WIDTH + PHYTMAC_SECL_WIDTH) +#define SEC_MAX_VAL (((u64)1 << PHYTMAC_TSEC_WIDTH) - 1) +#define NSEC_MAX_VAL ((1 << PHYTMAC_NSEC_WIDTH) - 1) + +/* rx dma desc bit */ +/* DMA descriptor bitfields */ +#define PHYTMAC_RX_USED_INDEX 0 +#define PHYTMAC_RX_USED_WIDTH 1 +#define PHYTMAC_RX_WRAP_INDEX 1 +#define PHYTMAC_RX_WRAP_WIDTH 1 +#define PHYTMAC_RX_TS_VALID_INDEX 2 +#define PHYTMAC_RX_TS_VALID_WIDTH 1 +#define PHYTMAC_RX_WADDR_INDEX 2 +#define PHYTMAC_RX_WADDR_WIDTH 30 + +#define PHYTMAC_RX_FRMLEN_INDEX 0 +#define PHYTMAC_RX_FRMLEN_WIDTH 12 +#define PHYTMAC_RX_INDEX_INDEX 12 +#define PHYTMAC_RX_INDEX_WIDTH 2 +#define PHYTMAC_RX_SOF_INDEX 14 +#define PHYTMAC_RX_SOF_WIDTH 1
+#define PHYTMAC_RX_EOF_INDEX 15 +#define PHYTMAC_RX_EOF_WIDTH 1 +#define PHYTMAC_RX_CFI_INDEX 16 +#define PHYTMAC_RX_CFI_WIDTH 1 +#define PHYTMAC_RX_VLAN_PRI_INDEX 17 +#define PHYTMAC_RX_VLAN_PRI_WIDTH 3 +#define PHYTMAC_RX_PRI_TAG_INDEX 20 +#define PHYTMAC_RX_PRI_TAG_WIDTH 1 +#define PHYTMAC_RX_VLAN_TAG_INDEX 21 +#define PHYTMAC_RX_VLAN_TAG_WIDTH 1 +#define PHYTMAC_RX_UHASH_MATCH_INDEX 29 +#define PHYTMAC_RX_UHASH_MATCH_WIDTH 1 +#define PHYTMAC_RX_MHASH_MATCH_INDEX 30 +#define PHYTMAC_RX_MHASH_MATCH_WIDTH 1 +#define PHYTMAC_RX_BROADCAST_INDEX 31 +#define PHYTMAC_RX_BROADCAST_WIDTH 1 + +#define PHYTMAC_RX_FRMLEN_MASK 0x1FFF +#define PHYTMAC_RX_JFRMLEN_MASK 0x3FFF + +/* RX checksum offload disabled: bit 24 clear in NCFGR */ +#define PHYTMAC_RX_TYPEID_MATCH_INDEX 22 +#define PHYTMAC_RX_TYPEID_MATCH_WIDTH 2 + +/* RX checksum offload enabled: bit 24 set in NCFGR */ +#define PHYTMAC_RX_CSUM_INDEX 22 +#define PHYTMAC_RX_CSUM_WIDTH 2 + +/* tx dma desc bit */ +#define PHYTMAC_TX_FRMLEN_INDEX 0 +#define PHYTMAC_TX_FRMLEN_WIDTH 14 +#define PHYTMAC_TX_LAST_INDEX 15 +#define PHYTMAC_TX_LAST_WIDTH 1 +#define PHYTMAC_TX_NOCRC_INDEX 16 +#define PHYTMAC_TX_NOCRC_WIDTH 1 +#define PHYTMAC_MSS_MFS_INDEX 16 +#define PHYTMAC_MSS_MFS_WIDTH 14 +#define PHYTMAC_TX_LSO_INDEX 17 +#define PHYTMAC_TX_LSO_WIDTH 2 +#define PHYTMAC_TX_TCP_SEQ_SRC_INDEX 19 +#define PHYTMAC_TX_TCP_SEQ_SRC_WIDTH 1 +#define PHYTMAC_TX_TS_VALID_INDEX 23 +#define PHYTMAC_TX_TS_VALID_WIDTH 1 +#define PHYTMAC_TX_BUF_EXHAUSTED_INDEX 27 +#define PHYTMAC_TX_BUF_EXHAUSTED_WIDTH 1 +#define PHYTMAC_TX_UNDERRUN_INDEX 28 +#define PHYTMAC_TX_UNDERRUN_WIDTH 1 +#define PHYTMAC_TX_ERROR_INDEX 29 +#define PHYTMAC_TX_ERROR_WIDTH 1 +#define PHYTMAC_TX_WRAP_INDEX 30 +#define PHYTMAC_TX_WRAP_WIDTH 1 +#define PHYTMAC_TX_USED_INDEX 31 +#define PHYTMAC_TX_USED_WIDTH 1 + +/* Transmit DMA buffer descriptor Word 4 */ +#define PHYTMAC_DMA_NSEC_INDEX 0 +#define PHYTMAC_DMA_NSEC_WIDTH 30 +#define PHYTMAC_DMA_SECL_INDEX 30 +#define 
PHYTMAC_DMA_SECL_WIDTH 2 + +/* Transmit DMA buffer descriptor Word 4 */ +#define PHYTMAC_DMA_SECH_INDEX 0 +#define PHYTMAC_DMA_SECH_WIDTH 4 +#define PHYTMAC_DMA_SEC_MASK 0x3f +#define PHYTMAC_DMA_SEC_TOP 0x40 + +/* Buffer descriptor constants */ +#define PHYTMAC_RX_CSUM_NONE 0 +#define PHYTMAC_RX_CSUM_IP_ONLY 1 +#define PHYTMAC_RX_CSUM_IP_TCP 2 +#define PHYTMAC_RX_CSUM_IP_UDP 3 + +/* limit RX checksum offload to TCP and UDP packets */ +#define PHYTMAC_RX_CSUM_CHECKED_MASK 2 + +#endif diff --git a/drivers/net/ethernet/phytium/phytmac_v2.c b/drivers/net/ethernet/phytium/phytmac_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..ba05b2944d458d0e7e5b1216273dcaa4528ef01b --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_v2.c @@ -0,0 +1,1248 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytmac.h" +#include "phytmac_v2.h" + +static int phytmac_msg_send(struct phytmac *pdata, u16 cmd_id, + u16 cmd_subid, void *data, int len, int wait) +{ + int index = 0; + struct phytmac_msg_info msg; + struct phytmac_msg_info msg_rx; + int ret = 0; + + ++pdata->msg_ring.tx_msg_tail; + if (pdata->msg_ring.tx_msg_tail > pdata->msg_ring.tx_msg_ring_size) + pdata->msg_ring.tx_msg_tail = 1; + index = pdata->msg_ring.tx_msg_tail; + + wait = 1; + memset(&msg, 0, sizeof(msg)); + memset(&msg_rx, 0, sizeof(msg_rx)); + msg.module_id = PHYTMAC_MODULE_ID_GMAC; + msg.cmd_id = cmd_id; + msg.cmd_subid = cmd_subid; + msg.flags = PHYTMAC_FLAGS_MSG_NOINT; + + if (len) + memcpy(&msg.para[0], data, len); + + if (netif_msg_hw(pdata)) { + netdev_info(pdata->ndev, "tx msg: cmdid:%d, subid:%d, flags:%d, len:%d, tail:%d\n", + msg.cmd_id, msg.cmd_subid, msg.flags, len, pdata->msg_ring.tx_msg_tail); + } + + memcpy(pdata->msg_regs + PHYTMAC_MSG(index), &msg, sizeof(msg)); + PHYTMAC_WRITE(pdata, PHYTMAC_TX_MSG_TAIL, + 
pdata->msg_ring.tx_msg_tail | PHYTMAC_BIT(TX_MSG_INT)); + + if (wait) { + memcpy(&msg_rx, pdata->msg_regs + PHYTMAC_MSG(index), MSG_HDR_LEN); + while (!(msg_rx.flags & 0x1)) { + cpu_relax(); + memcpy(&msg_rx, pdata->msg_regs + PHYTMAC_MSG(index), MSG_HDR_LEN); + } + } + + return ret; +} + +void phytmac_reset_hw(struct phytmac *pdata) +{ + int q; + u16 cmd_id, cmd_subid; + struct phytmac_ring_info ring; + + /* Disable and clear all interrupts and disable queues */ + for (q = 0; q < pdata->queues_max_num; ++q) { + PHYTMAC_WRITE(pdata, PHYTMAC_INT_DR(q), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_SR(q), -1); + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(q), 0); + } + + /* reset hw rx/tx enable */ + cmd_id = PHYTMAC_MSG_CMD_DEFAULT; + cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_HW; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + /* reset tx ring */ + memset(&ring, 0, sizeof(ring)); + ring.queue_num = pdata->queues_max_num; + cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_TX_QUEUE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 0); + + /* reset rx ring */ + cmd_subid = PHYTMAC_MSG_CMD_DEFAULT_RESET_RX_QUEUE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&ring), sizeof(ring), 1); +} + +static int phytmac_get_mac_addr(struct phytmac *pdata, u8 *addr) +{ + int index; + u16 cmd_id, cmd_subid; + struct phytmac_mac para; + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_ADDR; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + index = pdata->msg_ring.tx_msg_tail; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, + sizeof(struct phytmac_mac)); + + addr[0] = para.addrl & 0xff; + addr[1] = (para.addrl >> 8) & 0xff; + addr[2] = (para.addrl >> 16) & 0xff; + addr[3] = (para.addrl >> 24) & 0xff; + addr[4] = para.addrh & 0xff; + addr[5] = (para.addrh >> 8) & 0xff; + + return 0; +} + +int phytmac_set_mac_addr(struct phytmac *pdata, const u8 *addr) 
+{ + u16 cmd_id; + u16 cmd_subid; + struct phytmac_mac para; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_ADDR; + para.addrl = cpu_to_le32(*((u32 *)addr)); + para.addrh = cpu_to_le16(*((u16 *)(addr + 4))); + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + return 0; +} + +static int phytmac_init_hw(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + struct phytmac_dma_info dma; + struct phytmac_eth_info eth; + + phytmac_set_mac_addr(pdata, pdata->ndev->dev_addr); + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_JUMBO; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_1536_FRAMES; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + if (pdata->ndev->flags & IFF_PROMISC) { + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PROMISE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + } + + if (pdata->ndev->features & NETIF_F_RXCSUM) { + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_RXCSUM; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + } + + if (!(pdata->ndev->flags & IFF_BROADCAST)) { + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_BC; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + } + + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PAUSE; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_STRIPCRC; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 0); + + memset(&dma, 0, sizeof(dma)); + cmd_subid = PHYTMAC_MSG_CMD_SET_DMA; + dma.dma_burst_length = pdata->dma_burst_length; + if (pdata->dma_addr_width) + dma.hw_dma_cap |= HW_DMA_CAP_64B; + if (pdata->ndev->features & NETIF_F_HW_CSUM) + dma.hw_dma_cap |= HW_DMA_CAP_CSUM; + if (IS_REACHABLE(CONFIG_PHYTMAC_ENABLE_PTP)) + dma.hw_dma_cap |= HW_DMA_CAP_PTP; + if (pdata->dma_data_width == PHYTMAC_DBW64) + dma.hw_dma_cap |= HW_DMA_CAP_DDW64; + if (pdata->dma_data_width == PHYTMAC_DBW128) + dma.hw_dma_cap |= 
HW_DMA_CAP_DDW128; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&dma, sizeof(dma), 0); + + memset(ð, 0, sizeof(eth)); + cmd_subid = PHYTMAC_MSG_CMD_SET_ETH_MATCH; + eth.index = 0; + eth.etype = (uint16_t)ETH_P_IP; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)ð, sizeof(eth), 1); + + return 0; +} + +static int phytmac_enable_multicast(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_MC; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_MC; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + return 0; +} + +static int phytmac_set_mc_hash(struct phytmac *pdata, unsigned long *mc_filter) +{ + u16 cmd_id, cmd_subid; + struct phytmac_mc_info para; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_HASH_MC; + para.mc_bottom = (u32)mc_filter[0]; + para.mc_top = (u32)mc_filter[1]; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + return 0; +} + +static int phytmac_init_ring_hw(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + struct phytmac_ring_info rxring; + struct phytmac_ring_info txring; + struct phytmac_rxbuf_info rxbuf; + struct phytmac_queue *queue; + u32 q; + + memset(&rxring, 0, sizeof(rxring)); + memset(&txring, 0, sizeof(txring)); + memset(&rxbuf, 0, sizeof(rxbuf)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_INIT_TX_RING; + txring.queue_num = pdata->queues_num; + rxring.queue_num = pdata->queues_num; + txring.hw_dma_cap |= HW_DMA_CAP_64B; + rxring.hw_dma_cap |= HW_DMA_CAP_64B; + for (q = 0, queue = pdata->queues; q < pdata->queues_num; ++q, ++queue) { + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(q), queue->tx_head); + txring.addr[q] = queue->tx_ring_addr; + rxring.addr[q] = queue->rx_ring_addr; + } + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&txring), sizeof(txring), 0); + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = 
PHYTMAC_MSG_CMD_SET_DMA_RX_BUFSIZE; + rxbuf.queue_num = pdata->queues_num; + rxbuf.buffer_size = pdata->rx_buffer_len / 64; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxbuf), sizeof(rxbuf), 0); + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_INIT_RX_RING; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxring), sizeof(rxring), 0); + + return 0; +} + +int phytmac_init_msg_ring(struct phytmac *pdata) +{ + u32 size = 0; + + pdata->msg_ring.tx_msg_tail = PHYTMAC_READ(pdata, PHYTMAC_TX_MSG_TAIL) & 0xff; + size = PHYTMAC_READ_BITS(pdata, PHYTMAC_SIZE, TXRING_SIZE); + pdata->msg_ring.tx_msg_ring_size = size; + if (pdata->msg_ring.tx_msg_tail == size) + pdata->msg_ring.tx_msg_tail = 0; + + PHYTMAC_WRITE(pdata, PHYTMAC_MSG_IMR, 0xfffffffe); + if (netif_msg_hw(pdata)) + netdev_info(pdata->ndev, "mac msg ring: tx_msg_ring_size:%d, tx_msg_tail:%d\n", + size, pdata->msg_ring.tx_msg_tail); + + return 0; +} + +static int phytmac_get_feature_all(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + int index; + struct phytmac_feature para; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_CAPS; + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + index = pdata->msg_ring.tx_msg_tail; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + + memcpy(¶, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, + sizeof(struct phytmac_feature)); + + pdata->queues_max_num = para.queue_num; + if (para.dma_addr_width) + pdata->dma_addr_width = 64; + else + pdata->dma_addr_width = 32; + pdata->dma_data_width = para.dma_data_width; + pdata->max_rx_fs = para.max_rx_fs; + pdata->tx_bd_prefetch = (2 << (para.tx_bd_prefetch - 1)) * + sizeof(struct phytmac_dma_desc); + pdata->rx_bd_prefetch = (2 << (para.rx_bd_prefetch - 1)) * + sizeof(struct phytmac_dma_desc); + + if (netif_msg_hw(pdata)) { + netdev_info(pdata->ndev, "feature qnum:%d, daw:%d, dbw:%d, rxfs:%d, rxbd:%d, txbd:%d\n", + 
pdata->queues_num, pdata->dma_addr_width, pdata->dma_data_width, + pdata->max_rx_fs, pdata->rx_bd_prefetch, pdata->tx_bd_prefetch); + } + + return 0; +} + +void phytmac_get_regs(struct phytmac *pdata, u32 *reg_buff) +{ + u16 cmd_id, cmd_subid; + u16 reg_num; + int index; + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_REG_READ; + reg_num = 16; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)®_num, 2, 1); + + index = pdata->msg_ring.tx_msg_tail; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + + memcpy(reg_buff, pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, 64); +} + +static void phytmac_get_hw_stats(struct phytmac *pdata) +{ + u16 cmd_id, cmd_subid; + u8 count; + int i, j, index; + u32 stats[48]; + u64 val; + u64 *p = &pdata->stats.tx_octets; + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; + count = 1; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 0); + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; + count = 2; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 0); + + cmd_id = PHYTMAC_MSG_CMD_GET; + cmd_subid = PHYTMAC_MSG_CMD_GET_STATS; + count = 3; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)&count, sizeof(count), 1); + + for (i = 0; i < 3; i++) { + index = pdata->msg_ring.tx_msg_tail + i - 2; + if (index <= 0) + index += pdata->msg_ring.tx_msg_ring_size; + memcpy(&stats[i * 16], pdata->msg_regs + PHYTMAC_MSG(index) + MSG_HDR_LEN, 64); + } + + for (i = 0, j = 0; i < 44; i++) { + if (i == 0 || i == 20) { + val = (u64)stats[i + 1] << 32 | stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } else { + if (i != 1 && i != 21) { + val = stats[i]; + *p += val; + pdata->ethtool_stats[j] = *p; + ++j; + ++p; + } + } + } +} + +static void phytmac_mdio_idle(struct phytmac *pdata) +{ + u32 val; + + /* wait for end of transfer */ + val = PHYTMAC_READ(pdata, PHYTMAC_NETWORK_STATUS); + while 
(!(val & PHYTMAC_BIT(MIDLE))) { + cpu_relax(); + val = PHYTMAC_READ(pdata, PHYTMAC_NETWORK_STATUS); + } +} + +static int phytmac_mdio_data_read(struct phytmac *pdata, int mii_id, int regnum, int is_c45) +{ + u16 data; + + if (is_c45) { + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_ADDR) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_READ) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) + | PHYTMAC_BITS(CONST, 2))); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C22) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C22_READ) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, regnum) + | PHYTMAC_BITS(CONST, 2))); + } + phytmac_mdio_idle(pdata); + data = PHYTMAC_READ(pdata, PHYTMAC_MDIO) & 0xffff; + phytmac_mdio_idle(pdata); + return data; +} + +static int phytmac_mdio_data_write(struct phytmac *pdata, int mii_id, + int regnum, int is_c45, u16 data) +{ + if (is_c45) { + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_ADDR) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(VALUE, regnum & 0xFFFF) + | PHYTMAC_BITS(CONST, 2))); + phytmac_mdio_idle(pdata); + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C45) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C45_WRITE) + | PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, (regnum >> 16) & 0x1F) + | PHYTMAC_BITS(VALUE, data) + | PHYTMAC_BITS(CONST, 2))); + } else { + PHYTMAC_WRITE(pdata, PHYTMAC_MDIO, (PHYTMAC_BITS(CLAUSESEL, PHYTMAC_C22) + | PHYTMAC_BITS(MDCOPS, PHYTMAC_C22_WRITE) + | 
PHYTMAC_BITS(PHYADDR, mii_id) + | PHYTMAC_BITS(REGADDR, regnum) + | PHYTMAC_BITS(VALUE, data) + | PHYTMAC_BITS(CONST, 2))); + } + phytmac_mdio_idle(pdata); + + return 0; +} + +static int phytmac_powerup_hw(struct phytmac *pdata, int on) +{ + u32 status, data0, data1, rdata1; + int ret; + + if (pdata->capacities & PHYTMAC_CAPS_LPI) { + ret = readx_poll_timeout(PHYTMAC_READ_STAT, pdata, status, !status, + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh status is busy"); + + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy"); + + data0 = 0; + data0 = PHYTMAC_SET_BITS(data0, DATA0_MSG, PHYTMAC_MSG_PM); + data0 = PHYTMAC_SET_BITS(data0, DATA0_PRO, PHYTMAC_PRO_ID); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA0, data0); + data1 = 0; + + if (on == PHYTMAC_POWERON) { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATON); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } else { + data1 = PHYTMAC_SET_BITS(data1, DATA1_STAT, PHYTMAC_STATOFF); + data1 = PHYTMAC_SET_BITS(data1, DATA1_STATTYPE, PHYTMAC_STATTYPE); + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_CPP_DATA1, data1); + } + + PHYTMAC_MHU_WRITE(pdata, PHYTMAC_MHU_AP_CPP_SET, 1); + ret = readx_poll_timeout(PHYTMAC_READ_DATA0, pdata, data0, + data0 & PHYTMAC_BIT(DATA0_FREE), + 1, PHYTMAC_TIMEOUT); + if (ret) + netdev_err(pdata->ndev, "mnh data0 is busy\n"); + + rdata1 = PHYTMAC_MHU_READ(pdata, PHYTMAC_MHU_CPP_DATA1); + if (rdata1 == data1) + netdev_err(pdata->ndev, "gmac power %s success, data1 = %x, rdata1=%x\n", + on ? "up" : "down", data1, rdata1); + else + netdev_err(pdata->ndev, "gmac power %s failed, data1 = %x, rdata1=%x\n", + on ? 
"up" : "down", data1, rdata1); + } + + pdata->power_state = on; + + return 0; +} + +static int phytmac_set_wake(struct phytmac *pdata, int wake) +{ + u16 cmd_id, cmd_subid; + u8 wol = (u8)wake; + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_WOL; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&wol), 1, 1); + + return 0; +} + +static int phytmac_enable_promise(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + u8 rxcsum = 0; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) { + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PROMISE; + } else { + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_PROMISE; + if (pdata->ndev->features & NETIF_F_RXCSUM) + rxcsum = 1; + } + + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&rxcsum), 1, 1); + + return 0; +} + +static int phytmac_enable_rxcsum(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_RXCSUM; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_RXCSUM; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_txcsum(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_TXCSUM; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_TXCSUM; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_mdio(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_MDIO; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_MDIO; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_autoneg(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_AUTONEG; + else + cmd_subid = 
PHYTMAC_MSG_CMD_SET_DISABLE_AUTONEG; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + pdata->autoneg = enable; + return 0; +} + +static int phytmac_enable_pause(struct phytmac *pdata, int enable) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_PAUSE; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_PAUSE; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_enable_network(struct phytmac *pdata, int enable, int rx_tx) +{ + u16 cmd_id, cmd_subid; + + cmd_id = PHYTMAC_MSG_CMD_SET; + if (enable) + cmd_subid = PHYTMAC_MSG_CMD_SET_ENABLE_NETWORK; + else + cmd_subid = PHYTMAC_MSG_CMD_SET_DISABLE_NETWORK; + + phytmac_msg_send(pdata, cmd_id, cmd_subid, NULL, 0, 1); + + return 0; +} + +static int phytmac_add_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; + struct phytmac_fdir_info fdir; + u16 cmd_id, cmd_subid; + + memset(&fdir, 0, sizeof(fdir)); + + tp4sp_v = &rx_flow->h_u.tcp_ip4_spec; + tp4sp_m = &rx_flow->m_u.tcp_ip4_spec; + if (tp4sp_m->ip4src == 0xFFFFFFFF) { + fdir.ipsrc_en = true; + fdir.ip4src = tp4sp_v->ip4src; + } + + if (tp4sp_m->ip4dst == 0xFFFFFFFF) { + fdir.ipdst_en = true; + fdir.ip4dst = tp4sp_v->ip4dst; + } + + if (tp4sp_m->psrc == 0xFFFF || tp4sp_m->pdst == 0xFFFF) { + fdir.port_en = true; + fdir.dstport = tp4sp_v->pdst; + fdir.srcport = tp4sp_v->psrc; + fdir.dstport_mask = tp4sp_m->pdst; + fdir.srcport_mask = tp4sp_m->psrc; + } + + fdir.location = rx_flow->location; + fdir.queue = rx_flow->ring_cookie; + + if (fdir.ipsrc_en || fdir.ipdst_en || fdir.port_en) { + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_ADD_FDIR; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); + } + + return 0; +} + +static int phytmac_del_fdir_entry(struct phytmac *pdata, struct ethtool_rx_flow_spec *rx_flow) +{ + struct 
phytmac_fdir_info fdir; + u16 cmd_id, cmd_subid; + + memset(&fdir, 0, sizeof(fdir)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_DEL_FDIR; + fdir.location = (u8)rx_flow->location; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(&fdir), sizeof(fdir), 1); + + return 0; +} + +static void phytmac_tx_start(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(queue->index), queue->tx_tail); + queue->tx_xmit_more = 0; +} + +static u32 phytmac_get_irq_mask(u32 mask) +{ + u32 value = 0; + + value |= (mask & PHYTMAC_INT_TX_COMPLETE) ? PHYTMAC_BIT(TXCOMP) : 0; + value |= (mask & PHYTMAC_INT_TX_ERR) ? PHYTMAC_BIT(DMA_ERR) : 0; + value |= (mask & PHYTMAC_INT_RX_COMPLETE) ? PHYTMAC_BIT(RXCOMP) : 0; + value |= (mask & PHYTMAC_INT_RX_OVERRUN) ? PHYTMAC_BIT(RXOVERRUN) : 0; + value |= (mask & PHYTMAC_INT_RX_DESC_FULL) ? PHYTMAC_BIT(RUSED) : 0; + + return value; +} + +static u32 phytmac_get_irq_status(u32 value) +{ + u32 status = 0; + + status |= (value & PHYTMAC_BIT(TXCOMP)) ? PHYTMAC_INT_TX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(DMA_ERR)) ? PHYTMAC_INT_TX_ERR : 0; + status |= (value & PHYTMAC_BIT(RXCOMP)) ? PHYTMAC_INT_RX_COMPLETE : 0; + status |= (value & PHYTMAC_BIT(RXOVERRUN)) ? PHYTMAC_INT_RX_OVERRUN : 0; + status |= (value & PHYTMAC_BIT(RUSED)) ? 
PHYTMAC_INT_RX_DESC_FULL : 0; + + return status; +} + +static void phytmac_enable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_ER(queue_index), value); +} + +static void phytmac_disable_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_DR(queue_index), value); +} + +static void phytmac_clear_irq(struct phytmac *pdata, + int queue_index, u32 mask) +{ + u32 value; + + value = phytmac_get_irq_mask(mask); + PHYTMAC_WRITE(pdata, PHYTMAC_INT_SR(queue_index), value); +} + +static unsigned int phytmac_get_irq(struct phytmac *pdata, int queue_index) +{ + u32 status; + u32 value; + + value = PHYTMAC_READ(pdata, PHYTMAC_INT_SR(queue_index)); + status = phytmac_get_irq_status(value); + + return status; +} + +static void phytmac_interface_config(struct phytmac *pdata, unsigned int mode, + const struct phylink_link_state *state) +{ + struct phytmac_interface_info para; + u16 cmd_id, cmd_subid; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_MAC_CONFIG; + para.interface = state->interface; + para.autoneg = (mode == MLO_AN_FIXED ? 
0 : 1); + para.speed = state->speed; + para.duplex = state->duplex; + pdata->autoneg = para.autoneg; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); +} + +static int phytmac_interface_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + struct phytmac_interface_info para; + u16 cmd_id, cmd_subid; + + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_MAC_LINK_CONFIG; + para.interface = interface; + para.duplex = duplex; + para.speed = speed; + para.autoneg = pdata->autoneg; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + return 0; +} + +static int phytmac_interface_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static int phytmac_pcs_linkup(struct phytmac *pdata, phy_interface_t interface, + int speed, int duplex) +{ + struct phytmac_interface_info para; + u16 cmd_id, cmd_subid; + + if (interface == PHY_INTERFACE_MODE_USXGMII) { + memset(¶, 0, sizeof(para)); + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_PCS_LINK_UP; + para.interface = interface; + para.duplex = duplex; + para.speed = speed; + para.autoneg = 0; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + } + + return 0; +} + +static int phytmac_pcs_linkdown(struct phytmac *pdata) +{ + return 0; +} + +static unsigned int phytmac_pcs_get_link(struct phytmac *pdata, phy_interface_t interface) +{ + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_NETWORK_STATUS, LINK); + else if (interface == PHY_INTERFACE_MODE_USXGMII) + return PHYTMAC_READ_BITS(pdata, PHYTMAC_USX_LINK_STATUS, USX_LINK); + + return 0; +} + +static unsigned int phytmac_tx_map_desc(struct phytmac_queue *queue, + u32 tx_tail, struct packet_info *packet) +{ + unsigned int i, ctrl; + struct phytmac *pdata = queue->pdata; + struct 
phytmac_dma_desc *desc; + struct phytmac_tx_skb *tx_skb; + unsigned int eof = 1; + + i = tx_tail; + + do { + i--; + tx_skb = phytmac_get_tx_skb(queue, i); + desc = phytmac_get_tx_desc(queue, i); + + ctrl = (u32)tx_skb->length; + if (eof) { + ctrl |= PHYTMAC_BIT(TXLAST); + eof = 0; + } + + if (unlikely(i == (pdata->tx_ring_size - 1))) + ctrl |= PHYTMAC_BIT(TXWRAP); + + if (i == queue->tx_tail) { + ctrl |= PHYTMAC_BITS(TXLSO, packet->lso); + ctrl |= PHYTMAC_BITS(TXTCP_SEQ_SRC, packet->seq); + if (packet->nocrc) + ctrl |= PHYTMAC_BIT(TXNOCRC); + } else { + ctrl |= PHYTMAC_BITS(MSSMFS, packet->mss); + } + + desc->desc2 = upper_32_bits(tx_skb->addr); + desc->desc0 = lower_32_bits(tx_skb->addr); + /* make newly desc1 to hardware */ + wmb(); + desc->desc1 = ctrl; + } while (i != queue->tx_tail); + + return 0; +} + +static void phytmac_init_rx_map_desc(struct phytmac_queue *queue, + u32 index) +{ + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + desc->desc1 = 0; + /* Make newly descriptor to hardware */ + dma_wmb(); + desc->desc0 |= PHYTMAC_BIT(RXUSED); +} + +static unsigned int phytmac_rx_map_desc(struct phytmac_queue *queue, u32 index, dma_addr_t addr) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc; + + desc = phytmac_get_rx_desc(queue, index); + + if (addr) { + if (unlikely(index == (pdata->rx_ring_size - 1))) + addr |= PHYTMAC_BIT(RXWRAP); + desc->desc1 = 0; + desc->desc2 = upper_32_bits(addr); + /* Make newly descriptor to hardware */ + dma_wmb(); + desc->desc0 = lower_32_bits(addr); + } else { + desc->desc1 = 0; + /* make newly descriptor to hardware */ + dma_wmb(); + desc->desc0 &= ~PHYTMAC_BIT(RXUSED); + } + + return 0; +} + +static unsigned int phytmac_zero_rx_desc_addr(struct phytmac_dma_desc *desc) +{ + desc->desc2 = 0; + desc->desc0 = PHYTMAC_BIT(RXUSED); + + return 0; +} + +static int phytmac_tx_complete(const struct phytmac_dma_desc *desc) +{ + return PHYTMAC_GET_BITS(desc->desc1, TXUSED); +} + 
+static bool phytmac_rx_complete(const struct phytmac_dma_desc *desc) +{ + dma_addr_t addr; + bool used; + + used = PHYTMAC_GET_BITS(desc->desc0, RXUSED); + addr = ((u64)(desc->desc2) << 32); + addr |= desc->desc0 & 0xfffffff8; + + if (used != 0 && addr != 0) + return true; + else + return false; +} + +static int phytmac_rx_pkt_len(struct phytmac *pdata, const struct phytmac_dma_desc *desc) +{ + if (pdata->capacities & PHYTMAC_CAPS_JUMBO) + return desc->desc1 & PHYTMAC_RXJFRMLEN_MASK; + else + return desc->desc1 & PHYTMAC_RXFRMLEN_MASK; +} + +static bool phytmac_rx_checksum(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + u32 check = value >> PHYTMAC_RXCSUM_INDEX & 0x3; + + return (check == PHYTMAC_RXCSUM_IP_TCP || check == PHYTMAC_RXCSUM_IP_UDP); +} + +static bool phytmac_rx_single_buffer(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return ((value & PHYTMAC_BIT(RXSOF)) && (value & PHYTMAC_BIT(RXEOF))); +} + +static bool phytmac_rx_sof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RXSOF)); +} + +static bool phytmac_rx_eof(const struct phytmac_dma_desc *desc) +{ + u32 value = desc->desc1; + + return (value & PHYTMAC_BIT(RXEOF)); +} + +static void phytmac_clear_rx_desc(struct phytmac_queue *queue, int begin, int end) +{ + unsigned int frag; + unsigned int tmp = end; + struct phytmac_dma_desc *desc; + + if (begin > end) + tmp = end + queue->pdata->rx_ring_size; + + for (frag = begin; frag != end; frag++) { + desc = phytmac_get_rx_desc(queue, frag); + desc->desc0 &= ~PHYTMAC_BIT(RXUSED); + } +} + +static void phytmac_clear_tx_desc(struct phytmac_queue *queue) +{ + struct phytmac *pdata = queue->pdata; + struct phytmac_dma_desc *desc = NULL; + struct phytmac_tx_skb *tx_skb = NULL; + int i; + + for (i = 0; i < queue->pdata->tx_ring_size; i++) { + desc = phytmac_get_tx_desc(queue, i); + tx_skb = phytmac_get_tx_skb(queue, i); + desc->desc2 = upper_32_bits(tx_skb->addr); + 
desc->desc0 = lower_32_bits(tx_skb->addr); + /* make newly desc to hardware */ + wmb(); + desc->desc1 = PHYTMAC_BIT(TXUSED); + } + desc->desc1 |= PHYTMAC_BIT(TXWRAP); + PHYTMAC_WRITE(pdata, PHYTMAC_TAIL_PTR(queue->index), queue->tx_tail); +} + +static void phytmac_get_time(struct phytmac *pdata, struct timespec64 *ts) +{ + u32 ns, secl, sech; + + ns = PHYTMAC_READ(pdata, PHYTMAC_TIMER_NSEC); + secl = PHYTMAC_READ(pdata, PHYTMAC_TIMER_SEC); + sech = PHYTMAC_READ(pdata, PHYTMAC_TIMER_MSB_SEC); + + ts->tv_nsec = ns; + ts->tv_sec = (((u64)sech << 32) | secl) & TIMER_SEC_MAX_VAL; +} + +void phytmac_set_time(struct phytmac *pdata, time64_t sec, long nsec) +{ + u32 secl, sech; + + secl = (u32)sec; + sech = (sec >> 32) & (0xffff); + + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_NSEC, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_MSB_SEC, sech); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_SEC, secl); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_NSEC, nsec); +} + +void phytmac_clear_time(struct phytmac *pdata) +{ + u32 value; + + pdata->ts_incr.sub_ns = 0; + pdata->ts_incr.ns = 0; + + value = PHYTMAC_READ(pdata, PHYTMAC_TIMER_INCR); + value = PHYTMAC_SET_BITS(value, INCR_NSEC, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_INCR, value); + + value = PHYTMAC_READ(pdata, PHYTMAC_TIMER_INCR_SUB_NSEC); + value = PHYTMAC_SET_BITS(value, INCR_SNSEC, 0); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_INCR_SUB_NSEC, value); + + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_ADJUST, 0); +} + +int phytmac_set_tsmode(struct phytmac *pdata, struct ts_ctrl *ctrl) +{ + u16 cmd_id, cmd_subid; + struct phytmac_ts_config para; + + cmd_id = PHYTMAC_MSG_CMD_SET; + cmd_subid = PHYTMAC_MSG_CMD_SET_TS_CONFIG; + para.tx_mode = ctrl->tx_control; + para.rx_mode = ctrl->rx_control; + para.one_step = ctrl->one_step; + phytmac_msg_send(pdata, cmd_id, cmd_subid, (void *)(¶), sizeof(para), 1); + + return 0; +} + +static int phytmac_set_tsincr(struct phytmac *pdata, struct ts_incr *incr) +{ + u32 value; + + value = PHYTMAC_BITS(INCR_SNSEC, incr->sub_ns); + 
PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_INCR_SUB_NSEC, value); + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_INCR, incr->ns); + + return 0; +} + +static void phytmac_ptp_init_hw(struct phytmac *pdata) +{ + struct timespec64 ts; + + ts = ns_to_timespec64(ktime_to_ns(ktime_get_real())); + phytmac_set_time(pdata, ts.tv_sec, ts.tv_nsec); + + phytmac_set_tsincr(pdata, &pdata->ts_incr); +} + +static int phytmac_adjust_fine(struct phytmac *pdata, long ppm, bool negative) +{ + struct ts_incr ts_incr; + u32 tmp; + u64 adj; + + ts_incr.ns = pdata->ts_incr.ns; + ts_incr.sub_ns = pdata->ts_incr.sub_ns; + + /* scaling: unused(8bit) | ns(8bit) | fractions(16bit) */ + tmp = ((u64)ts_incr.ns << PHYTMAC_INCR_SNSECL_INDEX) + ts_incr.sub_ns; + adj = ((u64)ppm * tmp + (USEC_PER_SEC >> 1)) >> PHYTMAC_INCR_SNSECL_INDEX; + + adj = div_u64(adj, USEC_PER_SEC); + adj = negative ? (tmp - adj) : (tmp + adj); + + ts_incr.ns = (adj >> PHYTMAC_INCR_SNSEC_WIDTH) + & ((1 << PHYTMAC_INCR_NSEC_WIDTH) - 1); + ts_incr.sub_ns = adj & ((1 << PHYTMAC_INCR_SNSEC_WIDTH) - 1); + + phytmac_set_tsincr(pdata, &ts_incr); + + return 0; +} + +int phytmac_adjust_time(struct phytmac *pdata, s64 delta, int neg) +{ + u32 adj; + + if (delta > PHYTMAC_ASEC_MAX) { + struct timespec64 now, then; + + then = ns_to_timespec64(delta); + phytmac_get_time(pdata, &now); + now = timespec64_add(now, then); + phytmac_set_time(pdata, now.tv_sec, now.tv_nsec); + } else { + adj = (neg << PHYTMAC_AADD_INDEX) | delta; + PHYTMAC_WRITE(pdata, PHYTMAC_TIMER_ADJUST, adj); + } + + return 0; +} + +static int phytmac_ts_valid(struct phytmac *pdata, struct phytmac_dma_desc *desc, int direction) +{ + int ts_valid = 0; + + if (direction == PHYTMAC_TX) + ts_valid = desc->desc1 & PHYTMAC_BIT(TXTSVALID); + else if (direction == PHYTMAC_RX) + ts_valid = desc->desc0 & PHYTMAC_BIT(RXTSVALID); + return ts_valid; +} + +static void phytmac_get_dma_ts(struct phytmac *pdata, u32 ts_1, u32 ts_2, struct timespec64 *ts) +{ + struct timespec64 ts2; + + ts->tv_sec = 
(PHYTMAC_GET_BITS(ts_2, TS_SECH) << 2) | + PHYTMAC_GET_BITS(ts_1, TS_SECL); + ts->tv_nsec = PHYTMAC_GET_BITS(ts_1, TS_NSEC); + + phytmac_get_time(pdata, &ts2); + + if (((ts->tv_sec ^ ts2.tv_sec) & (PHYTMAC_TS_SEC_TOP >> 1)) != 0) + ts->tv_sec -= PHYTMAC_TS_SEC_TOP; + + ts->tv_sec += (ts2.tv_sec & (~PHYTMAC_TS_SEC_MASK)); +} + +static unsigned int phytmac_get_ts_rate(struct phytmac *pdata) +{ + return 300000000; +} + +struct phytmac_hw_if phytmac_2p0_hw = { + .init_msg_ring = phytmac_init_msg_ring, + .reset_hw = phytmac_reset_hw, + .init_hw = phytmac_init_hw, + .init_ring_hw = phytmac_init_ring_hw, + .get_feature = phytmac_get_feature_all, + .get_regs = phytmac_get_regs, + .get_stats = phytmac_get_hw_stats, + .set_mac_address = phytmac_set_mac_addr, + .get_mac_address = phytmac_get_mac_addr, + .mdio_read = phytmac_mdio_data_read, + .mdio_write = phytmac_mdio_data_write, + .poweron = phytmac_powerup_hw, + .set_wol = phytmac_set_wake, + .enable_promise = phytmac_enable_promise, + .enable_multicast = phytmac_enable_multicast, + .set_hash_table = phytmac_set_mc_hash, + .enable_rx_csum = phytmac_enable_rxcsum, + .enable_tx_csum = phytmac_enable_txcsum, + .enable_mdio_control = phytmac_enable_mdio, + .enable_autoneg = phytmac_enable_autoneg, + .enable_pause = phytmac_enable_pause, + .enable_network = phytmac_enable_network, + .add_fdir_entry = phytmac_add_fdir_entry, + .del_fdir_entry = phytmac_del_fdir_entry, + + /* mac config */ + .mac_config = phytmac_interface_config, + .mac_linkup = phytmac_interface_linkup, + .mac_linkdown = phytmac_interface_linkdown, + .pcs_linkup = phytmac_pcs_linkup, + .pcs_linkdown = phytmac_pcs_linkdown, + .get_link = phytmac_pcs_get_link, + + /* irq */ + .enable_irq = phytmac_enable_irq, + .disable_irq = phytmac_disable_irq, + .clear_irq = phytmac_clear_irq, + .get_irq = phytmac_get_irq, + + /* tx and rx */ + .tx_map = phytmac_tx_map_desc, + .transmit = phytmac_tx_start, + .tx_complete = phytmac_tx_complete, + .rx_complete = 
phytmac_rx_complete, + .get_rx_pkt_len = phytmac_rx_pkt_len, + .init_rx_map = phytmac_init_rx_map_desc, + .rx_map = phytmac_rx_map_desc, + .rx_checksum = phytmac_rx_checksum, + .rx_single_buffer = phytmac_rx_single_buffer, + .rx_pkt_start = phytmac_rx_sof, + .rx_pkt_end = phytmac_rx_eof, + .clear_rx_desc = phytmac_clear_rx_desc, + .clear_tx_desc = phytmac_clear_tx_desc, + .zero_rx_desc_addr = phytmac_zero_rx_desc_addr, + + /* ptp */ + .init_ts_hw = phytmac_ptp_init_hw, + .set_time = phytmac_set_time, + .clear_time = phytmac_clear_time, + .get_time = phytmac_get_time, + .set_ts_config = phytmac_set_tsmode, + .set_incr = phytmac_set_tsincr, + .adjust_fine = phytmac_adjust_fine, + .adjust_time = phytmac_adjust_time, + .ts_valid = phytmac_ts_valid, + .get_timestamp = phytmac_get_dma_ts, + .get_ts_rate = phytmac_get_ts_rate, +}; +EXPORT_SYMBOL_GPL(phytmac_2p0_hw); diff --git a/drivers/net/ethernet/phytium/phytmac_v2.h b/drivers/net/ethernet/phytium/phytmac_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..92e4806ac2c1f7f72be6f0f74d34b6624d23800c --- /dev/null +++ b/drivers/net/ethernet/phytium/phytmac_v2.h @@ -0,0 +1,362 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _PHYTMAC_V2_H +#define _PHYTMAC_V2_H + +extern struct phytmac_hw_if phytmac_2p0_hw; + +#define PHYTMAC_MSG_SRAM_SIZE 4096 +#define MSG_HDR_LEN 8 + +#define PHYTMAC_TX_MSG_HEAD 0x000 +#define PHYTMAC_TX_MSG_TAIL 0x004 +#define PHYTMAC_RX_MSG_HEAD 0x008 +#define PHYTMAC_RX_MSG_TAIL 0x00c +#define PHYTMAC_MSG_IMR 0x020 +#define PHYTMAC_MSG_ISR 0x02c + +#define PHYTMAC_SIZE 0x0048 +#define PHYTMAC_NETWORK_STATUS 0x0240 +#define PHYTMAC_PCS_AN_LP 0x0244 +#define PHYTMAC_USX_LINK_STATUS 0x0248 +#define PHYTMAC_MDIO 0x0264 +#define PHYTMAC_TIMER_INCR_SUB_NSEC 0x024c +#define PHYTMAC_TIMER_INCR 0x0250 +#define PHYTMAC_TIMER_MSB_SEC 0x0254 +#define PHYTMAC_TIMER_SEC 0x0258 +#define PHYTMAC_TIMER_NSEC 0x025c +#define PHYTMAC_TIMER_ADJUST 0x0260 +#define PHYTMAC_MSG(i) (((i) - 1) * 
0x48) + +#define PHYTMAC_MODULE_ID_GMAC 0x60 +#define PHYTMAC_FLAGS_MSG_COMP 0x1 +#define PHYTMAC_FLAGS_MSG_NOINT 0x2 + +/* Bitfields in PHYTMAC_TX_MSG_TAIL */ +#define PHYTMAC_TX_MSG_INT_INDEX 16 +#define PHYTMAC_TX_MSG_INT_WIDTH 1 + +/* Bitfields in PHYTMAC_MSG_ISR */ +#define PHYTMAC_MSG_COMPLETE_INDEX 0 +#define PHYTMAC_MSG_COMPLETE_WIDTH 1 + +/* Bitfields in PHYTMAC_SIZE */ +#define PHYTMAC_MEM_SIZE_INDEX 0 +#define PHYTMAC_MEM_SIZE_WIDTH 4 +#define PHYTMAC_TXRING_SIZE_INDEX 8 +#define PHYTMAC_TXRING_SIZE_WIDTH 6 + +/* Bitfields in PHYTMAC_TIMER_INCR_SUB_NSEC */ +#define PHYTMAC_INCR_SNSECH_INDEX 0 +#define PHYTMAC_INCR_SNSECH_WIDTH 16 +#define PHYTMAC_INCR_SNSECL_INDEX 24 +#define PHYTMAC_INCR_SNSECL_WIDTH 8 +#define PHYTMAC_INCR_SNSEC_WIDTH 24 + +/* Bitfields in PHYTMAC_TIMER_INCR_SUB_NSEC */ +#define PHYTMAC_INCR_SNSEC_INDEX 0 +#define PHYTMAC_INCR_SNSEC_WIDTH 24 + +/* Bitfields in PHYTMAC_TIMER_INCR */ +#define PHYTMAC_INCR_NSEC_INDEX 0 +#define PHYTMAC_INCR_NSEC_WIDTH 8 + +/* Bitfields in PHYTMAC_TIMER_MSB_SEC */ +#define PHYTMAC_TIMER_SECH_INDEX 0 +#define PHYTMAC_TIMER_SECH_WIDTH 16 + +/* Bitfields in PHYTMAC_TIMER_SEC */ +#define PHYTMAC_TIMER_SECL_INDEX 0 +#define PHYTMAC_TIMER_SECL_WIDTH 32 + +/* Bitfields in PHYTMAC_TIMER_NSEC */ +#define PHYTMAC_TIMER_NSEC_INDEX 0 +#define PHYTMAC_TIMER_NSEC_WIDTH 30 + +/* Bitfields in PHYTMAC_TIMER_ADJUST */ +#define PHYTMAC_ASEC_INDEX 0 +#define PHYTMAC_ASEC_WIDTH 30 +#define PHYTMAC_AADD_INDEX 31 +#define PHYTMAC_AADD_WIDTH 1 +#define PHYTMAC_ASEC_MAX ((1 << PHYTMAC_ASEC_WIDTH) - 1) + +#define PHYTMAC_TIMER_SEC_WIDTH (PHYTMAC_TIMER_SECH_WIDTH + PHYTMAC_TIMER_SECL_WIDTH) +#define TIMER_SEC_MAX_VAL (((u64)1 << PHYTMAC_TIMER_SEC_WIDTH) - 1) +#define TIMER_NSEC_MAX_VAL ((1 << PHYTMAC_TIMER_NSEC_WIDTH) - 1) + +#define PHYTMAC_TAIL_PTR(i) (0x0100 + ((i) * 4)) +#define PHYTMAC_INT_ER(i) (0x0140 + ((i) * 4)) +#define PHYTMAC_INT_DR(i) (0x0180 + ((i) * 4)) +#define PHYTMAC_INT_MR(i) (0x01c0 + ((i) * 4)) +#define 
PHYTMAC_INT_SR(i) (0x0200 + ((i) * 4)) + +#define PHYTMAC_LINK_INDEX 0 /* PCS link status */ +#define PHYTMAC_LINK_WIDTH 1 +#define PHYTMAC_MIDLE_INDEX 2 /* Mdio idle */ +#define PHYTMAC_MIDLE_WIDTH 1 + +/* Int status/Enable/Disable/Mask Register */ +#define PHYTMAC_RXCOMP_INDEX 1 /* Rx complete */ +#define PHYTMAC_RXCOMP_WIDTH 1 +#define PHYTMAC_RUSED_INDEX 2 /* Rx used bit read */ +#define PHYTMAC_RUSED_WIDTH 1 +#define PHYTMAC_DMA_ERR_INDEX 6 /* AMBA error */ +#define PHYTMAC_DMA_ERR_WIDTH 1 +#define PHYTMAC_TXCOMP_INDEX 7 /* Tx complete */ +#define PHYTMAC_TXCOMP_WIDTH 1 +#define PHYTMAC_RXOVERRUN_INDEX 10 /* Rx overrun */ +#define PHYTMAC_RXOVERRUN_WIDTH 1 +#define PHYTMAC_RESP_ERR_INDEX 11 /* Resp not ok */ +#define PHYTMAC_RESP_ERR_WIDTH 1 + +/* pcs an lp */ +#define PHYTMAC_AUTO_NEG_INDEX 12 +#define PHYTMAC_AUTO_NEG_WIDTH 1 + +/* Bitfields in USX_STATUS. */ +#define PHYTMAC_USX_LINK_INDEX 0 +#define PHYTMAC_USX_LINK_WIDTH 1 + +/* Mdio read/write Register */ +#define PHYTMAC_VALUE_INDEX 0 /* value */ +#define PHYTMAC_VALUE_WIDTH 16 +#define PHYTMAC_CONST_INDEX 16 /* Must Be 10 */ +#define PHYTMAC_CONST_WIDTH 2 +#define PHYTMAC_REGADDR_INDEX 18 /* Register address */ +#define PHYTMAC_REGADDR_WIDTH 5 +#define PHYTMAC_PHYADDR_INDEX 23 /* Phy address */ +#define PHYTMAC_PHYADDR_WIDTH 5 +#define PHYTMAC_MDCOPS_INDEX 28 +#define PHYTMAC_MDCOPS_WIDTH 2 +#define PHYTMAC_CLAUSESEL_INDEX 30 +#define PHYTMAC_CLAUSESEL_WIDTH 1 +#define PHYTMAC_C22 1 +#define PHYTMAC_C45 0 +#define PHYTMAC_C45_ADDR 0 +#define PHYTMAC_C45_WRITE 1 +#define PHYTMAC_C45_READ 3 +#define PHYTMAC_C22_WRITE 1 +#define PHYTMAC_C22_READ 2 + +/* rx dma desc bit */ +/* DMA descriptor bitfields */ +#define PHYTMAC_RXUSED_INDEX 0 +#define PHYTMAC_RXUSED_WIDTH 1 +#define PHYTMAC_RXWRAP_INDEX 1 +#define PHYTMAC_RXWRAP_WIDTH 1 +#define PHYTMAC_RXTSVALID_INDEX 2 +#define PHYTMAC_RXTSVALID_WIDTH 1 +#define PHYTMAC_RXWADDR_INDEX 2 +#define PHYTMAC_RXWADDR_WIDTH 30 + +#define PHYTMAC_RXFRMLEN_INDEX 0 
+#define PHYTMAC_RXFRMLEN_WIDTH 12 +#define PHYTMAC_RXINDEX_INDEX 12 +#define PHYTMAC_RXINDEX_WIDTH 2 +#define PHYTMAC_RXSOF_INDEX 14 +#define PHYTMAC_RXSOF_WIDTH 1 +#define PHYTMAC_RXEOF_INDEX 15 +#define PHYTMAC_RXEOF_WIDTH 1 + +#define PHYTMAC_RXFRMLEN_MASK 0x1FFF +#define PHYTMAC_RXJFRMLEN_MASK 0x3FFF + +#define PHYTMAC_RXTYPEID_MATCH_INDEX 22 +#define PHYTMAC_RXTYPEID_MATCH_WIDTH 2 +#define PHYTMAC_RXCSUM_INDEX 22 +#define PHYTMAC_RXCSUM_WIDTH 2 + +/* Buffer descriptor constants */ +#define PHYTMAC_RXCSUM_NONE 0 +#define PHYTMAC_RXCSUM_IP 1 +#define PHYTMAC_RXCSUM_IP_TCP 2 +#define PHYTMAC_RXCSUM_IP_UDP 3 + +#define PHYTMAC_TXFRMLEN_INDEX 0 +#define PHYTMAC_TXFRMLEN_WIDTH 14 +#define PHYTMAC_TXLAST_INDEX 15 +#define PHYTMAC_TXLAST_WIDTH 1 +#define PHYTMAC_TXNOCRC_INDEX 16 +#define PHYTMAC_TXNOCRC_WIDTH 1 +#define PHYTMAC_MSSMFS_INDEX 16 +#define PHYTMAC_MSSMFS_WIDTH 14 +#define PHYTMAC_TXLSO_INDEX 17 +#define PHYTMAC_TXLSO_WIDTH 2 +#define PHYTMAC_TXTCP_SEQ_SRC_INDEX 19 +#define PHYTMAC_TXTCP_SEQ_SRC_WIDTH 1 +#define PHYTMAC_TXTSVALID_INDEX 23 +#define PHYTMAC_TXTSVALID_WIDTH 1 +#define PHYTMAC_TXWRAP_INDEX 30 +#define PHYTMAC_TXWRAP_WIDTH 1 +#define PHYTMAC_TXUSED_INDEX 31 +#define PHYTMAC_TXUSED_WIDTH 1 + +/* dma ts */ +#define PHYTMAC_TS_NSEC_INDEX 0 +#define PHYTMAC_TS_NSEC_WIDTH 30 +#define PHYTMAC_TS_SECL_INDEX 30 +#define PHYTMAC_TS_SECL_WIDTH 2 +#define PHYTMAC_TS_SECH_INDEX 0 +#define PHYTMAC_TS_SECH_WIDTH 4 +#define PHYTMAC_TS_SEC_MASK 0x3f +#define PHYTMAC_TS_SEC_TOP 0x40 + +#define HW_DMA_CAP_64B 0x1 +#define HW_DMA_CAP_CSUM 0x2 +#define HW_DMA_CAP_PTP 0x4 +#define HW_DMA_CAP_DDW64 0x8 +#define HW_DMA_CAP_DDW128 0x10 + +#define PHYTMAC_DBW64 2 +#define PHYTMAC_DBW128 4 + +enum phytmac_msg_cmd_id { + PHYTMAC_MSG_CMD_DEFAULT = 0, + PHYTMAC_MSG_CMD_SET, + PHYTMAC_MSG_CMD_GET, + PHYTMAC_MSG_CMD_DATA, + PHYTMAC_MSG_CMD_REPORT, +}; + +enum phytmac_default_subid { + PHYTMAC_MSG_CMD_DEFAULT_RESET_HW = 0, + PHYTMAC_MSG_CMD_DEFAULT_RESET_TX_QUEUE, + 
PHYTMAC_MSG_CMD_DEFAULT_RESET_RX_QUEUE, +}; + +enum phytmac_set_subid { + PHYTMAC_MSG_CMD_SET_INIT_ALL = 0, + PHYTMAC_MSG_CMD_SET_INIT_RING = 1, + PHYTMAC_MSG_CMD_SET_INIT_TX_RING = 2, + PHYTMAC_MSG_CMD_SET_INIT_RX_RING = 3, + PHYTMAC_MSG_CMD_SET_MAC_CONFIG = 4, + PHYTMAC_MSG_CMD_SET_ADDR = 5, + PHYTMAC_MSG_CMD_SET_DMA_RX_BUFSIZE = 6, + PHYTMAC_MSG_CMD_SET_DMA = 7, + PHYTMAC_MSG_CMD_SET_CAPS = 8, + PHYTMAC_MSG_CMD_SET_TS_CONFIG = 9, + PHYTMAC_MSG_CMD_SET_INIT_TX_ENABLE_TRANSMIT = 10, + PHYTMAC_MSG_CMD_SET_INIT_RX_ENABLE_RECEIVE = 11, + PHYTMAC_MSG_CMD_SET_ENABLE_NETWORK = 12, + PHYTMAC_MSG_CMD_SET_DISABLE_NETWORK = 13, + PHYTMAC_MSG_CMD_SET_ENABLE_MDIO = 14, + PHYTMAC_MSG_CMD_SET_DISABLE_MDIO = 15, + PHYTMAC_MSG_CMD_SET_ENABLE_TXCSUM = 16, + PHYTMAC_MSG_CMD_SET_DISABLE_TXCSUM = 17, + PHYTMAC_MSG_CMD_SET_ENABLE_RXCSUM = 18, + PHYTMAC_MSG_CMD_SET_DISABLE_RXCSUM = 19, + PHYTMAC_MSG_CMD_SET_ENABLE_PROMISE = 20, + PHYTMAC_MSG_CMD_SET_DISABLE_PROMISE = 21, + PHYTMAC_MSG_CMD_SET_ENABLE_MC = 22, + PHYTMAC_MSG_CMD_SET_DISABLE_MC = 23, + PHYTMAC_MSG_CMD_SET_ENABLE_HASH_MC = 24, + PHYTMAC_MSG_CMD_SET_ENABLE_PAUSE = 25, + PHYTMAC_MSG_CMD_SET_DISABLE_PAUSE = 26, + PHYTMAC_MSG_CMD_SET_ENABLE_JUMBO = 27, + PHYTMAC_MSG_CMD_SET_DISABLE_JUMBO = 28, + PHYTMAC_MSG_CMD_SET_ENABLE_1536_FRAMES = 29, + PHYTMAC_MSG_CMD_SET_ENABLE_STRIPCRC = 30, + PHYTMAC_MSG_CMD_SET_DISABLE_STRIPCRC = 31, + PHYTMAC_MSG_CMD_SET_PCS_LINK_UP = 32, + PHYTMAC_MSG_CMD_SET_PCS_LINK_DOWN = 33, + PHYTMAC_MSG_CMD_SET_MAC_LINK_CONFIG = 34, + PHYTMAC_MSG_CMD_SET_REG_WRITE = 35, + PHYTMAC_MSG_CMD_SET_ENABLE_BC = 36, + PHYTMAC_MSG_CMD_SET_DISABLE_BC = 37, + PHYTMAC_MSG_CMD_SET_ETH_MATCH = 38, + PHYTMAC_MSG_CMD_SET_ADD_FDIR = 39, + PHYTMAC_MSG_CMD_SET_DEL_FDIR = 40, + PHYTMAC_MSG_CMD_SET_ENABLE_AUTONEG = 41, + PHYTMAC_MSG_CMD_SET_DISABLE_AUTONEG = 42, + PHYTMAC_MSG_CMD_SET_RX_DATA_OFFSET = 43, + PHYTMAC_MSG_CMD_SET_WOL = 44, +}; + +enum phytmac_get_subid { + PHYTMAC_MSG_CMD_GET_ADDR, + PHYTMAC_MSG_CMD_GET_QUEUENUMS, + 
PHYTMAC_MSG_CMD_GET_CAPS, + PHYTMAC_MSG_CMD_GET_BD_PREFETCH, + PHYTMAC_MSG_CMD_GET_STATS, + PHYTMAC_MSG_CMD_GET_REG_READ, +}; + +struct phytmac_interface_info { + u8 interface; + u8 autoneg; + u16 duplex; + u32 speed; +} __packed; + +struct phytmac_mc_info { + u32 mc_bottom; + u32 mc_top; +} __packed; + +struct phytmac_fdir_info { + u32 ip4src; + u32 ip4dst; + u16 srcport; + u16 srcport_mask; + u16 dstport; + u16 dstport_mask; + u8 location; + u8 queue; + u8 ipsrc_en; + u8 ipdst_en; + u8 port_en; +} __packed; + +struct phytmac_ts_config { + u8 tx_mode; + u8 rx_mode; + u8 one_step; +} __packed; + +struct phytmac_ring_info { + u64 addr[4]; + u8 queue_num; + u8 hw_dma_cap; +} __packed; + +struct phytmac_rxbuf_info { + u8 queue_num; + u8 buffer_size; +} __packed; + +struct phytmac_dma_info { + u16 dma_burst_length; + u8 hw_dma_cap; +} __packed; + +struct phytmac_eth_info { + u16 index; + u16 etype; +} __packed; + +struct phytmac_mac { + u32 addrl; + u16 addrh; +} __packed; + +struct phytmac_feature { + u8 irq_read_clear; + u8 dma_data_width; + u8 dma_addr_width; + u8 tx_pkt_buffer; + u8 rx_pkt_buffer; + u8 pbuf_lso; + u8 queue_num; + u8 tx_bd_prefetch; + u8 rx_bd_prefetch; + u8 max_rx_fs; +} __packed; + +struct phytmac_msg_info { + u16 module_id; + u16 cmd_id; + u16 cmd_subid; + u16 flags; + u8 para[64]; +} __packed; + +#endif diff --git a/drivers/pci/controller/pcie-phytium-ep.c b/drivers/pci/controller/pcie-phytium-ep.c index 545d987ec001398b5fc0d3a9a3a7ff79ea02f622..575d5d33c7de18bb7f4bd4d730a063d265cbb0d5 100644 --- a/drivers/pci/controller/pcie-phytium-ep.c +++ b/drivers/pci/controller/pcie-phytium-ep.c @@ -18,6 +18,8 @@ #include "pcie-phytium-ep.h" #include "pcie-phytium-register.h" +#define PHYTIUM_PCIE_RP_DRIVER_VERSION "1.1.1" + #define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_NONE 0x0 #define PHYTIUM_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x1 @@ -462,9 +464,10 @@ static struct platform_driver phytium_pcie_ep_driver = { .probe = phytium_pcie_ep_probe, .remove = 
phytium_pcie_ep_remove, }; - +MODULE_DEVICE_TABLE(of, phytium_pcie_ep_of_match); module_platform_driver(phytium_pcie_ep_driver); MODULE_LICENSE("GPL"); +MODULE_VERSION(PHYTIUM_PCIE_RP_DRIVER_VERSION); MODULE_AUTHOR("Yang Xun "); MODULE_DESCRIPTION("Phytium PCIe Controller Endpoint driver"); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index c3b0b10f95cb6bdf97f218af93d3162df51f52e2..5062e52477806fec51baac49271edacc4938a084 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -556,6 +556,10 @@ int pci_iov_init(struct pci_dev *dev) if (!pci_is_pcie(dev)) return -ENODEV; + if ((dev->vendor == PCI_VENDOR_ID_PHYTIUM) + && (dev->device == PCI_DEVICE_ID_PHYTIUM_PE220X)) + return -ENODEV; + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); if (pos) return sriov_init(dev, pos); diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 08ebaf7cca8baf14b4ac19345d38cdabf80c110e..39e3de6f5d5a9f915d06103d4bea8b08d0df13d2 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -102,4 +102,6 @@ config ARM_SPE_PMU Extension, which provides periodic sampling of operations in the CPU pipeline and reports this via the perf AUX interface. 
+source "drivers/perf/phytium/Kconfig" + endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index b3902bd37d5328b50b5e4df4170544300d3dca96..2f7a987ea3c7196e89816edc007c2dfa9837fde6 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o +obj-$(CONFIG_PHYTIUM_PMU) += phytium/ \ No newline at end of file diff --git a/drivers/perf/phytium/Kconfig b/drivers/perf/phytium/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..e713f99b204dbb99a763797a99984eeb8b8c7065 --- /dev/null +++ b/drivers/perf/phytium/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig PHYTIUM_PMU + bool "Phytium PMU support" + help + Say Y here if you want to support Phytium performance monitoring + unit (PMU) drivers. + +if PHYTIUM_PMU + +config PHYT_DDR_PMU + tristate "Phytium SoC DDR PMU driver" + depends on (ARCH_PHYTIUM && ACPI) || COMPILE_TEST + default m + help + Provides support for Phytium SoC DDR Controller performance + monitoring unit (PMU). + +config PHYT_PCIE_PMU + tristate "Phytium SoC PCIE PMU driver" + depends on (ARCH_PHYTIUM && ACPI) || COMPILE_TEST + default m + help + Provides support for Phytium SoC PCIe Controller performance + monitoring unit (PMU). 
+ +endif + diff --git a/drivers/perf/phytium/Makefile b/drivers/perf/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..af37afc6920c57169c16c053eb3ceb70dff064d2 --- /dev/null +++ b/drivers/perf/phytium/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +# +obj-$(CONFIG_PHYT_DDR_PMU) += phytium_ddr_pmu.o +obj-$(CONFIG_PHYT_PCIE_PMU) += phytium_pcie_pmu.o diff --git a/drivers/perf/phytium/phytium_ddr_pmu.c b/drivers/perf/phytium/phytium_ddr_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..69b2a1d6a4a4e8165faf2ea040089b518c253445 --- /dev/null +++ b/drivers/perf/phytium/phytium_ddr_pmu.c @@ -0,0 +1,754 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium SoC DDR performance monitoring unit support + * + * Copyright (c) 2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "phytium_ddr_pmu: " fmt + +#define PHYTIUM_DDR_MAX_COUNTERS 8 +#define DDR_PERF_DRIVER_VERSION "1.2.1" + +#define DDR_START_TIMER 0x000 +#define DDR_STOP_TIMER 0x004 +#define DDR_CLEAR_EVENT 0x008 +#define DDR_SET_TIMER_L 0x00c +#define DDR_SET_TIMER_H 0x010 +#define DDR_TRIG_MODE 0x014 +#define DDR_NOW_STATE 0x0e0 +#define DDR_EVENT_CYCLES 0x0e4 +#define DDR_TPOINT_END_L 0x0e4 +#define DDR_TPOINT_END_H 0x0e8 +#define DDR_STATE_STOP 0x0ec +#define DDR_EVENT_RXREQ 0x100 +#define DDR_EVENT_RXDAT 0x104 +#define DDR_EVENT_TXDAT 0x108 +#define DDR_EVENT_RXREQ_RNS 0x10c +#define DDR_EVENT_RXREQ_WNSP 0x110 +#define DDR_EVENT_RXREQ_WNSF 0x114 +#define DDR_EVENT_BANDWIDTH 0x200 +#define DDR_W_DATA_BASE 0x200 +#define DDR_CLK_FRE 0xe00 +#define DDR_DATA_WIDTH 0xe04 + +#define to_phytium_ddr_pmu(p) (container_of(p, struct phytium_ddr_pmu, pmu)) + +static int phytium_ddr_pmu_hp_state; + +struct 
phytium_ddr_pmu_hwevents { + struct perf_event *hw_events[PHYTIUM_DDR_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, PHYTIUM_DDR_MAX_COUNTERS); +}; + +struct phytium_ddr_pmu { + struct device *dev; + void __iomem *base; + void __iomem *csr_base; + struct pmu pmu; + struct phytium_ddr_pmu_hwevents pmu_events; + u32 die_id; + u32 ddr_id; + u32 pmu_id; + int bit_idx; + int on_cpu; + int irq; + struct hlist_node node; +}; + +#define GET_DDR_EVENTID(hwc) (hwc->config_base & 0x7) +#define EVENT_VALID(idx) ((idx >= 0) && (idx < PHYTIUM_DDR_MAX_COUNTERS)) + +static const u32 ddr_counter_reg_offset[] = { + DDR_EVENT_CYCLES, DDR_EVENT_RXREQ, DDR_EVENT_RXDAT, + DDR_EVENT_TXDAT, DDR_EVENT_RXREQ_RNS, DDR_EVENT_RXREQ_WNSP, + DDR_EVENT_RXREQ_WNSF, DDR_EVENT_BANDWIDTH +}; + +ssize_t phytium_ddr_pmu_format_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(buf, "%s\n", (char *)eattr->var); +} + +ssize_t phytium_ddr_pmu_event_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); +} + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phytium_ddr_pmu *ddr_pmu = + to_phytium_ddr_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(ddr_pmu->on_cpu)); +} + +#define PHYTIUM_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]){ \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } })[0] \ + .attr.attr) + +#define PHYTIUM_DDR_PMU_FORMAT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_ddr_pmu_format_sysfs_show, \ + (void *)_config) + +#define PHYTIUM_DDR_PMU_EVENT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_ddr_pmu_event_sysfs_show, 
\ + (unsigned long)_config) + +static struct attribute *phytium_ddr_pmu_format_attr[] = { + PHYTIUM_DDR_PMU_FORMAT_ATTR(event, "config:0-2"), + PHYTIUM_DDR_PMU_FORMAT_ATTR(timer, "config1:0-31"), + NULL, +}; + +static const struct attribute_group phytium_ddr_pmu_format_group = { + .name = "format", + .attrs = phytium_ddr_pmu_format_attr, +}; + +static struct attribute *phytium_ddr_pmu_events_attr[] = { + PHYTIUM_DDR_PMU_EVENT_ATTR(cycles, 0x00), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq, 0x01), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxdat, 0x02), + PHYTIUM_DDR_PMU_EVENT_ATTR(txdat, 0x03), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq_RNS, 0x04), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq_WNSP, 0x05), + PHYTIUM_DDR_PMU_EVENT_ATTR(rxreq_WNSF, 0x06), + PHYTIUM_DDR_PMU_EVENT_ATTR(bandwidth, 0x07), + NULL, +}; + +static const struct attribute_group phytium_ddr_pmu_events_group = { + .name = "events", + .attrs = phytium_ddr_pmu_events_attr, +}; + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *phytium_ddr_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group phytium_ddr_pmu_cpumask_attr_group = { + .attrs = phytium_ddr_pmu_cpumask_attrs, +}; + +static const struct attribute_group *phytium_ddr_pmu_attr_groups[] = { + &phytium_ddr_pmu_format_group, + &phytium_ddr_pmu_events_group, + &phytium_ddr_pmu_cpumask_attr_group, + NULL, +}; + +static u32 phytium_ddr_pmu_get_event_timer(struct perf_event *event) +{ + return FIELD_GET(GENMASK(31, 0), event->attr.config1); +} + +static u64 phytium_ddr_pmu_read_counter(struct phytium_ddr_pmu *ddr_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = GET_DDR_EVENTID(hwc); + u32 cycle_l, cycle_h, w_data, ddr_data_width; + u64 val64 = 0; + int i; + u32 counter_offset = ddr_counter_reg_offset[idx]; + + if (!EVENT_VALID(idx)) { + dev_err(ddr_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + switch (idx) { + case 0: + cycle_l = readl(ddr_pmu->base + counter_offset); + cycle_h = readl(ddr_pmu->base + 
counter_offset + 4); + val64 = (u64)cycle_h << 32 | (u64)cycle_l; + break; + case 7: + ddr_data_width = readl(ddr_pmu->base + DDR_DATA_WIDTH); + for (i = 0; i < (ddr_data_width / 8); i++) { + w_data = readl(ddr_pmu->base + counter_offset + 4 * i); + val64 += w_data; + } + break; + default: + val64 = readl(ddr_pmu->base + counter_offset); + break; + } + + return val64; +} + +static void phytium_ddr_pmu_enable_clk(struct phytium_ddr_pmu *ddr_pmu) +{ + u32 val; + + val = readl(ddr_pmu->csr_base); + val |= 0xF; + writel(val, ddr_pmu->csr_base); +} + +static void phytium_ddr_pmu_disable_clk(struct phytium_ddr_pmu *ddr_pmu) +{ + u32 val; + + val = readl(ddr_pmu->csr_base); + val &= ~(0xF); + writel(val, ddr_pmu->csr_base); +} + +static void phytium_ddr_pmu_clear_all_counters(struct phytium_ddr_pmu *ddr_pmu) +{ + writel(0x1, ddr_pmu->base + DDR_CLEAR_EVENT); +} + +static void phytium_ddr_pmu_start_all_counters(struct phytium_ddr_pmu *ddr_pmu) +{ + writel(0x1, ddr_pmu->base + DDR_START_TIMER); +} + +static void phytium_ddr_pmu_stop_all_counters(struct phytium_ddr_pmu *ddr_pmu) +{ + writel(0x1, ddr_pmu->base + DDR_STOP_TIMER); +} + +static void phytium_ddr_pmu_set_timer(struct phytium_ddr_pmu *ddr_pmu, + u32 th_val) +{ + u32 val; + + val = readl(ddr_pmu->base + DDR_SET_TIMER_L); + val = readl(ddr_pmu->base + DDR_SET_TIMER_H); + + writel(th_val, ddr_pmu->base + DDR_SET_TIMER_L); + writel(0, ddr_pmu->base + DDR_SET_TIMER_H); +} + +static void phytium_ddr_pmu_reset_timer(struct phytium_ddr_pmu *ddr_pmu) +{ + u32 val; + + val = readl(ddr_pmu->base + DDR_SET_TIMER_L); + val = readl(ddr_pmu->base + DDR_SET_TIMER_H); + + writel(0xFFFFFFFF, ddr_pmu->base + DDR_SET_TIMER_L); + writel(0xFFFFFFFF, ddr_pmu->base + DDR_SET_TIMER_H); +} + +static unsigned long +phytium_ddr_pmu_get_stop_state(struct phytium_ddr_pmu *ddr_pmu) +{ + unsigned long val; + + val = (unsigned long)readl(ddr_pmu->base + DDR_STATE_STOP); + return val; +} + +static unsigned long +phytium_ddr_pmu_get_irq_flag(struct 
phytium_ddr_pmu *ddr_pmu)
+{
+	unsigned long val;
+
+	val = (unsigned long)readl(ddr_pmu->csr_base + 4);
+	return val;
+}
+
+/* Claim the counter slot matching the event id; -EAGAIN if already in use. */
+static int phytium_ddr_pmu_mark_event(struct perf_event *event)
+{
+	struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu);
+	unsigned long *used_mask = ddr_pmu->pmu_events.used_mask;
+	struct hw_perf_event *hwc = &event->hw;
+
+	int idx = GET_DDR_EVENTID(hwc);
+
+	if (test_bit(idx, used_mask))
+		return -EAGAIN;
+
+	set_bit(idx, used_mask);
+
+	return idx;
+}
+
+/* Release a previously claimed counter slot. */
+static void phytium_ddr_pmu_unmark_event(struct phytium_ddr_pmu *ddr_pmu,
+					 int idx)
+{
+	if (!EVENT_VALID(idx)) {
+		dev_err(ddr_pmu->dev, "Unsupported event index:%d!\n", idx);
+		return;
+	}
+
+	clear_bit(idx, ddr_pmu->pmu_events.used_mask);
+}
+
+/*
+ * perf ->event_init callback: validate the requested event and bind it
+ * to the CPU that services this PMU.  Per-task and sampling modes are
+ * not supported by this uncore PMU.
+ */
+int phytium_ddr_pmu_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct phytium_ddr_pmu *ddr_pmu;
+	u32 event_timer;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EOPNOTSUPP;
+
+	ddr_pmu = to_phytium_ddr_pmu(event->pmu);
+
+	if (event->cpu < 0) {
+		dev_warn(ddr_pmu->dev, "Can't provide per-task data!\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Valid event ids are 0 .. PHYTIUM_DDR_MAX_COUNTERS - 1; using '>'
+	 * here would let config == PHYTIUM_DDR_MAX_COUNTERS through, which
+	 * GET_DDR_EVENTID() would silently alias to event 0.
+	 */
+	if (event->attr.config >= PHYTIUM_DDR_MAX_COUNTERS)
+		return -EINVAL;
+
+	if (ddr_pmu->on_cpu == -1)
+		return -EINVAL;
+
+	event_timer = phytium_ddr_pmu_get_event_timer(event);
+	if (event_timer != 0)
+		phytium_ddr_pmu_set_timer(ddr_pmu, event_timer);
+
+	hwc->idx = -1;
+	hwc->config_base = event->attr.config;
+
+	/* Route all accesses through the CPU owning this uncore PMU. */
+	event->cpu = ddr_pmu->on_cpu;
+
+	return 0;
+}
+
+/* Fold the current hardware counter value into the perf event count. */
+void phytium_ddr_pmu_event_update(struct perf_event *event)
+{
+	struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta;
+
+	delta = phytium_ddr_pmu_read_counter(ddr_pmu, hwc);
+	local64_add(delta, &event->count);
+}
+
+void phytium_ddr_pmu_event_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	
hwc->state = 0; + perf_event_update_userpage(event); +} + +void phytium_ddr_pmu_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->state |= PERF_HES_STOPPED; + + if (flags & PERF_EF_UPDATE) + phytium_ddr_pmu_event_update(event); +} + +int phytium_ddr_pmu_event_add(struct perf_event *event, int flags) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state |= PERF_HES_STOPPED; + + idx = phytium_ddr_pmu_mark_event(event); + if (idx < 0) + return idx; + + event->hw.idx = idx; + ddr_pmu->pmu_events.hw_events[idx] = event; + + return 0; +} + +void phytium_ddr_pmu_event_del(struct perf_event *event, int flags) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long val; + u32 event_timer; + + phytium_ddr_pmu_event_stop(event, PERF_EF_UPDATE); + val = phytium_ddr_pmu_get_irq_flag(ddr_pmu); + val = phytium_ddr_pmu_get_stop_state(ddr_pmu); + phytium_ddr_pmu_unmark_event(ddr_pmu, hwc->idx); + + event_timer = phytium_ddr_pmu_get_event_timer(event); + if (event_timer != 0) + phytium_ddr_pmu_reset_timer(ddr_pmu); + + perf_event_update_userpage(event); + ddr_pmu->pmu_events.hw_events[hwc->idx] = NULL; +} + +void phytium_ddr_pmu_enable(struct pmu *pmu) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(pmu); + int event_added = bitmap_weight(ddr_pmu->pmu_events.used_mask, + PHYTIUM_DDR_MAX_COUNTERS); + + if (event_added) { + phytium_ddr_pmu_clear_all_counters(ddr_pmu); + phytium_ddr_pmu_start_all_counters(ddr_pmu); + } +} + +void phytium_ddr_pmu_disable(struct pmu *pmu) +{ + struct phytium_ddr_pmu *ddr_pmu = to_phytium_ddr_pmu(pmu); + int event_added = bitmap_weight(ddr_pmu->pmu_events.used_mask, + PHYTIUM_DDR_MAX_COUNTERS); + + if (event_added) + phytium_ddr_pmu_stop_all_counters(ddr_pmu); +} + +static const struct acpi_device_id phytium_ddr_pmu_acpi_match[] = { + { + 
"PHYT0043", + }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, phytium_ddr_pmu_acpi_match); + +static irqreturn_t phytium_ddr_pmu_overflow_handler(int irq, void *dev_id) +{ + struct phytium_ddr_pmu *ddr_pmu = dev_id; + struct perf_event *event; + unsigned long overflown, stop_state; + int idx; + unsigned long *used_mask = ddr_pmu->pmu_events.used_mask; + + int event_added = bitmap_weight(used_mask, PHYTIUM_DDR_MAX_COUNTERS); + + overflown = phytium_ddr_pmu_get_irq_flag(ddr_pmu); + + if (!test_bit(ddr_pmu->bit_idx, &overflown)) + return IRQ_NONE; + + stop_state = phytium_ddr_pmu_get_stop_state(ddr_pmu); + + if (bitmap_weight(&stop_state, 6)) { + for_each_set_bit(idx, used_mask, PHYTIUM_DDR_MAX_COUNTERS) { + event = ddr_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + phytium_ddr_pmu_event_update(event); + } + phytium_ddr_pmu_clear_all_counters(ddr_pmu); + phytium_ddr_pmu_start_all_counters(ddr_pmu); + + return IRQ_HANDLED; + } + + if (!event_added) { + phytium_ddr_pmu_clear_all_counters(ddr_pmu); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int phytium_ddr_pmu_init_irq(struct phytium_ddr_pmu *ddr_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, + phytium_ddr_pmu_overflow_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, + dev_name(&pdev->dev), ddr_pmu); + if (ret < 0) { + dev_err(&pdev->dev, "Fail to request IRQ:%d ret:%d\n", irq, + ret); + return ret; + } + + ddr_pmu->irq = irq; + + return 0; +} + +static int phytium_ddr_pmu_init_data(struct platform_device *pdev, + struct phytium_ddr_pmu *ddr_pmu) +{ + struct resource *res, *clkres; + + if (device_property_read_u32(&pdev->dev, "phytium,die-id", + &ddr_pmu->die_id)) { + dev_err(&pdev->dev, "Can not read phytium,die-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "phytium,ddr-id", + &ddr_pmu->ddr_id)) { + dev_err(&pdev->dev, "Can not read 
phytium,ddr-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "phytium,pmu-id", + &ddr_pmu->pmu_id)) { + dev_err(&pdev->dev, "Can not read ddr pmu-id!\n"); + return -EINVAL; + } + + ddr_pmu->bit_idx = ddr_pmu->ddr_id * 2 + ddr_pmu->pmu_id; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ddr_pmu->base = devm_ioremap_resource(&pdev->dev, res); + + if (IS_ERR(ddr_pmu->base)) { + dev_err(&pdev->dev, + "ioremap failed for ddr_pmu base resource\n"); + return PTR_ERR(ddr_pmu->base); + } + + clkres = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!clkres) { + dev_err(&pdev->dev, "failed for get ddr_pmu clk resource.\n"); + return -EINVAL; + } + + ddr_pmu->csr_base = + devm_ioremap(&pdev->dev, clkres->start, resource_size(clkres)); + if (IS_ERR(ddr_pmu->csr_base)) { + dev_err(&pdev->dev, + "ioremap failed for ddr_pmu csr resource\n"); + return PTR_ERR(ddr_pmu->csr_base); + } + + return 0; +} + +static int phytium_ddr_pmu_dev_probe(struct platform_device *pdev, + struct phytium_ddr_pmu *ddr_pmu) +{ + int ret; + + ret = phytium_ddr_pmu_init_data(pdev, ddr_pmu); + if (ret) + return ret; + + ret = phytium_ddr_pmu_init_irq(ddr_pmu, pdev); + if (ret) + return ret; + + ddr_pmu->dev = &pdev->dev; + ddr_pmu->on_cpu = -1; + + return 0; +} + +static int phytium_ddr_pmu_probe(struct platform_device *pdev) +{ + struct phytium_ddr_pmu *ddr_pmu; + char *name; + int ret; + + ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL); + if (!ddr_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, ddr_pmu); + + ret = phytium_ddr_pmu_dev_probe(pdev, ddr_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance( + phytium_ddr_pmu_hp_state, &ddr_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "phyt%u_ddr%u_pmu%u", + ddr_pmu->die_id, ddr_pmu->ddr_id, + ddr_pmu->pmu_id); + ddr_pmu->pmu = (struct pmu){ + .name = name, + .module = 
THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = phytium_ddr_pmu_event_init, + .pmu_enable = phytium_ddr_pmu_enable, + .pmu_disable = phytium_ddr_pmu_disable, + .add = phytium_ddr_pmu_event_add, + .del = phytium_ddr_pmu_event_del, + .start = phytium_ddr_pmu_event_start, + .stop = phytium_ddr_pmu_event_stop, + .read = phytium_ddr_pmu_event_update, + .attr_groups = phytium_ddr_pmu_attr_groups, + }; + + ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); + if (ret) { + dev_err(ddr_pmu->dev, "DDR PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + phytium_ddr_pmu_hp_state, + &ddr_pmu->node); + } + + phytium_ddr_pmu_enable_clk(ddr_pmu); + + pr_info("Phytium DDR PMU: "); + pr_info(" die_id = %d ddr_id = %d pmu_id = %d.\n", ddr_pmu->die_id, + ddr_pmu->ddr_id, ddr_pmu->pmu_id); + + return ret; +} + +static int phytium_ddr_pmu_remove(struct platform_device *pdev) +{ + struct phytium_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev); + + phytium_ddr_pmu_disable_clk(ddr_pmu); + + perf_pmu_unregister(&ddr_pmu->pmu); + cpuhp_state_remove_instance_nocalls( + phytium_ddr_pmu_hp_state, &ddr_pmu->node); + + return 0; +} + +static struct platform_driver phytium_ddr_pmu_driver = { + .driver = { + .name = "phytium_ddr_pmu", + .acpi_match_table = ACPI_PTR(phytium_ddr_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = phytium_ddr_pmu_probe, + .remove = phytium_ddr_pmu_remove, +}; + +int phytium_ddr_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct phytium_ddr_pmu *ddr_pmu = + hlist_entry_safe(node, struct phytium_ddr_pmu, node); + + if (!cpumask_test_cpu(cpu, cpumask_of_node(ddr_pmu->die_id))) + return 0; + + if (ddr_pmu->on_cpu != -1) { + if (!cpumask_test_cpu(ddr_pmu->on_cpu, cpumask_of_node(ddr_pmu->die_id))) { + perf_pmu_migrate_context(&ddr_pmu->pmu, ddr_pmu->on_cpu, cpu); + ddr_pmu->on_cpu = cpu; + WARN_ON(irq_set_affinity_hint(ddr_pmu->irq, cpumask_of(cpu))); + } + return 0; + } + + ddr_pmu->on_cpu = cpu; + 
WARN_ON(irq_set_affinity_hint(ddr_pmu->irq, cpumask_of(cpu))); + + return 0; +} + +int phytium_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct phytium_ddr_pmu *ddr_pmu = + hlist_entry_safe(node, struct phytium_ddr_pmu, node); + unsigned int target; + cpumask_t available_cpus; + + if (ddr_pmu->on_cpu != cpu) + return 0; + + if (cpumask_and(&available_cpus, cpumask_of_node(ddr_pmu->die_id), cpu_online_mask) && + cpumask_andnot(&available_cpus, &available_cpus, cpumask_of(cpu))) + target = cpumask_last(&available_cpus); + else { + cpumask_andnot(&available_cpus, cpu_online_mask, cpumask_of(cpu)); + target = cpumask_last(&available_cpus); + } + + if (target >= nr_cpu_ids) { + dev_err(ddr_pmu->dev, "offline cpu%d with no target to migrate.\n", + cpu); + return 0; + } + + perf_pmu_migrate_context(&ddr_pmu->pmu, cpu, target); + WARN_ON(irq_set_affinity_hint(ddr_pmu->irq, cpumask_of(target))); + ddr_pmu->on_cpu = target; + + return 0; +} + +static int __init phytium_ddr_pmu_module_init(void) +{ + int ret; + + phytium_ddr_pmu_hp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/phytium/ddrpmu:online", + phytium_ddr_pmu_online_cpu, phytium_ddr_pmu_offline_cpu); + if (phytium_ddr_pmu_hp_state < 0) { + pr_err("DDR PMU: setup hotplug, phytium_ddr_pmu_hp_state = %d\n", + phytium_ddr_pmu_hp_state); + return phytium_ddr_pmu_hp_state; + } + + ret = platform_driver_register(&phytium_ddr_pmu_driver); + if (ret) + cpuhp_remove_multi_state( + phytium_ddr_pmu_hp_state); + + return ret; +} +module_init(phytium_ddr_pmu_module_init); + +static void __exit phytium_ddr_pmu_module_exit(void) +{ + platform_driver_unregister(&phytium_ddr_pmu_driver); + cpuhp_remove_multi_state(phytium_ddr_pmu_hp_state); +} +module_exit(phytium_ddr_pmu_module_exit); + +MODULE_DESCRIPTION("Phytium DDR PMU driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DDR_PERF_DRIVER_VERSION); +MODULE_AUTHOR("Hu Xianghua "); diff --git a/drivers/perf/phytium/phytium_pcie_pmu.c 
b/drivers/perf/phytium/phytium_pcie_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..9451e6d6c45d7a0d83f3b89f1e5174de4e8e8d66 --- /dev/null +++ b/drivers/perf/phytium/phytium_pcie_pmu.c @@ -0,0 +1,901 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Soc PCIe performance monitoring unit support + * + * Copyright (c) 2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "phytium_pcie_pmu: " fmt + +#define PCIE_PERF_DRIVER_VERSION "1.2.1" + +#define PHYTIUM_PCIE_MAX_COUNTERS 18 + +#define PCIE_START_TIMER 0x000 +#define PCIE_STOP_TIMER 0x004 +#define PCIE_CLEAR_EVENT 0x008 +#define PCIE_SET_TIMER_L 0x00c +#define PCIE_SET_TIMER_H 0x010 +#define PCIE_TRIG_MODE 0x014 + +#define PCIE_NOW_STATE 0x0e0 +#define PCIE_EVENT_CYCLES 0x0e4 +#define PCIE_TPOINT_END_L 0x0e4 +#define PCIE_TPOINT_END_H 0x0e8 +#define PCIE_STATE_STOP 0x0ec + +#define PCIE_EVENT_AW 0x100 +#define PCIE_EVENT_W_LAST 0x104 +#define PCIE_EVENT_B 0x108 +#define PCIE_EVENT_AR 0x10c +#define PCIE_EVENT_R_LAST 0x110 +#define PCIE_EVENT_R_FULL 0x114 +#define PCIE_EVENT_R_ERR 0x118 +#define PCIE_EVENT_W_ERR 0x11c +#define PCIE_EVENT_DELAY_RD 0x120 +#define PCIE_EVENT_DELAY_WR 0x124 +#define PCIE_EVENT_RD_MAX 0x128 +#define PCIE_EVENT_RD_MIN 0x12c +#define PCIE_EVENT_WR_MAX 0x130 +#define PCIE_EVENT_WR_MIN 0x134 + +#define PCIE_EVENT_W_DATA 0x200 +#define PCIE_W_DATA_BASE 0x200 + +#define PCIE_EVENT_RDELAY_TIME 0x300 +#define PCIE_RDELAY_TIME_BASE 0x300 + +#define PCIE_EVENT_WDELAY_TIME 0x700 +#define PCIE_WDELAY_TIME_BASE 0x700 + +#define PCIE_CLK_FRE 0xe00 +#define PCIE_DATA_WIDTH 0xe04 + +#define to_phytium_pcie_pmu(p) (container_of(p, struct phytium_pcie_pmu, pmu)) + +static int phytium_pcie_pmu_hp_state; + +struct phytium_pcie_pmu_hwevents { + struct 
perf_event *hw_events[PHYTIUM_PCIE_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, PHYTIUM_PCIE_MAX_COUNTERS); +}; + +struct phytium_pcie_pmu { + struct device *dev; + void __iomem *base; + void __iomem *csr_base; + void __iomem *irq_reg; + struct pmu pmu; + struct phytium_pcie_pmu_hwevents pmu_events; + u32 die_id; + u32 pmu_id; + int on_cpu; + int irq; + struct hlist_node node; + int ctrler_id; + int real_ctrler; + u32 clk_bits; +}; + +#define GET_PCIE_EVENTID(hwc) (hwc->config_base & 0x1F) + +#define EVENT_VALID(idx) ((idx >= 0) && (idx < PHYTIUM_PCIE_MAX_COUNTERS)) + +static const u32 pcie_counter_reg_offset[] = { + PCIE_EVENT_CYCLES, PCIE_EVENT_AW, PCIE_EVENT_W_LAST, + PCIE_EVENT_B, PCIE_EVENT_AR, PCIE_EVENT_R_LAST, + PCIE_EVENT_R_FULL, PCIE_EVENT_R_ERR, PCIE_EVENT_W_ERR, + PCIE_EVENT_DELAY_RD, PCIE_EVENT_DELAY_WR, PCIE_EVENT_RD_MAX, + PCIE_EVENT_RD_MIN, PCIE_EVENT_WR_MAX, PCIE_EVENT_WR_MIN, + PCIE_EVENT_W_DATA, PCIE_EVENT_RDELAY_TIME, PCIE_EVENT_WDELAY_TIME +}; + +ssize_t phytium_pcie_pmu_format_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(buf, "%s\n", (char *)eattr->var); +} + +ssize_t phytium_pcie_pmu_event_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); +} + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phytium_pcie_pmu *pcie_pmu = + to_phytium_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} + +#define PHYTIUM_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]){ \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } })[0] \ + .attr.attr) + +#define 
PHYTIUM_PCIE_PMU_FORMAT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_pcie_pmu_format_sysfs_show, \ + (void *)_config) + +#define PHYTIUM_PCIE_PMU_EVENT_ATTR(_name, _config) \ + PHYTIUM_PMU_ATTR(_name, phytium_pcie_pmu_event_sysfs_show, \ + (unsigned long)_config) + +static struct attribute *phytium_pcie_pmu_format_attr[] = { + PHYTIUM_PCIE_PMU_FORMAT_ATTR(event, "config:0-4"), + PHYTIUM_PCIE_PMU_FORMAT_ATTR(ctrler, "config:8-10"), + PHYTIUM_PCIE_PMU_FORMAT_ATTR(timer, "config1:0-31"), + NULL, +}; + +static const struct attribute_group phytium_pcie_pmu_format_group = { + .name = "format", + .attrs = phytium_pcie_pmu_format_attr, +}; + +static struct attribute *phytium_pcie_pmu_events_attr[] = { + PHYTIUM_PCIE_PMU_EVENT_ATTR(cycles, 0x00), + PHYTIUM_PCIE_PMU_EVENT_ATTR(aw, 0x01), + PHYTIUM_PCIE_PMU_EVENT_ATTR(w_last, 0x02), + PHYTIUM_PCIE_PMU_EVENT_ATTR(b, 0x03), + PHYTIUM_PCIE_PMU_EVENT_ATTR(ar, 0x04), + PHYTIUM_PCIE_PMU_EVENT_ATTR(r_last, 0x05), + PHYTIUM_PCIE_PMU_EVENT_ATTR(r_full, 0x06), + PHYTIUM_PCIE_PMU_EVENT_ATTR(r_err, 0x07), + PHYTIUM_PCIE_PMU_EVENT_ATTR(w_err, 0x08), + PHYTIUM_PCIE_PMU_EVENT_ATTR(delay_rd, 0x09), + PHYTIUM_PCIE_PMU_EVENT_ATTR(delay_wr, 0x0a), + PHYTIUM_PCIE_PMU_EVENT_ATTR(rd_max, 0x0b), + PHYTIUM_PCIE_PMU_EVENT_ATTR(rd_min, 0x0c), + PHYTIUM_PCIE_PMU_EVENT_ATTR(wr_max, 0x0d), + PHYTIUM_PCIE_PMU_EVENT_ATTR(wr_min, 0x0e), + PHYTIUM_PCIE_PMU_EVENT_ATTR(w_data, 0x0f), + PHYTIUM_PCIE_PMU_EVENT_ATTR(rdelay_time, 0x10), + PHYTIUM_PCIE_PMU_EVENT_ATTR(wdelay_time, 0x11), + NULL, +}; + +static const struct attribute_group phytium_pcie_pmu_events_group = { + .name = "events", + .attrs = phytium_pcie_pmu_events_attr, +}; + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *phytium_pcie_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group phytium_pcie_pmu_cpumask_attr_group = { + .attrs = phytium_pcie_pmu_cpumask_attrs, +}; + +static const struct attribute_group 
*phytium_pcie_pmu_attr_groups[] = { + &phytium_pcie_pmu_format_group, + &phytium_pcie_pmu_events_group, + &phytium_pcie_pmu_cpumask_attr_group, + NULL, +}; + +static u32 phytium_pcie_pmu_get_event_ctrler(struct perf_event *event) +{ + return FIELD_GET(GENMASK(10, 8), event->attr.config); +} + +static u32 phytium_pcie_pmu_get_event_timer(struct perf_event *event) +{ + return FIELD_GET(GENMASK(31, 0), event->attr.config1); +} + +static u64 phytium_pcie_pmu_read_counter(struct phytium_pcie_pmu *pcie_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = GET_PCIE_EVENTID(hwc); + u32 cycle_l, cycle_h, rdelay_l, rdelay_h, w_data, wdelay_l, wdelay_h, + pcie_data_width; + u64 val64 = 0; + int i; + u32 counter_offset = pcie_counter_reg_offset[idx]; + + if (!EVENT_VALID(idx)) { + dev_err(pcie_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + switch (idx) { + case 0: + cycle_l = readl(pcie_pmu->base + counter_offset); + cycle_h = readl(pcie_pmu->base + counter_offset + 4); + val64 = (u64)cycle_h << 32 | (u64)cycle_l; + break; + case 15: + pcie_data_width = readl(pcie_pmu->base + PCIE_DATA_WIDTH); + for (i = 0; i < (pcie_data_width / 8); i++) { + w_data = readl(pcie_pmu->base + counter_offset + 4 * i); + val64 += w_data; + } + break; + case 16: + for (i = 0; i <= 127; i = i + 2) { + rdelay_l = + readl(pcie_pmu->base + counter_offset + 4 * i); + rdelay_h = readl(pcie_pmu->base + counter_offset + + 4 * i + 4); + val64 += (u64)rdelay_h << 32 | (u64)rdelay_l; + } + break; + case 17: + for (i = 0; i <= 63; i++) { + wdelay_l = + readl(pcie_pmu->base + counter_offset + 4 * i); + wdelay_h = readl(pcie_pmu->base + counter_offset + + 4 * i + 4); + val64 += (u64)wdelay_h << 32 | (u64)wdelay_l; + } + break; + default: + val64 = readl(pcie_pmu->base + counter_offset); + break; + } + return val64; +} + +static void phytium_pcie_pmu_enable_clk(struct phytium_pcie_pmu *pcie_pmu) +{ + u32 val; + + val = readl(pcie_pmu->csr_base); + val |= (pcie_pmu->clk_bits); + writel(val, 
pcie_pmu->csr_base); +} + +static void phytium_pcie_pmu_disable_clk(struct phytium_pcie_pmu *pcie_pmu) +{ + u32 val; + + val = readl(pcie_pmu->csr_base); + val &= ~(pcie_pmu->clk_bits); + writel(val, pcie_pmu->csr_base); +} + +static void phytium_pcie_pmu_select_ctrler(struct phytium_pcie_pmu *pcie_pmu) +{ + u32 val, offset = 0; + + if (pcie_pmu->pmu_id != 2) + offset = 0xc; + + val = readl(pcie_pmu->csr_base + offset); + + if (pcie_pmu->pmu_id == 2) { + val &= 0xffffffcf; + val |= pcie_pmu->real_ctrler; + } else { + val &= 0xfffffffc; + val |= pcie_pmu->real_ctrler; + } + + writel(val, pcie_pmu->csr_base + offset); +} + +static void +phytium_pcie_pmu_clear_all_counters(struct phytium_pcie_pmu *pcie_pmu) +{ + writel(0x1, pcie_pmu->base + PCIE_CLEAR_EVENT); +} + +static void +phytium_pcie_pmu_start_all_counters(struct phytium_pcie_pmu *pcie_pmu) +{ + writel(0x1, pcie_pmu->base + PCIE_START_TIMER); +} + +static void +phytium_pcie_pmu_stop_all_counters(struct phytium_pcie_pmu *pcie_pmu) +{ + writel(0x1, pcie_pmu->base + PCIE_STOP_TIMER); +} + +static void phytium_pcie_pmu_set_timer(struct phytium_pcie_pmu *pcie_pmu, + u32 th_val) +{ + u32 val; + + val = readl(pcie_pmu->base + PCIE_SET_TIMER_L); + val = readl(pcie_pmu->base + PCIE_SET_TIMER_H); + + writel(th_val, pcie_pmu->base + PCIE_SET_TIMER_L); + writel(0, pcie_pmu->base + PCIE_SET_TIMER_H); +} + +static void phytium_pcie_pmu_reset_timer(struct phytium_pcie_pmu *pcie_pmu) +{ + u32 val; + + val = readl(pcie_pmu->base + PCIE_SET_TIMER_L); + val = readl(pcie_pmu->base + PCIE_SET_TIMER_H); + + writel(0xFFFFFFFF, pcie_pmu->base + PCIE_SET_TIMER_L); + writel(0xFFFFFFFF, pcie_pmu->base + PCIE_SET_TIMER_H); +} + +static unsigned long +phytium_pcie_pmu_get_stop_state(struct phytium_pcie_pmu *pcie_pmu) +{ + unsigned long val; + + val = (unsigned long)readl(pcie_pmu->base + PCIE_STATE_STOP); + return val; +} + +static unsigned long +phytium_pcie_pmu_get_irq_flag(struct phytium_pcie_pmu *pcie_pmu) +{ + unsigned long val; + + 
val = (unsigned long)readl(pcie_pmu->irq_reg); + return val; +} + +static int phytium_pcie_pmu_mark_event(struct perf_event *event) +{ + struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu); + unsigned long *used_mask = pcie_pmu->pmu_events.used_mask; + struct hw_perf_event *hwc = &event->hw; + + int idx = GET_PCIE_EVENTID(hwc); + + if (test_bit(idx, used_mask)) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static void phytium_pcie_pmu_unmark_event(struct phytium_pcie_pmu *pcie_pmu, + int idx) +{ + if (!EVENT_VALID(idx)) { + dev_err(pcie_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + clear_bit(idx, pcie_pmu->pmu_events.used_mask); +} + +int phytium_pcie_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct phytium_pcie_pmu *pcie_pmu; + u32 event_ctrler, event_timer; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + pcie_pmu = to_phytium_pcie_pmu(event->pmu); + + if (event->cpu < 0) { + dev_warn(pcie_pmu->dev, "Can't provide per-task data!\n"); + return -EINVAL; + } + + if ((event->attr.config & 0x1F) > PHYTIUM_PCIE_MAX_COUNTERS) + return -EINVAL; + + if (pcie_pmu->on_cpu == -1) + return -EINVAL; + + event_timer = phytium_pcie_pmu_get_event_timer(event); + if (event_timer != 0) + phytium_pcie_pmu_set_timer(pcie_pmu, event_timer); + + event_ctrler = phytium_pcie_pmu_get_event_ctrler(event); + switch (pcie_pmu->pmu_id) { + case 0: + if (event_ctrler != 0) { + dev_warn(pcie_pmu->dev, + "Wrong ctrler id(%d) for pcie-pmu0!\n", + event_ctrler); + return -EINVAL; + } + break; + case 1: + if ((event_ctrler < 1) || (event_ctrler > 3)) { + dev_warn(pcie_pmu->dev, + "Wrong ctrler id(%d) for pcie-pmu1!\n", + event_ctrler); + return -EINVAL; + } + break; + case 2: + if ((event_ctrler < 4) || (event_ctrler > 7)) { + dev_warn(pcie_pmu->dev, + "Wrong ctrler id(%d) for 
pcie-pmu2!\n", + event_ctrler); + return -EINVAL; + } + break; + default: + dev_err(pcie_pmu->dev, "Unsupported pmu id:%d!\n", + pcie_pmu->pmu_id); + return -EINVAL; + } + + pcie_pmu->ctrler_id = event_ctrler; + switch (pcie_pmu->pmu_id) { + case 0: + case 1: + pcie_pmu->real_ctrler = pcie_pmu->ctrler_id; + break; + case 2: + pcie_pmu->real_ctrler = (pcie_pmu->ctrler_id - 4) * 16; + break; + default: + dev_err(pcie_pmu->dev, "Unsupported pmu id:%d!\n", + pcie_pmu->pmu_id); + return -EINVAL; + } + phytium_pcie_pmu_select_ctrler(pcie_pmu); + + hwc->idx = -1; + hwc->config_base = event->attr.config; + + event->cpu = pcie_pmu->on_cpu; + return 0; +} + +void phytium_pcie_pmu_event_update(struct perf_event *event) +{ + struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta; + + delta = phytium_pcie_pmu_read_counter(pcie_pmu, hwc); + local64_add(delta, &event->count); +} + +void phytium_pcie_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->state = 0; + perf_event_update_userpage(event); +} + +void phytium_pcie_pmu_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->state |= PERF_HES_STOPPED; + + if (flags & PERF_EF_UPDATE) + phytium_pcie_pmu_event_update(event); +} + +int phytium_pcie_pmu_event_add(struct perf_event *event, int flags) +{ + struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state |= PERF_HES_STOPPED; + + idx = phytium_pcie_pmu_mark_event(event); + if (idx < 0) + return idx; + + event->hw.idx = idx; + pcie_pmu->pmu_events.hw_events[idx] = event; + + return 0; +} + +void phytium_pcie_pmu_event_del(struct perf_event *event, int flags) +{ + struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + unsigned long val; + u32 event_timer; + + 
phytium_pcie_pmu_event_stop(event, PERF_EF_UPDATE); + val = phytium_pcie_pmu_get_irq_flag(pcie_pmu); + val = phytium_pcie_pmu_get_stop_state(pcie_pmu); + phytium_pcie_pmu_unmark_event(pcie_pmu, hwc->idx); + + event_timer = phytium_pcie_pmu_get_event_timer(event); + if (event_timer != 0) + phytium_pcie_pmu_reset_timer(pcie_pmu); + + perf_event_update_userpage(event); + pcie_pmu->pmu_events.hw_events[hwc->idx] = NULL; +} + +void phytium_pcie_pmu_enable(struct pmu *pmu) +{ + struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(pmu); + int event_added = bitmap_weight(pcie_pmu->pmu_events.used_mask, + PHYTIUM_PCIE_MAX_COUNTERS); + + if (event_added) { + phytium_pcie_pmu_clear_all_counters(pcie_pmu); + phytium_pcie_pmu_start_all_counters(pcie_pmu); + } +} + +void phytium_pcie_pmu_disable(struct pmu *pmu) +{ + struct phytium_pcie_pmu *pcie_pmu = to_phytium_pcie_pmu(pmu); + int event_added = bitmap_weight(pcie_pmu->pmu_events.used_mask, + PHYTIUM_PCIE_MAX_COUNTERS); + + if (event_added) + phytium_pcie_pmu_stop_all_counters(pcie_pmu); +} + +static const struct acpi_device_id phytium_pcie_pmu_acpi_match[] = { + { + "PHYT0044", + }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, phytium_pcie_pmu_acpi_match); + +static irqreturn_t phytium_pcie_pmu_overflow_handler(int irq, void *dev_id) +{ + struct phytium_pcie_pmu *pcie_pmu = dev_id; + struct perf_event *event; + unsigned long overflown, stop_state; + int idx; + unsigned long *used_mask = pcie_pmu->pmu_events.used_mask; + int event_added = bitmap_weight(used_mask, PHYTIUM_PCIE_MAX_COUNTERS); + + overflown = phytium_pcie_pmu_get_irq_flag(pcie_pmu); + + if (!test_bit(pcie_pmu->pmu_id + 4, &overflown)) + return IRQ_NONE; + + stop_state = phytium_pcie_pmu_get_stop_state(pcie_pmu); + + if (bitmap_weight(&stop_state, 6)) { + for_each_set_bit(idx, used_mask, PHYTIUM_PCIE_MAX_COUNTERS) { + event = pcie_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + phytium_pcie_pmu_event_update(event); + } + 
phytium_pcie_pmu_clear_all_counters(pcie_pmu); + phytium_pcie_pmu_start_all_counters(pcie_pmu); + + return IRQ_HANDLED; + } + if (!event_added) { + phytium_pcie_pmu_clear_all_counters(pcie_pmu); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static int phytium_pcie_pmu_init_irq(struct phytium_pcie_pmu *pcie_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, + phytium_pcie_pmu_overflow_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, + dev_name(&pdev->dev), pcie_pmu); + if (ret < 0) { + dev_err(&pdev->dev, "Fail to request IRQ:%d ret:%d\n", irq, + ret); + return ret; + } + + pcie_pmu->irq = irq; + + return 0; +} + +static int phytium_pcie_pmu_init_data(struct platform_device *pdev, + struct phytium_pcie_pmu *pcie_pmu) +{ + struct resource *res, *clkres, *irqres; + + if (device_property_read_u32(&pdev->dev, "phytium,die-id", + &pcie_pmu->die_id)) { + dev_err(&pdev->dev, "Can not read phytium,die-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "phytium,pmu-id", + &pcie_pmu->pmu_id)) { + dev_err(&pdev->dev, "Can not read phytium,pmu-id!\n"); + return -EINVAL; + } + + switch (pcie_pmu->pmu_id) { + case 0: + pcie_pmu->clk_bits = 0x1; + break; + case 1: + pcie_pmu->clk_bits = 0xe; + break; + case 2: + pcie_pmu->clk_bits = 0xf; + break; + default: + dev_err(&pdev->dev, "Unsupported pmu id:%d!\n", + pcie_pmu->pmu_id); + break; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pcie_pmu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pcie_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for pcie_pmu resource\n"); + return PTR_ERR(pcie_pmu->base); + } + + clkres = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!clkres) { + dev_err(&pdev->dev, "failed for get pcie_pmu clk resource.\n"); + return -EINVAL; + } + + pcie_pmu->csr_base = + devm_ioremap(&pdev->dev, clkres->start, 
resource_size(clkres)); + if (IS_ERR(pcie_pmu->csr_base)) { + dev_err(&pdev->dev, + "ioremap failed for pcie_pmu csr resource\n"); + return PTR_ERR(pcie_pmu->csr_base); + } + + irqres = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!irqres) { + dev_err(&pdev->dev, + "failed for get pcie_pmu irq reg resource.\n"); + return -EINVAL; + } + + pcie_pmu->irq_reg = + devm_ioremap(&pdev->dev, irqres->start, resource_size(irqres)); + if (IS_ERR(pcie_pmu->irq_reg)) { + dev_err(&pdev->dev, + "ioremap failed for pcie_pmu irq resource\n"); + return PTR_ERR(pcie_pmu->irq_reg); + } + + return 0; +} + +static int phytium_pcie_pmu_dev_probe(struct platform_device *pdev, + struct phytium_pcie_pmu *pcie_pmu) +{ + int ret; + + ret = phytium_pcie_pmu_init_data(pdev, pcie_pmu); + if (ret) + return ret; + + ret = phytium_pcie_pmu_init_irq(pcie_pmu, pdev); + if (ret) + return ret; + pcie_pmu->dev = &pdev->dev; + pcie_pmu->on_cpu = -1; + pcie_pmu->ctrler_id = -1; + + return 0; +} + +static int phytium_pcie_pmu_probe(struct platform_device *pdev) +{ + struct phytium_pcie_pmu *pcie_pmu; + char *name; + int ret; + + pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, pcie_pmu); + + ret = phytium_pcie_pmu_dev_probe(pdev, pcie_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance( + phytium_pcie_pmu_hp_state, &pcie_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "phyt%u_pcie_pmu%u", + pcie_pmu->die_id, pcie_pmu->pmu_id); + pcie_pmu->pmu = (struct pmu){ + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = phytium_pcie_pmu_event_init, + .pmu_enable = phytium_pcie_pmu_enable, + .pmu_disable = phytium_pcie_pmu_disable, + .add = phytium_pcie_pmu_event_add, + .del = phytium_pcie_pmu_event_del, + .start = phytium_pcie_pmu_event_start, + .stop = 
phytium_pcie_pmu_event_stop, + .read = phytium_pcie_pmu_event_update, + .attr_groups = phytium_pcie_pmu_attr_groups, + }; + + ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); + if (ret) { + dev_err(pcie_pmu->dev, "PCIE PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + phytium_pcie_pmu_hp_state, + &pcie_pmu->node); + } + + phytium_pcie_pmu_enable_clk(pcie_pmu); + + pr_info("Phytium PCIe PMU: "); + pr_info("die_id = %d pmu_id = %d.\n", pcie_pmu->die_id, + pcie_pmu->pmu_id); + + return ret; +} + +static int phytium_pcie_pmu_remove(struct platform_device *pdev) +{ + struct phytium_pcie_pmu *pcie_pmu = platform_get_drvdata(pdev); + + phytium_pcie_pmu_disable_clk(pcie_pmu); + + perf_pmu_unregister(&pcie_pmu->pmu); + cpuhp_state_remove_instance_nocalls( + phytium_pcie_pmu_hp_state, &pcie_pmu->node); + + return 0; +} + +static struct platform_driver phytium_pcie_pmu_driver = { + .driver = { + .name = "phytium_pcie_pmu", + .acpi_match_table = ACPI_PTR(phytium_pcie_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = phytium_pcie_pmu_probe, + .remove = phytium_pcie_pmu_remove, +}; + +int phytium_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct phytium_pcie_pmu *pcie_pmu = + hlist_entry_safe(node, struct phytium_pcie_pmu, node); + + if (!cpumask_test_cpu(cpu, cpumask_of_node(pcie_pmu->die_id))) + return 0; + + if (pcie_pmu->on_cpu != -1) { + if (!cpumask_test_cpu(pcie_pmu->on_cpu, cpumask_of_node(pcie_pmu->die_id))) { + perf_pmu_migrate_context(&pcie_pmu->pmu, pcie_pmu->on_cpu, cpu); + pcie_pmu->on_cpu = cpu; + WARN_ON(irq_set_affinity_hint(pcie_pmu->irq, cpumask_of(cpu))); + } + return 0; + } + + pcie_pmu->on_cpu = cpu; + WARN_ON(irq_set_affinity_hint(pcie_pmu->irq, cpumask_of(cpu))); + + return 0; +} + +int phytium_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct phytium_pcie_pmu *pcie_pmu = + hlist_entry_safe(node, struct phytium_pcie_pmu, node); + unsigned int target; + cpumask_t 
available_cpus; + + if (pcie_pmu->on_cpu != cpu) + return 0; + + if (cpumask_and(&available_cpus, cpumask_of_node(pcie_pmu->die_id), cpu_online_mask) && + cpumask_andnot(&available_cpus, &available_cpus, cpumask_of(cpu))) + target = cpumask_last(&available_cpus); + else { + cpumask_andnot(&available_cpus, cpu_online_mask, cpumask_of(cpu)); + target = cpumask_last(&available_cpus); + } + + if (target >= nr_cpu_ids) { + dev_err(pcie_pmu->dev, "offline cpu%d with no target to migrate.\n", + cpu); + return 0; + } + + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + WARN_ON(irq_set_affinity_hint(pcie_pmu->irq, cpumask_of(target))); + pcie_pmu->on_cpu = target; + + return 0; +} + +static int __init phytium_pcie_pmu_module_init(void) +{ + int ret; + + phytium_pcie_pmu_hp_state = + cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/phytium/pciepmu:online", + phytium_pcie_pmu_online_cpu, phytium_pcie_pmu_offline_cpu); + if (phytium_pcie_pmu_hp_state < 0) { + pr_err("PCIE PMU: setup hotplug, ret = %d\n", + phytium_pcie_pmu_hp_state); + return phytium_pcie_pmu_hp_state; + } + + ret = platform_driver_register(&phytium_pcie_pmu_driver); + if (ret) + cpuhp_remove_multi_state( + phytium_pcie_pmu_hp_state); + + return ret; +} +module_init(phytium_pcie_pmu_module_init); + +static void __exit phytium_pcie_pmu_module_exit(void) +{ + platform_driver_unregister(&phytium_pcie_pmu_driver); + cpuhp_remove_multi_state(phytium_pcie_pmu_hp_state); +} +module_exit(phytium_pcie_pmu_module_exit); + +MODULE_DESCRIPTION("Phytium PCIe PMU driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(PCIE_PERF_DRIVER_VERSION); +MODULE_AUTHOR("Hu Xianghua "); diff --git a/drivers/pwm/pwm-phytium.c b/drivers/pwm/pwm-phytium.c index 10b74e13ccb65dc10628f395c8f1c550a7039a9a..0f25489e9d01bb8f402cf27f9a86eed59d5934c0 100644 --- a/drivers/pwm/pwm-phytium.c +++ b/drivers/pwm/pwm-phytium.c @@ -47,6 +47,8 @@ #define PWM_N(x) ((0x400)*(x)) #define MAX_PARAMETER 2 +#define PWM_DRIVER_VERSION "1.1.1" + struct 
phytium_pwm_state { int rst; int cntmod; @@ -570,3 +572,4 @@ module_platform_driver(pwm_phytium_driver); MODULE_DESCRIPTION("Phytium SoC PWM driver"); MODULE_AUTHOR("Yang Liu "); MODULE_LICENSE("GPL v2"); +MODULE_VERSION(PWM_DRIVER_VERSION); diff --git a/drivers/spi/spi-phytium-pci.c b/drivers/spi/spi-phytium-pci.c index f6bec235442bba3506acca6969fcf4c3dd6b68d3..50797a4310540274e68e9e79584daf0b24d999d8 100644 --- a/drivers/spi/spi-phytium-pci.c +++ b/drivers/spi/spi-phytium-pci.c @@ -26,6 +26,7 @@ #include "spi-phytium.h" #define DRIVER_NAME "phytium_spi_pci" +#define DRIVER_VERSION "1.0.0" static int phytium_spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -107,6 +108,7 @@ static const struct pci_device_id phytium_device_pci_tbl[] = { { PCI_VDEVICE(PHYTIUM, 0xdc2c) }, {}, }; +MODULE_DEVICE_TABLE(pci, phytium_device_pci_tbl); static struct pci_driver phytium_spi_pci_driver = { .name = DRIVER_NAME, @@ -123,3 +125,4 @@ module_pci_driver(phytium_spi_pci_driver); MODULE_AUTHOR("Yiqun Zhang "); MODULE_DESCRIPTION("PCI Driver for Phytium SPI controller core"); MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/spi/spi-phytium-plat.c b/drivers/spi/spi-phytium-plat.c index 6993741161a99eaa0361f301d8c9457b70dc9b39..2b2517c6f9007c60082559c943533f5c36904459 100644 --- a/drivers/spi/spi-phytium-plat.c +++ b/drivers/spi/spi-phytium-plat.c @@ -28,6 +28,7 @@ #include "spi-phytium.h" #define DRIVER_NAME "phytium_spi" +#define DRIVER_VERSION "1.0.0" struct phytium_spi_clk { struct phytium_spi fts; @@ -213,3 +214,4 @@ module_platform_driver(phytium_spi_driver); MODULE_AUTHOR("Yiqun Zhang "); MODULE_DESCRIPTION("Platform Driver for Phytium SPI controller core"); MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRIVER_VERSION); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index dc47c5493ae280e12662db9294873b089cdba88f..72f14cbf5577edab629ff40672bb15d3423e27df 100644 --- a/drivers/tee/optee/core.c +++ 
b/drivers/tee/optee/core.c @@ -523,7 +523,7 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1, #if defined(CONFIG_OPTEE_DEFAULT_METHOD_HVC) #define DEFAULT_CONDUIT_METHOD optee_smccc_hvc #elif defined(CONFIG_OPTEE_DEFAULT_METHOD_SMC) -#define DEFAULT_CONDUIT_METHOD optee_smccc_hvc +#define DEFAULT_CONDUIT_METHOD optee_smccc_smc #else #define DEFAULT_CONDUIT_METHOD ERR_PTR(-ENXIO) #endif diff --git a/drivers/usb/phytium/host.c b/drivers/usb/phytium/host.c index 73059131bf3f68d43b718f3354fb3c48098df5e0..831b239c95e60bbb42d9faf2703c80aafb7eaf03 100644 --- a/drivers/usb/phytium/host.c +++ b/drivers/usb/phytium/host.c @@ -23,6 +23,75 @@ #define HOST_EP_NUM 16 +static void dump_ep_remap_pool(struct HOST_CTRL *priv, bool dirIn) +{ + int index, dir = 0; + + if (!dirIn) + dir = 1; + + pr_info("%s dir endpoint remap table\n", dir ? "OUT" : "IN"); + + for (index = 1; index <= MAX_INSTANCE_EP_NUM; index++) + pr_info("ep_remap_pool[%d][%d]->%d\n", dir, index, + priv->ep_remap_pool[dir][index]); +} + +static int get_epnum_from_pool(struct HOST_CTRL *priv, int real_epNum, bool dirIn) +{ + int index, dir = 0; + int ret = 0; + + if (!priv || real_epNum == 0) + return 0; + + if (!dirIn) + dir = 1; + + for (index = 1; index <= MAX_INSTANCE_EP_NUM; index++) { + if (priv->ep_remap_pool[dir][index] == real_epNum) { + ret = index; + goto out; + } + } + + for (index = 1; index <= MAX_INSTANCE_EP_NUM; index++) { + if (!priv->ep_remap_pool[dir][index]) { + priv->ep_remap_pool[dir][index] = real_epNum; + ret = index; + goto out; + } + } + + if (index > MAX_INSTANCE_EP_NUM) + return index; + +out: + return ret; +} + +static int release_epnum_from_pool(struct HOST_CTRL *priv, int real_epNum, bool dirIn) +{ + int index = 0; + int dir = 0; + + if (!priv || real_epNum == 0) + return 0; + + if (!dirIn) + dir = 1; + + for (index = 1; index <= MAX_INSTANCE_EP_NUM; index++) { + if (priv->ep_remap_pool[dir][index] == real_epNum) { + priv->ep_remap_pool[dir][index] = 0; + + return 0; + 
} + } + + return 0; +} + static inline struct HOST_REQ *getUsbRequestEntry(struct list_head *list) { return (struct HOST_REQ *)((uintptr_t)list - (uintptr_t)&(((struct HOST_REQ *)0)->list)); @@ -220,6 +289,8 @@ static inline void connectHostDetect(struct HOST_CTRL *priv, uint8_t otgState) if (!priv) return; + + memset(priv->ep_remap_pool, 0, sizeof(priv->ep_remap_pool)); pr_debug("otgState:0x%x pirv->otgState:0x%x\n", otgState, priv->otgState); if (priv->custom_regs) { phytium_write32(&priv->custom_regs->wakeup, 0); @@ -571,7 +642,7 @@ static void hostEpProgram(struct HOST_CTRL *priv, struct HostEp *hwEp, phytium_write8(&priv->regs->ep[hwEp->hwEpNum - 1].txcon, regCon); if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { retval = priv->hostCallbacks.getEpToggle(priv, - usbReq->usbDev, usbEpPriv->epNum, 0); + usbReq->usbDev, usbHEp->device_epNum, 0); if (retval) { phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | ENDPRST_IO_TX); @@ -592,7 +663,7 @@ static void hostEpProgram(struct HOST_CTRL *priv, struct HostEp *hwEp, usbEpPriv->maxPacketSize); phytium_write8(&priv->regs->epExt[hwEp->hwEpNum - 1].txctrl, - usbEpPriv->epNum); + usbHEp->device_epNum); phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); @@ -629,7 +700,7 @@ static void hostEpProgram(struct HOST_CTRL *priv, struct HostEp *hwEp, if (usbEpPriv->type != USB_ENDPOINT_XFER_ISOC) { if (priv->hostCallbacks.getEpToggle) { retval = priv->hostCallbacks.getEpToggle(priv, - usbReq->usbDev, usbEpPriv->epNum, 1); + usbReq->usbDev, usbHEp->device_epNum, 1); if (retval) { phytium_write8(&priv->regs->endprst, hwEp->hwEpNum); phytium_write8(&priv->regs->endprst, hwEp->hwEpNum | @@ -646,7 +717,7 @@ static void hostEpProgram(struct HOST_CTRL *priv, struct HostEp *hwEp, usbEpPriv->maxPacketSize); phytium_write8(&priv->regs->epExt[hwEp->hwEpNum - 1].rxctrl, - usbEpPriv->epNum); + usbHEp->device_epNum); phytium_write8(&priv->regs->fnaddr, usbEpPriv->faddress); @@ -837,7 +908,7 @@ static void scheduleNextTransfer(struct 
HOST_CTRL *priv, endprst = (phytium_read8(&priv->regs->endprst) & ENDPRST_TOGSETQ) ? 1 : 0; if (priv->hostCallbacks.setEpToggle) priv->hostCallbacks.setEpToggle(priv, usbReq->usbDev, - usbHEpPriv->epNum, usbHEpPriv->isIn, endprst); + usbEp->device_epNum, usbHEpPriv->isIn, endprst); } else { if (waitForBusyBit(priv, hwEp) > 0) { usbReq->status = HOST_ESHUTDOWN; @@ -849,7 +920,7 @@ static void scheduleNextTransfer(struct HOST_CTRL *priv, endprst = (phytium_read8(&priv->regs->endprst) & ENDPRST_TOGSETQ) ? 1 : 0; if (priv->hostCallbacks.setEpToggle) priv->hostCallbacks.setEpToggle(priv, usbReq->usbDev, - usbHEpPriv->epNum, usbHEpPriv->isIn, endprst); + usbEp->device_epNum, usbHEpPriv->isIn, endprst); } break; } @@ -1169,9 +1240,8 @@ static void host_endpoint_update(struct phytium_cusb *config, if (!config || !udev || !ep) return; - epnum = usb_endpoint_num(&ep->desc); - if (epnum > MAX_INSTANCE_EP_NUM) - epnum = MAX_INSTANCE_EP_NUM; + epnum = get_epnum_from_pool(config->host_priv, usb_endpoint_num(&ep->desc), + usb_endpoint_dir_in(&ep->desc)); if (usb_endpoint_dir_out(&ep->desc)) { if (udev->out_ep[epnum] == NULL) { @@ -1243,9 +1313,13 @@ static int hc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) req->isoFramesDesc = NULL; req->isoFramesNumber = urb->number_of_packets; - req->epNum = usb_endpoint_num(host_ep_desc); - if (req->epNum > MAX_INSTANCE_EP_NUM) + req->epNum = get_epnum_from_pool(config->host_priv, usb_endpoint_num(host_ep_desc), + usb_endpoint_dir_in(host_ep_desc)); + if (req->epNum > MAX_INSTANCE_EP_NUM) { + pr_err("Not enough endpoint resource for remap\n"); + dump_ep_remap_pool(priv, usb_endpoint_num(host_ep_desc)); req->epNum = MAX_INSTANCE_EP_NUM; + } if (usb_endpoint_dir_in(host_ep_desc)) { if (!usbDev->in_ep[req->epNum]) @@ -1280,6 +1354,7 @@ static int hc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) req->status = EINPROGRESS; req->usbDev = &usbDev->udev; req->usbEp = req->epIsIn ? 
usbDev->in_ep[req->epNum] : usbDev->out_ep[req->epNum]; + req->usbEp->device_epNum = usb_endpoint_num(host_ep_desc); if (!req->epNum) usbDev->ep0_hep.desc.wMaxPacketSize = urb->dev->ep0.desc.wMaxPacketSize; @@ -1321,6 +1396,10 @@ static int hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) goto err_giveback; ret = config->host_obj->host_reqDequeue(priv, urb->hcpriv, status); + + release_epnum_from_pool(config->host_priv, usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe)); + kfree(urb->hcpriv); urb->hcpriv = NULL; done: @@ -1338,10 +1417,18 @@ static int hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) static void hc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ld_ep) { struct HOST_USB_DEVICE *usbDev; - int ep_num = usb_endpoint_num(&ld_ep->desc); + int ep_num; + struct phytium_cusb *config; + struct HOST_CTRL *priv; + + config = *(struct phytium_cusb **)hcd->hcd_priv; + if (!config) + return; - if (ep_num > MAX_INSTANCE_EP_NUM) - ep_num = MAX_INSTANCE_EP_NUM; + ep_num = get_epnum_from_pool(config->host_priv, usb_endpoint_num(&ld_ep->desc), + usb_endpoint_dir_in(&ld_ep->desc)); + + priv = config->host_priv; usbDev = (struct HOST_USB_DEVICE *)ld_ep->hcpriv; if (!usbDev) @@ -1349,18 +1436,20 @@ static void hc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *l if (ld_ep->desc.bEndpointAddress) { if (usb_endpoint_dir_in(&ld_ep->desc)) { - if (!usbDev->in_ep[ep_num]) { + if (usbDev->in_ep[ep_num]) { usbDev->in_ep[ep_num]->userExt = NULL; INIT_LIST_HEAD(&usbDev->in_ep[ep_num]->reqList); kfree(usbDev->in_ep[ep_num]); usbDev->in_ep[ep_num] = NULL; + priv->ep_remap_pool[0][ep_num] = 0; } } else { - if (!usbDev->out_ep[ep_num]) { + if (usbDev->out_ep[ep_num]) { usbDev->out_ep[ep_num]->userExt = NULL; INIT_LIST_HEAD(&usbDev->out_ep[ep_num]->reqList); kfree(usbDev->out_ep[ep_num]); usbDev->out_ep[ep_num] = NULL; + priv->ep_remap_pool[1][ep_num] = 0; } } } @@ -1699,6 +1788,7 @@ static uint32_t 
initEndpoints(struct HOST_CTRL *priv) priv->hwEpOutCount = 0; phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | 0); phytium_write8(&priv->regs->ep0fifoctrl, FIFOCTRL_FIFOAUTO | FIFOCTRL_IO_TX | 0); + memset(priv->ep_remap_pool, 0, sizeof(priv->ep_remap_pool)); for (epNum = 0; epNum < HOST_EP_NUM; epNum++) { priv->in[epNum].isInEp = 1; @@ -1958,6 +2048,12 @@ unsigned int get_endpoint_interval(struct usb_endpoint_descriptor desc, int spee if (usb_endpoint_xfer_control(&desc) || usb_endpoint_xfer_bulk(&desc)) { if (desc.bInterval == 0) return interval; + + if (usb_endpoint_xfer_bulk(&desc)) { + interval = 1; + return interval; + } + interval = fls(desc.bInterval) - 1; interval = clamp_val(interval, 0, 15); interval = 1 << interval; diff --git a/drivers/usb/phytium/host_api.h b/drivers/usb/phytium/host_api.h index 3d45258278c475fdd4ef15810499e41c73d27948..b99d2b4980fbe09ab31f8591b0cd528f6516d6f5 100644 --- a/drivers/usb/phytium/host_api.h +++ b/drivers/usb/phytium/host_api.h @@ -10,6 +10,7 @@ #define MAX_SUPPORTED_DEVICES 16 #define USB_PORT_STAT_RESUME (1 << 31) #define MAX_INSTANCE_EP_NUM 6 +#define ENDPOINT_DIR 2 enum HOST_OtgState { HOST_OTG_STATE_A_IDLE, @@ -63,6 +64,7 @@ struct HOST_EP { struct list_head reqList; void *userExt; uint8_t *hcPriv; + uint8_t device_epNum; }; struct HOST_USB_DEVICE { @@ -241,6 +243,7 @@ struct HOST_CTRL { struct HOST_USB_DEVICE *host_devices_table[MAX_SUPPORTED_DEVICES]; struct CUSTOM_REGS *custom_regs; struct VHUB_REGS *vhub_regs; + int ep_remap_pool[ENDPOINT_DIR][MAX_INSTANCE_EP_NUM + 1]; }; struct HOST_OBJ *HOST_GetInstance(void); diff --git a/drivers/usb/phytium/platform.c b/drivers/usb/phytium/platform.c index 7d39e4b6034d191d0b306b7b15ceadfb6887b79e..bcf5859a4d184f7e4bc28d7d8c4b8f8571f1ba8d 100644 --- a/drivers/usb/phytium/platform.c +++ b/drivers/usb/phytium/platform.c @@ -12,6 +12,7 @@ #include "core.h" #include "hw-regs.h" +#define PHYTIUM_OTG_V1_VERSION "1.0.1" #define PHYTIUM_OTG_USB_LOADED 3 #define 
USB2_2_BASE_ADDRESS 0x31800000 @@ -21,12 +22,14 @@ static const struct of_device_id phytium_otg_of_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, phytium_otg_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id phytium_otg_acpi_match[] = { { "PHYT0037", 0 }, { } }; +MODULE_DEVICE_TABLE(acpi, phytium_otg_acpi_match); #endif static int phytium_get_dr_mode(struct phytium_cusb *config) @@ -214,3 +217,4 @@ module_platform_driver(phytium_otg_driver); MODULE_AUTHOR("Chen Zhenhua "); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Phytium usb platform wrapper"); +MODULE_VERSION(PHYTIUM_OTG_V1_VERSION); diff --git a/drivers/w1/masters/phytium_w1.c b/drivers/w1/masters/phytium_w1.c index 6a7a7fe312adcd8f9d599a0b1c952595c8496866..46eb1a15e528cde152b13b5a2a24a11f82e8a77c 100644 --- a/drivers/w1/masters/phytium_w1.c +++ b/drivers/w1/masters/phytium_w1.c @@ -53,6 +53,8 @@ #define PHY_W1M_MAX_USER 4 +#define W1_DRIVER_VERSION "1.1.1" + static DECLARE_WAIT_QUEUE_HEAD(w1m_wait_queue); struct w1m_data { @@ -577,3 +579,5 @@ module_platform_driver(phytium_w1m_driver); MODULE_AUTHOR("Zhu Mingshuai "); MODULE_DESCRIPTION("Phytium w1 bus master driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(W1_DRIVER_VERSION); + diff --git a/include/linux/irq.h b/include/linux/irq.h index 266d3c327ec9955a720c7c01c9a66beb43b072b6..34dd5e306ae22070749ac25d5567ca6dfc44d77b 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -637,6 +637,7 @@ static inline int irq_set_parent(int irq, int parent_irq) */ extern void handle_level_irq(struct irq_desc *desc); extern void handle_fasteoi_irq(struct irq_desc *desc); +extern void handle_fasteoi_edge_irq(struct irq_desc *desc); extern void handle_edge_irq(struct irq_desc *desc); extern void handle_edge_eoi_irq(struct irq_desc *desc); extern void handle_simple_irq(struct irq_desc *desc); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2019eca4cbdc16ceaf18b98f8a83e41493d6c66b..794047b879c22f3a17a57c41421d5a51873292a6 100644 --- 
a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3119,5 +3119,6 @@ #define PCI_VENDOR_ID_NCUBE 0x10ff #define PCI_VENDOR_ID_PHYTIUM 0x1db7 +#define PCI_DEVICE_ID_PHYTIUM_PE220X 0xdc3e #endif /* _LINUX_PCI_IDS_H */ diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index ea1ab93b55c30c1c521f52ddb74068ee89810723..93269d0b0857de3abcda7e2e7f67523fa9b537b2 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -721,6 +721,81 @@ static inline void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *c } #endif /* !CONFIG_IPIPE */ +/** + * handle_fasteoi_edge_irq - irq handler for transparent controllers + * edge type IRQ. + * @desc: the interrupt description structure for this irq + */ +void handle_fasteoi_edge_irq(struct irq_desc *desc) +{ + struct irq_chip *chip = desc->irq_data.chip; + + raw_spin_lock(&desc->lock); + + if (!irq_may_run(desc)) { + desc->istate |= IRQS_PENDING; + mask_irq(desc); + goto out; + } + + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); + + /* + * If its disabled or no action available + * then mask it and get out of here: + */ + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { + desc->istate |= IRQS_PENDING; + mask_irq(desc); + goto out; + } + + kstat_incr_irqs_this_cpu(desc); +#ifndef CONFIG_IPIPE + if (desc->istate & IRQS_ONESHOT) + mask_irq(desc); +#endif + do { + if (unlikely(!desc->action)) { + mask_irq(desc); + goto out; + } + + /* + * When another irq arrived while we were handling + * one, we could have masked the irq. + * Reenable it, if it was not disabled in meantime. + */ + if (unlikely(desc->istate & IRQS_PENDING)) { + if (!irqd_irq_disabled(&desc->irq_data) && + irqd_irq_masked(&desc->irq_data)) + unmask_irq(desc); + } + + handle_irq_event(desc); + + } while ((desc->istate & IRQS_PENDING) && + !irqd_irq_disabled(&desc->irq_data)); + +#ifdef CONFIG_IPIPE + /* + * IRQCHIP_EOI_IF_HANDLED is ignored as the I-pipe always + * sends EOI. 
+ */ + cond_release_fasteoi_irq(desc, chip); +#else /* !CONFIG_IPIPE */ + cond_unmask_eoi_irq(desc, chip); +#endif /* !CONFIG_IPIPE */ + + raw_spin_unlock(&desc->lock); + return; +out: + if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) + chip->irq_eoi(&desc->irq_data); + raw_spin_unlock(&desc->lock); +} +EXPORT_SYMBOL_GPL(handle_fasteoi_edge_irq); + /** * handle_fasteoi_irq - irq handler for transparent controllers * @desc: the interrupt description structure for this irq diff --git a/kernel/power/swap.c b/kernel/power/swap.c index e9494c29f1ca49bd32aea248291278b2cdda5796..e5f71d53c27f4660224a4ab8de61f59ad5d5b233 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -653,6 +653,13 @@ static int lzo_compress_threadfn(void *data) d->ret = lzo1x_1_compress(d->unc, d->unc_len, d->cmp + LZO_HEADER, &d->cmp_len, d->wrk); +#ifdef CONFIG_ARM64 + /* + * Ensure that compressed data is indeed written to memory + * before atomic_set on weakly-ordered architectures. + */ + smp_wmb(); +#endif atomic_set(&d->stop, 1); wake_up(&d->done); } @@ -805,6 +812,10 @@ static int save_image_lzo(struct swap_map_handle *handle, for (run_threads = thr, thr = 0; thr < run_threads; thr++) { wait_event(data[thr].done, atomic_read(&data[thr].stop)); +#ifdef CONFIG_ARM64 + /* Force data access later than written */ + smp_rmb(); +#endif atomic_set(&data[thr].stop, 0); ret = data[thr].ret; diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index e2cc69db137952ab0f5924686c45e378e039c1f7..508b79b7cfa70b727dd67efa8fced6721da04dec 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -8,6 +8,7 @@ * * This file is released under the GPLv2 and any later version. */ +#include #include #include #include @@ -168,7 +169,7 @@ static void set_state(struct multi_stop_data *msdata, /* Reset ack counter. */ atomic_set(&msdata->thread_ack, msdata->num_threads); smp_wmb(); - msdata->state = newstate; + WRITE_ONCE(msdata->state, newstate); } /* Last one to ack a state moves to the next state. 
*/ @@ -182,7 +183,7 @@ static void ack_state(struct multi_stop_data *msdata) static int multi_cpu_stop(void *data) { struct multi_stop_data *msdata = data; - enum multi_stop_state curstate = MULTI_STOP_NONE; + enum multi_stop_state newstate, curstate = MULTI_STOP_NONE; int cpu = smp_processor_id(), err = 0; unsigned long flags; bool is_active; @@ -202,8 +203,9 @@ static int multi_cpu_stop(void *data) do { /* Chill out and ensure we re-read multi_stop_state. */ cpu_relax_yield(); - if (msdata->state != curstate) { - curstate = msdata->state; + newstate = READ_ONCE(msdata->state); + if (newstate != curstate) { + curstate = newstate; switch (curstate) { case MULTI_STOP_DISABLE_IRQ: local_irq_disable(); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 9cc8e92f4b000acfb78753adb2a2c6503a577433..10c18478a39485e7da357f6b7ac5a56c2c02eba5 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -7099,16 +7099,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE])); switch (data->verdict.code) { - default: - switch (data->verdict.code & NF_VERDICT_MASK) { - case NF_ACCEPT: - case NF_DROP: - case NF_QUEUE: - break; - default: - return -EINVAL; - } - /* fall through */ + case NF_ACCEPT: + case NF_DROP: + case NF_QUEUE: + break; case NFT_CONTINUE: case NFT_BREAK: case NFT_RETURN: @@ -7127,6 +7121,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, chain->use++; data->verdict.chain = chain; break; + default: + return -EINVAL; } desc->len = sizeof(data->verdict); diff --git a/net/packet/internal.h b/net/packet/internal.h index f10294800aafb3673083acd87c21944e313d9a6a..907f4cd2a7188e4258c3144cdf5c1ff0d96288ca 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -135,6 +135,7 @@ struct packet_sock { struct net_device __rcu *cached_dev; int (*xmit)(struct sk_buff *skb); struct packet_type 
prot_hook ____cacheline_aligned_in_smp; + atomic_t tp_drops ____cacheline_aligned_in_smp; }; static struct packet_sock *pkt_sk(struct sock *sk) diff --git a/sound/pci/hda/hda_phytium.c b/sound/pci/hda/hda_phytium.c index bc5282d1d51931e511973209fa9bad9d8020f73d..370b582f0b1d3d4ec04f1efbe6f778ad13d73926 100644 --- a/sound/pci/hda/hda_phytium.c +++ b/sound/pci/hda/hda_phytium.c @@ -36,6 +36,8 @@ #include "hda_intel_trace.h" +#define PHYTIUM_HDA_V1_VERSION "1.0.0" + /* position fix mode */ enum { POS_FIX_AUTO, @@ -257,37 +259,6 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) return 1; /* OK, it's fine */ } -static int hda_ft_dma_configure(struct device *dev) -{ - const struct of_device_id *match_of; - const struct acpi_device_id *match_acpi; - - if (dev->of_node) { - match_of = of_match_device(dev->driver->of_match_table, dev); - if (!match_of) { - dev_err(dev, "Error DT match data is missing\n"); - return -ENODEV; - } - set_dma_ops(dev, NULL); - /* - * Because there is no way to transfer to non-coherent dma in - * of_dma_configure if 'dma-coherent' is described in DT, - * use acpi_dma_configure to alloc dma_ops correctly. - */ - acpi_dma_configure(dev, DEV_DMA_NON_COHERENT); - } else if (has_acpi_companion(dev)) { - match_acpi = acpi_match_device(dev->driver->acpi_match_table, dev); - if (!match_acpi) { - dev_err(dev, "Error ACPI match data is missing\n"); - return -ENODEV; - } - set_dma_ops(dev, NULL); - acpi_dma_configure(dev, DEV_DMA_NON_COHERENT); - } - - return 0; -} - /* The work for pending PCM period updates. 
*/ static void azx_irq_pending_work(struct work_struct *work) { @@ -855,6 +826,7 @@ static int azx_first_init(struct azx *chip) unsigned int dma_bits = 64; struct resource *res; + const struct acpi_device_id *match; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hda->regs = devm_ioremap_resource(hddev, res); @@ -892,9 +864,15 @@ static int azx_first_init(struct azx *chip) chip->align_buffer_size = 1; } - err = hda_ft_dma_configure(hddev); - if (err < 0) - return err; + if (has_acpi_companion(hddev)) { + match = acpi_match_device(hddev->driver->acpi_match_table, hddev); + if (!match) { + dev_err(hddev, "Error ACPI match data is missing\n"); + return -ENODEV; + } + set_dma_ops(hddev, NULL); + acpi_dma_configure(hddev, DEV_DMA_NON_COHERENT); + } /* allow 64bit DMA address if supported by H/W */ if (!(gcap & AZX_GCAP_64OK)) @@ -1247,3 +1225,4 @@ module_platform_driver(ft_platform_hda); MODULE_DESCRIPTION("FT HDA bus driver"); MODULE_LICENSE("GPL v2"); +MODULE_VERSION(PHYTIUM_HDA_V1_VERSION); diff --git a/sound/soc/codecs/es8336.c b/sound/soc/codecs/es8336.c index 7b3f2fc15e423942391e0b4e37afcfe234f58b63..6b8e2c8d3483b5cb6ea28e604ed756ad56aef6b4 100644 --- a/sound/soc/codecs/es8336.c +++ b/sound/soc/codecs/es8336.c @@ -31,6 +31,10 @@ #include #include "es8336.h" +#define ES8336_V1_VERSION "1.0.0" + +#define ES8336_MUTE (1 << 5) + static struct snd_soc_component *es8336_component; static const struct reg_default es8336_reg_defaults[] = { @@ -666,23 +670,24 @@ static int es8336_pcm_hw_params(struct snd_pcm_substream *substream, return 0; } -static int es8336_mute(struct snd_soc_dai *dai, int mute) +static int es8336_mute(struct snd_soc_dai *dai, int mute, int direction) { struct snd_soc_component *component = dai->component; struct es8336_priv *es8336 = snd_soc_component_get_drvdata(component); es8336->muted = mute; - if (mute) { + if (!es8336->hp_inserted) + es8336_enable_spk(es8336, true); + else es8336_enable_spk(es8336, false); - msleep(100); - 
snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x20); - } else if (dai->playback_active) { - snd_soc_component_write(component, ES8336_DAC_SET1_REG30, 0x00); - msleep(130); - if (!es8336->hp_inserted) - es8336_enable_spk(es8336, true); - } - return 0; + if (direction) + return snd_soc_component_update_bits(dai->component, ES8336_ADC_MUTE_REG26, + ES8336_MUTE, + mute ? ES8336_MUTE : 0); + else + return snd_soc_component_update_bits(dai->component, ES8336_DAC_SET1_REG30, + ES8336_MUTE, + mute ? ES8336_MUTE : 0); } static int es8336_set_bias_level(struct snd_soc_component *component, @@ -731,7 +736,7 @@ static const struct snd_soc_dai_ops es8336_ops = { .hw_params = es8336_pcm_hw_params, .set_fmt = es8336_set_dai_fmt, .set_sysclk = es8336_set_dai_sysclk, - .digital_mute = es8336_mute, + .mute_stream = es8336_mute, .shutdown = es8336_pcm_shutdown, }; @@ -1070,3 +1075,4 @@ static struct i2c_driver es8336_i2c_driver = { module_i2c_driver(es8336_i2c_driver); MODULE_DESCRIPTION("ASoC es8336 driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(ES8336_V1_VERSION); diff --git a/sound/soc/codecs/es8388.c b/sound/soc/codecs/es8388.c index 9ddc557328f9cac550cd60cb75570fcd2a4928cc..3c1472149113af54b22454c03b699c1e4b0ddcc8 100644 --- a/sound/soc/codecs/es8388.c +++ b/sound/soc/codecs/es8388.c @@ -28,6 +28,8 @@ #include #include +#define ES8388_V1_VERSION "1.0.0" + static const unsigned int rates_12288[] = { 8000, 12000, 16000, 24000, 32000, 48000, 96000, }; @@ -424,11 +426,16 @@ static const struct snd_soc_dapm_route es8388_dapm_routes[] = { { "ROUT2", NULL, "Right Out 2" }, }; -static int es8388_mute(struct snd_soc_dai *dai, int mute) +static int es8388_mute(struct snd_soc_dai *dai, int mute, int direction) { - return snd_soc_component_update_bits(dai->component, ES8388_DACCONTROL3, - ES8388_DACCONTROL3_DACMUTE, - mute ? 
ES8388_DACCONTROL3_DACMUTE : 0); + if (direction) + return snd_soc_component_update_bits(dai->component, ES8388_ADCCONTROL7, + ES8388_ADCCONTROL7_ADC_MUTE, + mute ? ES8388_ADCCONTROL7_ADC_MUTE : 0); + else + return snd_soc_component_update_bits(dai->component, ES8388_DACCONTROL3, + ES8388_DACCONTROL3_DACMUTE, + mute ? ES8388_DACCONTROL3_DACMUTE : 0); } static int es8388_startup(struct snd_pcm_substream *substream, @@ -667,7 +674,7 @@ static int es8388_set_bias_level(struct snd_soc_component *component, static const struct snd_soc_dai_ops es8388_dai_ops = { .startup = es8388_startup, .hw_params = es8388_hw_params, - .digital_mute = es8388_mute, + .mute_stream = es8388_mute, .set_sysclk = es8388_set_sysclk, .set_fmt = es8388_set_dai_fmt, }; @@ -817,3 +824,4 @@ module_i2c_driver(es8388_i2c_driver); MODULE_DESCRIPTION("ASoC ES8388 driver"); MODULE_AUTHOR("Yiqun Zhang "); MODULE_LICENSE("GPL"); +MODULE_VERSION(ES8388_V1_VERSION); diff --git a/sound/soc/phytium/local.h b/sound/soc/phytium/local.h index 753d0ef3a0c398524a707a52e1f58be23932b5b5..4e85504c06a18d10ba4fe00f45c59d635bf50f01 100644 --- a/sound/soc/phytium/local.h +++ b/sound/soc/phytium/local.h @@ -89,6 +89,35 @@ /****************/ +#define I2S_HEADPHONE_ENABLE 1 +#define I2S_HEADPHONE_DISABLE 0 + +#define I2S_GPIO(x) BIT(x) +#define I2S_GPIO_BASE 0xD00 + +/* I2S GPIO registers */ +#define I2S_GPIO_SWPORTA_DR 0x00 /* WR Port A Output Data Register */ + #define I2S_HEADPHONE_FRONT (!I2S_GPIO(1)) + #define I2S_HEADPHONE_REAR I2S_GPIO(1) +#define I2S_GPIO_SWPORTA_DDR 0x04 /* WR Port A Data Direction Register */ + #define I2S_GPIO_INPUT(x) (!I2S_GPIO(x)) + #define I2S_GPIO_OUTPUT(x) I2S_GPIO(x) +#define I2S_GPIO_EXT_PORTA 0x08 /* RO Port A Input Data Register */ +#define I2S_GPIO_SWPORTB_DR 0x0c /* WR Port B Output Data Register */ +#define I2S_GPIO_SWPORTB_DDR 0x10 /* WR Port B Data Direction Register */ +#define I2S_GPIO_EXT_PORTB 0x14 /* RO Port B Input Data Register */ + +#define I2S_GPIO_INTEN 0x18 /* WR Port A 
Interrput Enable Register */ +#define I2S_GPIO_INTMASK 0x1c /* WR Port A Interrupt Mask Register */ +#define I2S_GPIO_INTTYPE_LEVEL 0x20 /* WR Port A Interrupt Level Register */ + #define I2S_GPIO_LEVEL(x) (!I2S_GPIO(x)) + #define I2S_GPIO_EDGE(x) I2S_GPIO(x) +#define I2S_GPIO_INT_POLARITY 0x24 /* WR Port A Interrupt Polarity Register */ + #define I2S_GPIO_DOWN(x) (!I2S_GPIO(x)) + #define I2S_GPIO_UP(x) I2S_GPIO(x) +#define I2S_GPIO_INTSTATUS 0x28 /* RO Port A Interrupt Status Register */ +#define I2S_GPIO_DEBOUNCE 0x34 /* WR Debounce Enable Register */ +#define I2S_GPIO_PORTA_EOI 0x38 /* WO Port A Clear Interrupt Register */ /* max number of fragments - we may use more if allocating more pages for BDL */ #define BDL_SIZE 4096 @@ -294,7 +323,10 @@ struct i2s_phytium { u32 paddr; void __iomem *regs; void __iomem *regs_db; + void __iomem *regs_gpio; int irq_id; + int gpio_irq_id; + bool detect; /* for pending irqs */ struct work_struct irq_pending_work; @@ -302,6 +334,7 @@ struct i2s_phytium { /* sync probing */ struct completion probe_wait; struct work_struct probe_work; + struct delayed_work i2s_gpio_work; /* extra flags */ unsigned int pcie:1; @@ -319,7 +352,7 @@ struct i2s_phytium { u32 xfer_resolution; u32 ccr; u32 clk_base; - + u32 cfg; struct i2s_clk_config_data config; /*azx_dev*/ diff --git a/sound/soc/phytium/phytium_i2s.c b/sound/soc/phytium/phytium_i2s.c index 67a932617013e0b104870b7a5d360b62b4cb2edb..a278f906e5634389e3c05bb30274ef27346f75c9 100644 --- a/sound/soc/phytium/phytium_i2s.c +++ b/sound/soc/phytium/phytium_i2s.c @@ -26,10 +26,12 @@ #include #include #include +#include #include #include #include "local.h" +#define PHYTIUM_I2S_V1_VERSION "1.0.0" #define NUM_CAPTURE 1 #define NUM_PLAYBACK 1 @@ -46,11 +48,25 @@ #define EIGHT_CHANNEL_SUPPORT 8 /* up to 7.1 */ struct pdata_px210_mfd { - struct device *dev; + struct device *dev; char *name; int clk_base; }; +static struct snd_soc_jack hs_jack; + +/* Headset jack detection DAPM pins */ +static struct 
snd_soc_jack_pin hs_jack_pins[] = { + { + .pin = "FrontIn", + .mask = SND_JACK_MICROPHONE, + }, + { + .pin = "Front", + .mask = SND_JACK_HEADPHONE, + }, +}; + static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val) { writel(val, io_base + reg); @@ -157,6 +173,72 @@ irqreturn_t azx_i2s_interrupt(int irq, void *dev_id) return IRQ_RETVAL(handled); } +int azx_i2s_enable_gpio(struct i2s_phytium *i2s) +{ + u32 val; + + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_SWPORTA_DDR, + (I2S_GPIO_INPUT(0) | I2S_GPIO_OUTPUT(1))); + val = i2s_read_reg(i2s->regs_gpio, I2S_GPIO_SWPORTA_DDR); + if (val != (I2S_GPIO_INPUT(0) | I2S_GPIO_OUTPUT(1))) + goto enable_err; + + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_INTMASK, !I2S_GPIO(0)); + val = i2s_read_reg(i2s->regs_gpio, I2S_GPIO_INTMASK); + if (val != !I2S_GPIO(0)) + goto enable_err; + + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_INTTYPE_LEVEL, I2S_GPIO_EDGE(0)); + val = i2s_read_reg(i2s->regs_gpio, I2S_GPIO_INTTYPE_LEVEL); + if (val != I2S_GPIO_EDGE(0)) + goto enable_err; + + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_INT_POLARITY, I2S_GPIO_DOWN(0)); + val = i2s_read_reg(i2s->regs_gpio, I2S_GPIO_INT_POLARITY); + if (val != I2S_GPIO_DOWN(0)) + goto enable_err; + + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_INTEN, I2S_GPIO(0)); + val = i2s_read_reg(i2s->regs_gpio, I2S_GPIO_INTEN); + if (val != I2S_GPIO(0)) + goto enable_err; + + return 0; +enable_err: + return -EBUSY; +} + +static void i2s_gpio_jack_work(struct work_struct *work) +{ + struct i2s_phytium *i2s = container_of(work, struct i2s_phytium, i2s_gpio_work.work); + u32 val; + + val = i2s_read_reg(i2s->regs_gpio, I2S_GPIO_INT_POLARITY); + if (val == 1) { + snd_soc_jack_report(&hs_jack, I2S_HEADPHONE_DISABLE, SND_JACK_HEADSET); + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_SWPORTA_DR, I2S_HEADPHONE_REAR); + } else { + snd_soc_jack_report(&hs_jack, I2S_HEADPHONE_ENABLE, SND_JACK_HEADSET); + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_SWPORTA_DR, I2S_HEADPHONE_FRONT); + } + val = ~val; + val &= 
0x1; + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_INT_POLARITY, val); +} + +irqreturn_t azx_i2s_gpio_interrupt(int irq, void *dev_id) +{ + struct azx *chip = dev_id; + struct i2s_phytium *i2s = container_of(chip, struct i2s_phytium, chip); + bool handled = true; + + queue_delayed_work(system_power_efficient_wq, &i2s->i2s_gpio_work, + msecs_to_jiffies(100)); + + i2s_write_reg(i2s->regs_gpio, I2S_GPIO_PORTA_EOI, I2S_GPIO(0)); + return IRQ_RETVAL(handled); +} + static int azx_acquire_irq(struct azx *chip, int do_disconnect) { struct i2sc_bus *bus = azx_bus(chip); @@ -171,6 +253,22 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect) return err; } + if (i2s->detect) { + err = azx_i2s_enable_gpio(i2s); + if (err < 0) { + dev_err(i2s->dev, "failed to enable gpio irq\n"); + return err; + } + + err = devm_request_irq(i2s->dev, i2s->gpio_irq_id, azx_i2s_gpio_interrupt, + IRQF_SHARED, + "phytium i2s gpio", chip); + if (err < 0) { + dev_err(i2s->dev, "failed to request gpio irq\n"); + return err; + } + } + bus->irq = i2s->irq_id; return 0; @@ -276,6 +374,7 @@ static int phytium_i2s_hw_params(struct snd_pcm_substream *substream, i2s_write_reg(dev->regs, CLK_CFG0, cfg); i2s_write_reg(dev->regs, CLK_CFG1, 0xf); } + dev->cfg = cfg; return 0; } @@ -370,6 +469,13 @@ static int phytium_i2s_resume(struct snd_soc_dai *dai) phytium_i2s_config(dev, SNDRV_PCM_STREAM_PLAYBACK); if (dai->capture_active) phytium_i2s_config(dev, SNDRV_PCM_STREAM_CAPTURE); + + i2s_write_reg(dev->regs, CLK_CFG0, dev->cfg); + i2s_write_reg(dev->regs, CLK_CFG1, 0xf); + + if (dev->detect) + azx_i2s_enable_gpio(dev); + return 0; } #else @@ -377,6 +483,21 @@ static int phytium_i2s_resume(struct snd_soc_dai *dai) #define phytium_i2s_resume NULL #endif +static int phytium_i2s_component_probe(struct snd_soc_component *component) +{ + struct snd_soc_card *card = component->card; + int ret; + + ret = snd_soc_card_jack_new(card, "Headset Jack", SND_JACK_HEADSET, + &hs_jack, hs_jack_pins, + 
ARRAY_SIZE(hs_jack_pins)); + if (ret < 0) { + dev_err(component->dev, "Cannot create jack\n"); + return ret; + } + return 0; +} + static struct snd_soc_dai_driver phytium_i2s_dai = { .playback = { .stream_name = "i2s-Playback", @@ -544,6 +665,25 @@ static int phytium_pcm_hw_params(struct snd_pcm_substream *substream, azx_dev->core.period_bytes = 0; azx_dev->core.format_val = 0; + switch (params_format(hw_params)) { + case SNDRV_PCM_FORMAT_S16_LE: + azx_dev->core.format_val = 2; + break; + + case SNDRV_PCM_FORMAT_S24_LE: + azx_dev->core.format_val = 0; + break; + + case SNDRV_PCM_FORMAT_S32_LE: + azx_dev->core.format_val = 0; + break; + + default: + dev_err(dev->dev, "phytium-i2s: unsupported PCM fmt"); + return -EINVAL; + } + + ret = chip->ops->substream_alloc_pages(chip, substream, params_buffer_bytes(hw_params)); @@ -677,12 +817,10 @@ int snd_i2s_stream_set_params(struct i2s_stream *azx_dev, period_bytes = snd_pcm_lib_period_bytes(substream); if (bufsize != azx_dev->bufsize || period_bytes != azx_dev->period_bytes || - format_val != azx_dev->format_val || runtime->no_period_wakeup != azx_dev->no_period_wakeup) { azx_dev->bufsize = bufsize; azx_dev->period_bytes = period_bytes; - azx_dev->format_val = format_val; azx_dev->no_period_wakeup = runtime->no_period_wakeup; err = snd_i2s_stream_setup_periods(azx_dev); if (err < 0) @@ -701,31 +839,31 @@ int snd_i2s_stream_setup(struct i2s_stream *azx_dev, int pcie, u32 paddr) else runtime = NULL; - i2s_write_reg(azx_dev->sd_addr, DMA_CHAL_CONFG0, 0x8180); + i2s_write_reg(azx_dev->sd_addr, DMA_CHAL_CONFG0, 0x8081); i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, 0x80000003); if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { - i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), (u32)azx_dev->bdl.addr); - i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), upper_32_bits(azx_dev->bdl.addr)); - if (pcie) - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), 0x1c8); - else - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), 
paddr + 0x1c8); - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(0), azx_dev->bufsize); - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(0), azx_dev->frags - 1); - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(0), 0x2);//0x2 - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(0), 0x0);//0x0 - } else { i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), (u32)azx_dev->bdl.addr); i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), upper_32_bits(azx_dev->bdl.addr)); if (pcie) - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), 0x1c0); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), 0x1c8); else - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), paddr + 0x1c0); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(1), paddr + 0x1c8); i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(1), azx_dev->bufsize); i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(1), azx_dev->frags - 1); - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(1), 0x8);//0x8 - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(1), 0x0); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(1), azx_dev->format_val);//0x2 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(1), 0x0);//0x0 + } else { + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), (u32)azx_dev->bdl.addr); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), upper_32_bits(azx_dev->bdl.addr)); + if (pcie) + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), 0x1c0); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DEV_ADDR(0), paddr + 0x1c0); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CBL(0), azx_dev->bufsize); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_LVI(0), azx_dev->frags - 1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DSIZE(0), azx_dev->format_val << 2);//0x8 + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_DLENTH(0), 0x0); } if (runtime && runtime->period_size > 64) @@ -781,9 +919,9 @@ static int phytium_pcm_prepare(struct snd_pcm_substream *substream) void snd_i2s_stream_clear(struct i2s_stream *azx_dev) { if (azx_dev->direction == 
SNDRV_PCM_STREAM_PLAYBACK) - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x0); - else i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x0); + else + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x0); azx_dev->running = false; } @@ -796,9 +934,9 @@ void snd_i2s_stream_stop(struct i2s_stream *azx_dev) void snd_i2s_stream_start(struct i2s_stream *azx_dev, bool fresh_start) { if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x1); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x1); else - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0x5); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0x5); azx_dev->running = true; } @@ -872,19 +1010,6 @@ void snd_i2s_stream_cleanup(struct i2s_stream *azx_dev) if (azx_dev->sd_addr) { if (azx_dev->direction == SNDRV_PCM_STREAM_PLAYBACK) { - mask = i2s_read_reg(azx_dev->sd_addr, DMA_MASK_INT); - mask &= ~BIT(0); - i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, mask); - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); - while (cnt--) { - if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0)) == 0) - break; - } - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 2); - i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); - i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), 0); - i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), 0); - } else { mask = i2s_read_reg(azx_dev->sd_addr, DMA_MASK_INT); mask &= ~BIT(1); i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, mask); @@ -897,6 +1022,21 @@ void snd_i2s_stream_cleanup(struct i2s_stream *azx_dev) i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(1), 0); i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(1), 0); i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(1), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); + } else { + mask = i2s_read_reg(azx_dev->sd_addr, DMA_MASK_INT); + mask &= ~BIT(0); + i2s_write_reg(azx_dev->sd_addr, DMA_MASK_INT, mask); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + while 
(cnt--) { + if (i2s_read_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0)) == 0) + break; + } + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 2); + i2s_write_reg(azx_dev->sd_addr, DMA_CHALX_CTL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPL(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_BDLPU(0), 0); + i2s_write_reg(azx_dev->sd_addr, DMA_STS, azx_dev->sd_int_sta_mask); } } } @@ -925,10 +1065,12 @@ static snd_pcm_uframes_t phytium_pcm_pointer(struct snd_pcm_substream *substream { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct i2s_phytium *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai); + u32 pos = 0; - int stream = substream->stream; - - u32 pos = i2s_read_reg(dev->regs_db, DMA_LPIB(stream)); + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + pos = i2s_read_reg(dev->regs_db, DMA_LPIB(1)); + else + pos = i2s_read_reg(dev->regs_db, DMA_LPIB(0)); return bytes_to_frames(substream->runtime, pos); } @@ -948,6 +1090,7 @@ static const struct snd_soc_component_driver phytium_i2s_component = { .pcm_new = phytium_pcm_new, .pcm_free = phytium_pcm_free, .ops = &phytium_pcm_ops, + .probe = phytium_i2s_component_probe, }; /* Maximum bit resolution of a channel - not uniformly spaced */ @@ -1094,9 +1237,9 @@ void snd_i2s_stream_init(struct i2sc_bus *bus, struct i2s_stream *stream, stream->sd_addr = bus->remap_addr; if (idx == 0) - stream->sd_int_sta_mask = 1 << idx; - else stream->sd_int_sta_mask = 1 << 8; + else + stream->sd_int_sta_mask = 1; stream->index = idx; stream->direction = direction; @@ -1248,6 +1391,8 @@ static int i2s_phytium_create(struct platform_device *pdev, } INIT_WORK(&i2s->probe_work, azx_probe_work); + if (i2s->detect) + INIT_DELAYED_WORK(&i2s->i2s_gpio_work, i2s_gpio_jack_work); *rchip = chip; return 0; } @@ -1307,17 +1452,28 @@ static int phytium_i2s_probe(struct platform_device *pdev) i2s->paddr = res->start; i2s->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2s->regs)) + return PTR_ERR(i2s->regs); + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 1); i2s->regs_db = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(i2s->regs)) - return PTR_ERR(i2s->regs); + if (IS_ERR(i2s->regs_db)) + return PTR_ERR(i2s->regs_db); i2s->irq_id = platform_get_irq(pdev, 0); if (i2s->irq_id < 0) return i2s->irq_id; + i2s->gpio_irq_id = platform_get_irq(pdev, 1); + i2s->detect = true; + + if (i2s->gpio_irq_id < 0) + i2s->detect = false; + else + i2s->regs_gpio = i2s->regs + I2S_GPIO_BASE; + i2s->i2s_reg_comp1 = I2S_COMP_PARAM_1; i2s->i2s_reg_comp2 = I2S_COMP_PARAM_2; @@ -1411,3 +1567,4 @@ module_platform_driver(phytium_i2s_driver); MODULE_DESCRIPTION("Phytium I2S Driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Zhang Yiqun "); +MODULE_VERSION(PHYTIUM_I2S_V1_VERSION); diff --git a/sound/soc/phytium/pmdk_es8336.c b/sound/soc/phytium/pmdk_es8336.c index c2e7891c0fffa1f2114c10a935e1a80e509ee92f..407e07e7f8bc1c673cf23f9080d2b8aea2368afb 100644 --- a/sound/soc/phytium/pmdk_es8336.c +++ b/sound/soc/phytium/pmdk_es8336.c @@ -8,6 +8,7 @@ #include #include +#define PMDK_ES8336_VERSION "1.0.0" /* PMDK widgets */ static const struct snd_soc_dapm_widget pmdk_es8336_dapm_widgets[] = { @@ -93,3 +94,4 @@ module_platform_driver(pmdk_sound_driver); MODULE_AUTHOR("Zhang Yiqun "); MODULE_DESCRIPTION("ALSA SoC PMDK ES8336"); MODULE_LICENSE("GPL"); +MODULE_VERSION(PMDK_ES8336_VERSION); diff --git a/sound/soc/phytium/pmdk_es8388.c b/sound/soc/phytium/pmdk_es8388.c index 3c1d3ee9bfc3d5d41078deacf46cc1abef6d09c2..801eca0cb38324d2baec281ae8f8dbfa5e5c9b79 100644 --- a/sound/soc/phytium/pmdk_es8388.c +++ b/sound/soc/phytium/pmdk_es8388.c @@ -10,6 +10,8 @@ #include #include +#define PMDK_ES8388_VERSION "1.0.0" + static struct snd_soc_jack hs_jack; /* Headset jack detection DAPM pins */ @@ -166,3 +168,4 @@ module_platform_driver(pmdk_sound_driver); MODULE_AUTHOR("Zhang Yiqun"); MODULE_DESCRIPTION("ALSA SoC PMDK ES8388"); MODULE_LICENSE("GPL"); +MODULE_VERSION(PMDK_ES8388_VERSION);