diff --git a/bsp/qemu-virt64-aarch64/.config b/bsp/qemu-virt64-aarch64/.config index cadcb24bb7ee9e9d26899c97131ede26011d2b8a..758084b49147935290b1d354ebfae9c30e10d63d 100644 --- a/bsp/qemu-virt64-aarch64/.config +++ b/bsp/qemu-virt64-aarch64/.config @@ -64,12 +64,12 @@ CONFIG_RT_USING_HEAP=y # CONFIG_RT_USING_DEVICE=y CONFIG_RT_USING_DEVICE_OPS=y +# CONFIG_RT_USING_DM is not set CONFIG_RT_USING_INTERRUPT_INFO=y CONFIG_RT_USING_CONSOLE=y CONFIG_RT_CONSOLEBUF_SIZE=256 CONFIG_RT_CONSOLE_DEVICE_NAME="uart0" CONFIG_RT_VER_NUM=0x50000 -# CONFIG_RT_USING_DM is not set CONFIG_ARCH_CPU_64BIT=y CONFIG_RT_USING_CACHE=y # CONFIG_RT_USING_CPU_FFS is not set @@ -177,6 +177,7 @@ CONFIG_RT_USING_PIN=y # CONFIG_RT_USING_MTD_NOR is not set # CONFIG_RT_USING_MTD_NAND is not set # CONFIG_RT_USING_PM is not set +# CONFIG_RT_USING_FDT is not set CONFIG_RT_USING_RTC=y # CONFIG_RT_USING_ALARM is not set # CONFIG_RT_USING_SOFT_RTC is not set @@ -745,4 +746,5 @@ CONFIG_BSP_USING_VIRTIO_CONSOLE=y CONFIG_BSP_USING_VIRTIO_GPU=y CONFIG_BSP_USING_VIRTIO_INPUT=y CONFIG_BSP_USING_GIC=y -CONFIG_BSP_USING_GIC390=y +CONFIG_BSP_USING_GICV2=y +# CONFIG_BSP_USING_GICV3 is not set diff --git a/bsp/qemu-virt64-aarch64/Kconfig b/bsp/qemu-virt64-aarch64/Kconfig index 07a0454272436dad5a4cbb4d1c1d704f225f2f5d..2f53613bd976e3e818d0cd27a3cfd843b3cc62ec 100644 --- a/bsp/qemu-virt64-aarch64/Kconfig +++ b/bsp/qemu-virt64-aarch64/Kconfig @@ -28,7 +28,6 @@ config SOC_VIRT64_AARCH64 select RT_USING_USER_MAIN select RT_USING_GIC select BSP_USING_GIC - select BSP_USING_GIC390 select ARCH_MM_MMU default y diff --git a/bsp/qemu-virt64-aarch64/README.md b/bsp/qemu-virt64-aarch64/README.md index dc33c8277dda875cb90555406b6221339df95e01..e831c163eaa061bcc43fb5788ac2a702b4826f0f 100644 --- a/bsp/qemu-virt64-aarch64/README.md +++ b/bsp/qemu-virt64-aarch64/README.md @@ -33,7 +33,7 @@ RAM starts at 0x4000_0000 ## 3. 
执行 -当要执行编译好的RT-Thread时,在这个bsp目录下已经提供了运行脚本文件:qemu.bat/qemu.sh +当要执行编译好的RT-Thread时,在这个bsp目录下已经提供了运行脚本文件:qemu.bat/qemu.sh,工程可配置为使用`Cortex-A53/A57/A72`等芯片,GIC支持`V2/V3`版本,其中`V2`最多可配置8个处理器。 这个执行脚本默认把串口输出到stdio(即控制台)上,所以直接执行脚本后就可以输出结果了。 @@ -51,6 +51,15 @@ msh /> telnet 127.0.0.1 4321 ``` +如果使用tap网卡模式,以设备tap0为例,将qemu运行脚本 +``` +-netdev user,id=net0 +``` +修改为 +``` +-netdev tap,id=net0,ifname=tap0 +``` + ## 4. 支持情况 | 驱动 | 支持情况 | 备注 | diff --git a/bsp/qemu-virt64-aarch64/applications/console.c b/bsp/qemu-virt64-aarch64/applications/console.c index 3f8be481891bdc082f4927e468e8f1295af6a2b8..a0a6f57db5caae6e0fa629cf1ff3b91eedcdb1e3 100644 --- a/bsp/qemu-virt64-aarch64/applications/console.c +++ b/bsp/qemu-virt64-aarch64/applications/console.c @@ -10,6 +10,10 @@ #include +#ifdef RT_USING_POSIX +#include +#endif + #include static int console_init() @@ -31,3 +35,47 @@ static int console_init() return status; } INIT_ENV_EXPORT(console_init); + +#ifdef FINSH_USING_MSH + +static int console(int argc, char **argv) +{ + rt_err_t result = RT_EOK; + + if (argc > 1) + { + if (!rt_strcmp(argv[1], "set")) + { + rt_kprintf("console change to %s\n", argv[2]); + rt_console_set_device(argv[2]); + + #ifdef RT_USING_POSIX + { + rt_device_t dev = rt_device_find(argv[2]); + + if (dev != RT_NULL) + { + console_set_iodev(dev); + } + } + #else + finsh_set_device(argv[2]); + #endif /* RT_USING_POSIX */ + } + else + { + rt_kprintf("Unknown command. 
Please enter 'console' for help\n"); + result = -RT_ERROR; + } + } + else + { + rt_kprintf("Usage: \n"); + rt_kprintf("console set - change console by name\n"); + result = -RT_ERROR; + } + return result; +} +MSH_CMD_EXPORT(console, set console name); + +#endif /* FINSH_USING_MSH */ diff --git a/bsp/qemu-virt64-aarch64/applications/graphic.c b/bsp/qemu-virt64-aarch64/applications/graphic.c index 2207f9f88721c853470be470a4a54fe66d380167..5a83ce762cbf9ccf4d94899e5549fd65624b1dd1 100644 --- a/bsp/qemu-virt64-aarch64/applications/graphic.c +++ b/bsp/qemu-virt64-aarch64/applications/graphic.c @@ -24,6 +24,7 @@ static rt_uint32_t cur_points[2]; static rt_uint32_t cur_last_points[2]; static rt_bool_t cur_event_sync; static rt_uint32_t color[2] = { 0xff0000, 0x0000ff }; +static rt_uint8_t cursor[VIRTIO_GPU_CURSOR_IMG_SIZE] ALIGN(VIRTIO_PAGE_SIZE); void tablet_event_handler(struct virtio_input_event event) { @@ -100,6 +101,8 @@ void graphic_thread(void *param) if (graphic_info.framebuffer != RT_NULL) { + int i = 0; + rt_memset(graphic_info.framebuffer, 0xff, graphic_info.width * graphic_info.height * graphic_info.bits_per_pixel); @@ -111,6 +114,16 @@ void graphic_thread(void *param) rt_device_control(device, RTGRAPHIC_CTRL_RECT_UPDATE, &rect_info); + while (i < sizeof(cursor) / 4) + { + /* R: 0x4c G: 0xaf B: 0x50 A: 0.8 */ + ((rt_uint32_t *)cursor)[i] = 0xcc4caf50; + ++i; + } + + rt_device_control(device, VIRTIO_DEVICE_CTRL_CURSOR_SETUP, cursor); + rt_device_control(device, VIRTIO_DEVICE_CTRL_CURSOR_MOVE, (rt_uint32_t[]){0, 0}); + gpu_dev = device; } } diff --git a/bsp/qemu-virt64-aarch64/drivers/Kconfig b/bsp/qemu-virt64-aarch64/drivers/Kconfig index 706722a808eefd2719b4dae4cac1322106f7f0de..f14fc73fc183ca22e6df3c1ea3b473e81c38c64a 100644 --- a/bsp/qemu-virt64-aarch64/drivers/Kconfig +++ b/bsp/qemu-virt64-aarch64/drivers/Kconfig @@ -66,7 +66,15 @@ menu "AARCH64 qemu virt64 configs" bool default y - config BSP_USING_GIC390 - bool - default y + choice + prompt "GIC Version" + 
default BSP_USING_GICV2 + + config BSP_USING_GICV2 + bool "GICv2" + + config BSP_USING_GICV3 + bool "GICv3" + endchoice + endmenu diff --git a/bsp/qemu-virt64-aarch64/drivers/virt.h b/bsp/qemu-virt64-aarch64/drivers/virt.h index e38f01fbb09544fd76f537aed7847a4b35ba9717..6b15523d7bf13c1c3b0de0934b3ce411379bdf25 100644 --- a/bsp/qemu-virt64-aarch64/drivers/virt.h +++ b/bsp/qemu-virt64-aarch64/drivers/virt.h @@ -60,15 +60,39 @@ extern rt_mmu_info mmu_info; #define GIC_PL390_HYPERVISOR_BASE 0x08030000 #define GIC_PL390_VIRTUAL_CPU_BASE 0x08040000 +/* GICv3 */ +#define GIC_PL500_DISTRIBUTOR_PPTR GIC_PL390_DISTRIBUTOR_PPTR +#define GIC_PL500_REDISTRIBUTOR_PPTR 0x080a0000 +#define GIC_PL500_CONTROLLER_PPTR GIC_PL390_CONTROLLER_PPTR +#define GIC_PL500_ITS_PPTR 0x08080000 + /* the basic constants and interfaces needed by gic */ rt_inline rt_ubase_t platform_get_gic_dist_base(void) { +#ifdef BSP_USING_GICV2 return GIC_PL390_DISTRIBUTOR_PPTR; +#else + return GIC_PL500_DISTRIBUTOR_PPTR; +#endif +} + +rt_inline rt_ubase_t platform_get_gic_redist_base(void) +{ + return GIC_PL500_REDISTRIBUTOR_PPTR; } rt_inline rt_ubase_t platform_get_gic_cpu_base(void) { +#ifdef BSP_USING_GICV2 return GIC_PL390_CONTROLLER_PPTR; +#else + return GIC_PL500_CONTROLLER_PPTR; +#endif +} + +rt_inline rt_ubase_t platform_get_gic_its_base(void) +{ + return GIC_PL500_ITS_PPTR; } #endif diff --git a/bsp/qemu-virt64-aarch64/rtconfig.h b/bsp/qemu-virt64-aarch64/rtconfig.h index 0aa46e8aaa6ecdbc8292aa91916321b9f15b337c..8c5a491a7b79bf87102384b00e7badb5ce1d6955 100644 --- a/bsp/qemu-virt64-aarch64/rtconfig.h +++ b/bsp/qemu-virt64-aarch64/rtconfig.h @@ -299,6 +299,6 @@ #define BSP_USING_VIRTIO_GPU #define BSP_USING_VIRTIO_INPUT #define BSP_USING_GIC -#define BSP_USING_GIC390 +#define BSP_USING_GICV2 #endif diff --git a/components/drivers/virtio/virtio_console.c b/components/drivers/virtio/virtio_console.c index 44878e2c0d992e8e3e95474d64df77bb607a38e2..269ffb5d1b5a2368e34798f618d6e550fd02efd8 100644 --- 
a/components/drivers/virtio/virtio_console.c +++ b/components/drivers/virtio/virtio_console.c @@ -34,6 +34,8 @@ struct port_device struct rt_spinlock spinlock_rx, spinlock_tx; #endif + struct rt_device_notify rx_notify_helper; + struct { char rx_char, tx_char; @@ -234,12 +236,6 @@ static rt_err_t virtio_console_port_open(rt_device_t dev, rt_uint16_t oflag) { struct port_device *port_dev = (struct port_device *)dev; - /* Can't use by others, just support only one */ - if (port_dev->parent.ref_count > 1) - { - return -RT_EBUSY; - } - if (port_dev->port_id == 0 && virtio_has_feature(&port_dev->console->virtio_dev, VIRTIO_CONSOLE_F_MULTIPORT)) { /* Port0 is reserve in multiport */ @@ -383,6 +379,14 @@ static rt_err_t virtio_console_port_control(rt_device_t dev, int cmd, void *args switch (cmd) { + case RT_DEVICE_CTRL_NOTIFY_SET: + if (args == RT_NULL) + { + status = -RT_ERROR; + break; + } + rt_memcpy(&port_dev->rx_notify_helper, args, sizeof(port_dev->rx_notify_helper)); + break; case RT_DEVICE_CTRL_CLR_INT: /* Disable RX */ port_dev->rx_notify = RT_FALSE; @@ -606,13 +610,22 @@ static void virtio_console_isr(int irqno, void *param) id = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].id; len = queue_rx->used->ring[queue_rx->used_idx % queue_rx->num].len; - if (port_dev->parent.rx_indicate != RT_NULL && port_dev->rx_notify) + if (port_dev->rx_notify) { #ifdef RT_USING_SMP rt_spin_unlock_irqrestore(&port_dev->spinlock_rx, level); #endif - /* rx_indicate call virtio_console_port_read to inc used_idx */ - port_dev->parent.rx_indicate(&port_dev->parent, len); + /* Will call virtio_console_port_read to inc used_idx */ + + if (port_dev->parent.rx_indicate != RT_NULL) + { + port_dev->parent.rx_indicate(&port_dev->parent, len); + } + + if (port_dev->rx_notify_helper.notify != RT_NULL) + { + port_dev->rx_notify_helper.notify(port_dev->rx_notify_helper.dev); + } #ifdef RT_USING_SMP level = rt_spin_lock_irqsave(&port_dev->spinlock_rx); diff --git 
a/components/drivers/virtio/virtio_gpu.c b/components/drivers/virtio/virtio_gpu.c index 6f7f3564db8c9ca32d49d4fde91e4d1d237b80ce..67ce28a8764a66016ec7f8b582507bebd303a79d 100644 --- a/components/drivers/virtio/virtio_gpu.c +++ b/components/drivers/virtio/virtio_gpu.c @@ -84,10 +84,6 @@ static void virtio_gpu_ctrl_send_command(struct virtio_gpu_device *virtio_gpu_de #endif } - rt_hw_dsb(); - - virtio_gpu_dev->info[idx[0]].ctrl_valid = RT_TRUE; - rt_memcpy(&virtio_gpu_dev->gpu_request, cmd, cmd_len); virtio_fill_desc(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, idx[0], @@ -98,6 +94,8 @@ static void virtio_gpu_ctrl_send_command(struct virtio_gpu_device *virtio_gpu_de rt_memset(ret_res, 0, res_len); + virtio_gpu_dev->info[idx[0]].ctrl_valid = RT_TRUE; + virtio_submit_chain(virtio_dev, VIRTIO_GPU_QUEUE_CTRL, idx[0]); virtio_queue_notify(virtio_dev, VIRTIO_GPU_QUEUE_CTRL); diff --git a/components/drivers/virtio/virtio_net.c b/components/drivers/virtio/virtio_net.c index 87f130a92e246ee0b48d03b3f37fc757e1bcc233..9b44cf6bb21af00e8acf7723007ac67901695226 100644 --- a/components/drivers/virtio/virtio_net.c +++ b/components/drivers/virtio/virtio_net.c @@ -23,41 +23,41 @@ static rt_err_t virtio_net_tx(rt_device_t dev, struct pbuf *p) struct virtio_device *virtio_dev = &virtio_net_dev->virtio_dev; struct virtq *queue_tx = &virtio_dev->queues[VIRTIO_NET_QUEUE_TX]; - while (p != RT_NULL) - { #ifdef RT_USING_SMP - rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock); + rt_base_t level = rt_spin_lock_irqsave(&virtio_dev->spinlock); #endif - id = (queue_tx->avail->idx * 2) % queue_tx->num; - virtio_net_dev->info[id].hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - virtio_net_dev->info[id].hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; - virtio_net_dev->info[id].hdr.hdr_len = 0; - virtio_net_dev->info[id].hdr.gso_size = 0; - virtio_net_dev->info[id].hdr.csum_start = 0; - virtio_net_dev->info[id].hdr.csum_offset = p->tot_len + sizeof(virtio_net_dev->info[id].hdr); + id = (queue_tx->avail->idx * 2) 
% queue_tx->num; + + virtio_net_dev->info[id].hdr.flags = 0; + virtio_net_dev->info[id].hdr.gso_type = 0; + virtio_net_dev->info[id].hdr.hdr_len = 0; + virtio_net_dev->info[id].hdr.gso_size = 0; + virtio_net_dev->info[id].hdr.csum_start = 0; + virtio_net_dev->info[id].hdr.csum_offset = 0; + virtio_net_dev->info[id].hdr.num_buffers = 0; - virtio_free_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id); - virtio_free_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id + 1); + pbuf_copy_partial(p, virtio_net_dev->info[id].rx_buffer, p->tot_len, 0); - virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id, - VIRTIO_VA2PA(&virtio_net_dev->info[id].hdr), VIRTIO_NET_HDR_SIZE, VIRTQ_DESC_F_NEXT, id + 1); + virtio_free_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id); + virtio_free_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id + 1); - virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id + 1, - VIRTIO_VA2PA(p->payload), p->tot_len, 0, 0); + virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id, + VIRTIO_VA2PA(&virtio_net_dev->info[id].hdr), VIRTIO_NET_HDR_SIZE, VIRTQ_DESC_F_NEXT, id + 1); - virtio_submit_chain(virtio_dev, VIRTIO_NET_QUEUE_TX, id); + virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_TX, id + 1, + VIRTIO_VA2PA(virtio_net_dev->info[id].rx_buffer), p->tot_len, 0, 0); - virtio_queue_notify(virtio_dev, VIRTIO_NET_QUEUE_TX); + virtio_submit_chain(virtio_dev, VIRTIO_NET_QUEUE_TX, id); - virtio_alloc_desc(virtio_dev, VIRTIO_NET_QUEUE_TX); - virtio_alloc_desc(virtio_dev, VIRTIO_NET_QUEUE_TX); + virtio_queue_notify(virtio_dev, VIRTIO_NET_QUEUE_TX); + + virtio_alloc_desc(virtio_dev, VIRTIO_NET_QUEUE_TX); + virtio_alloc_desc(virtio_dev, VIRTIO_NET_QUEUE_TX); #ifdef RT_USING_SMP - rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level); + rt_spin_unlock_irqrestore(&virtio_dev->spinlock, level); #endif - p = p->next; - } return RT_EOK; } @@ -144,7 +144,7 @@ static rt_err_t virtio_net_init(rt_device_t dev) for (i = 0; i < queue_rx->num; ++i) { rt_uint16_t id = (i * 2) % queue_rx->num; - void *addr = 
virtio_net_dev->info[i].buffer; + void *addr = virtio_net_dev->info[i].tx_buffer; /* Descriptor for net_hdr */ virtio_fill_desc(virtio_dev, VIRTIO_NET_QUEUE_RX, id, @@ -185,7 +185,7 @@ static rt_err_t virtio_net_control(rt_device_t dev, int cmd, void *args) break; } - rt_memcpy(args, virtio_net_dev->mac, sizeof(virtio_net_dev->mac)); + rt_memcpy(args, virtio_net_dev->config->mac, sizeof(virtio_net_dev->config->mac)); break; default: status = -RT_EINVAL; @@ -234,7 +234,6 @@ static void virtio_net_isr(int irqno, void *param) rt_err_t rt_virtio_net_init(rt_ubase_t *mmio_base, rt_uint32_t irq) { - int i; static int dev_no = 0; char dev_name[RT_NAME_MAX]; struct virtio_device *virtio_dev; @@ -251,6 +250,8 @@ rt_err_t rt_virtio_net_init(rt_ubase_t *mmio_base, rt_uint32_t irq) virtio_dev->irq = irq; virtio_dev->mmio_base = mmio_base; + virtio_net_dev->config = (struct virtio_net_config *)virtio_dev->mmio_config->config; + #ifdef RT_USING_SMP rt_spin_lock_init(&virtio_dev->spinlock); #endif @@ -260,14 +261,7 @@ rt_err_t rt_virtio_net_init(rt_ubase_t *mmio_base, rt_uint32_t irq) virtio_dev->mmio_config->driver_features = virtio_dev->mmio_config->device_features & ~( (1 << VIRTIO_NET_F_CTRL_VQ) | - (1 << VIRTIO_NET_F_GUEST_TSO4) | - (1 << VIRTIO_NET_F_GUEST_TSO6) | - (1 << VIRTIO_NET_F_GUEST_UFO) | - (1 << VIRTIO_NET_F_MRG_RXBUF) | - (1 << VIRTIO_F_RING_EVENT_IDX)) & - (1 << VIRTIO_NET_F_CSUM) & - (1 << VIRTIO_NET_F_GUEST_CSUM) & - (1 << VIRTIO_NET_F_MAC); + (1 << VIRTIO_F_RING_EVENT_IDX)); virtio_status_driver_ok(virtio_dev); @@ -287,11 +281,6 @@ rt_err_t rt_virtio_net_init(rt_ubase_t *mmio_base, rt_uint32_t irq) goto _alloc_fail; } - for (i = 0; i < sizeof(virtio_net_dev->mac) / sizeof(virtio_net_dev->mac[0]); ++i) - { - virtio_net_dev->mac[i] = virtio_dev->mmio_config->config[i]; - } - virtio_net_dev->parent.parent.type = RT_Device_Class_NetIf; #ifdef RT_USING_DEVICE_OPS virtio_net_dev->parent.parent.ops = &virtio_net_ops; diff --git 
a/components/drivers/virtio/virtio_net.h b/components/drivers/virtio/virtio_net.h index ab41ccfeaded57fa568c237cfc0d464ef182da16..369979019d7f5bd632f8e6f998392ec55cbedac6 100644 --- a/components/drivers/virtio/virtio_net.h +++ b/components/drivers/virtio/virtio_net.h @@ -71,28 +71,42 @@ struct virtio_net_hdr rt_uint16_t gso_size; rt_uint16_t csum_start; rt_uint16_t csum_offset; - rt_uint16_t num_buffers; /* Only if VIRTIO_NET_F_MRG_RXBUF */ + rt_uint16_t num_buffers; } __attribute__ ((packed)); #define VIRTIO_NET_MSS 1514 -/* Disable VIRTIO_NET_F_MRG_RXBUF */ -#define VIRTIO_NET_HDR_SIZE (sizeof(struct virtio_net_hdr) - sizeof(rt_uint16_t)) +#define VIRTIO_NET_HDR_SIZE (sizeof(struct virtio_net_hdr)) #define VIRTIO_NET_PAYLOAD_MAX_SIZE (VIRTIO_NET_HDR_SIZE + VIRTIO_NET_MSS) +struct virtio_net_config +{ + rt_uint8_t mac[6]; + rt_uint16_t status; + rt_uint16_t max_virtqueue_pairs; + rt_uint16_t mtu; + rt_uint32_t speed; + rt_uint8_t duplex; + rt_uint8_t rss_max_key_size; + rt_uint16_t rss_max_indirection_table_length; + rt_uint32_t supported_hash_types; +} __attribute__((packed)); + struct virtio_net_device { struct eth_device parent; struct virtio_device virtio_dev; - rt_uint8_t mac[6]; + struct virtio_net_config *config; struct { /* Transmit hdr */ struct virtio_net_hdr hdr; + /* Transmit buffer */ + rt_uint8_t tx_buffer[VIRTIO_NET_PAYLOAD_MAX_SIZE]; /* Receive buffer */ - rt_uint8_t buffer[VIRTIO_NET_PAYLOAD_MAX_SIZE]; + rt_uint8_t rx_buffer[VIRTIO_NET_PAYLOAD_MAX_SIZE]; } info[VIRTIO_NET_RTX_QUEUE_SIZE]; }; diff --git a/include/rthw.h b/include/rthw.h index 6492bec2e90a93abf94af9eb672dac85be603aaf..ca2187ebf33af8a73a1072ed04c6a85de5452b05 100644 --- a/include/rthw.h +++ b/include/rthw.h @@ -27,6 +27,9 @@ extern "C" { /* * Some macros define */ +#ifndef HWREG64 +#define HWREG64(x) (*((volatile rt_uint64_t *)(x))) +#endif #ifndef HWREG32 #define HWREG32(x) (*((volatile rt_uint32_t *)(x))) #endif diff --git a/libcpu/aarch64/common/gic.c b/libcpu/aarch64/common/gic.c 
index 7c3150635dc54271c880d3550aa50022c3390bde..3cb9052cfc9c305256aaefe6614c530994da4ec7 100644 --- a/libcpu/aarch64/common/gic.c +++ b/libcpu/aarch64/common/gic.c @@ -14,6 +14,8 @@ #include #include +#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV2) + #include "gic.h" #include "cp15.h" @@ -514,3 +516,4 @@ long gic_dump(void) } MSH_CMD_EXPORT(gic_dump, show gic status); +#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV2) */ diff --git a/libcpu/aarch64/common/gicv3.c b/libcpu/aarch64/common/gicv3.c new file mode 100644 index 0000000000000000000000000000000000000000..eb7c8f2f6e4cb728983eb9bd5092f6e9b2e5444f --- /dev/null +++ b/libcpu/aarch64/common/gicv3.c @@ -0,0 +1,862 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2013-07-20 Bernard first version + * 2014-04-03 Grissiom many enhancements + * 2018-11-22 Jesven add rt_hw_ipi_send() + * add rt_hw_ipi_handler_install() + * 2022-03-08 GuEe-GUI add BSP bind SPI CPU self support + * add GICv3 AArch64 system register interface + * modify arm_gic_redist_init() args + * modify arm_gic_cpu_init() args + * modify arm_gic_send_affinity_sgi() args + * remove arm_gic_redist_address_set() + * remove arm_gic_cpu_interface_address_set() + * remove arm_gic_secondary_cpu_init() + * remove get_main_cpu_affval() + * remove arm_gic_cpumask_to_affval() + */ + +#include +#include + +#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) + +#include +#include + +#include + +#ifndef ARM_SPI_BIND_CPU_ID +#define ARM_SPI_BIND_CPU_ID 0 +#endif + +#ifndef RT_USING_SMP +#define RT_CPUS_NR 1 +extern int rt_hw_cpu_id(void); +#else +extern rt_uint64_t rt_cpu_mpidr_early[]; +#endif /* RT_USING_SMP */ + +struct arm_gic +{ + rt_uint64_t offset; /* the first interrupt index in the vector table */ + rt_uint64_t redist_hw_base[RT_CPUS_NR]; /* the pointer of the gic redistributor */ + rt_uint64_t dist_hw_base; /* the base address of 
the gic distributor */ + rt_uint64_t cpu_hw_base[RT_CPUS_NR]; /* the base address of the gic cpu interface */ +}; + +/* 'ARM_GIC_MAX_NR' is the number of cores */ +static struct arm_gic _gic_table[ARM_GIC_MAX_NR]; +static unsigned int _gic_max_irq; + +#define GET_GICV3_REG(reg, out) __asm__ volatile ("mrs %0, " reg:"=r"(out)::"memory"); +#define SET_GICV3_REG(reg, in) __asm__ volatile ("msr " reg ", %0"::"r"(in):"memory"); + +/* AArch64 System register interface to GICv3 */ +#define ICC_IAR0_EL1 "S3_0_C12_C8_0" +#define ICC_IAR1_EL1 "S3_0_C12_C12_0" +#define ICC_EOIR0_EL1 "S3_0_C12_C8_1" +#define ICC_EOIR1_EL1 "S3_0_C12_C12_1" +#define ICC_HPPIR0_EL1 "S3_0_C12_C8_2" +#define ICC_HPPIR1_EL1 "S3_0_C12_C12_2" +#define ICC_BPR0_EL1 "S3_0_C12_C8_3" +#define ICC_BPR1_EL1 "S3_0_C12_C12_3" +#define ICC_DIR_EL1 "S3_0_C12_C11_1" +#define ICC_PMR_EL1 "S3_0_C4_C6_0" +#define ICC_RPR_EL1 "S3_0_C12_C11_3" +#define ICC_CTLR_EL1 "S3_0_C12_C12_4" +#define ICC_CTLR_EL3 "S3_6_C12_C12_4" +#define ICC_SRE_EL1 "S3_0_C12_C12_5" +#define ICC_SRE_EL2 "S3_4_C12_C9_5" +#define ICC_SRE_EL3 "S3_6_C12_C12_5" +#define ICC_IGRPEN0_EL1 "S3_0_C12_C12_6" +#define ICC_IGRPEN1_EL1 "S3_0_C12_C12_7" +#define ICC_IGRPEN1_EL3 "S3_6_C12_C12_7" +#define ICC_SGI0R_EL1 "S3_0_C12_C11_7" +#define ICC_SGI1R_EL1 "S3_0_C12_C11_5" +#define ICC_ASGI1R_EL1 "S3_0_C12_C11_6" + +/* Macro to access the Distributor Control Register (GICD_CTLR) */ +#define GICD_CTLR_RWP (1 << 31) +#define GICD_CTLR_E1NWF (1 << 7) +#define GICD_CTLR_DS (1 << 6) +#define GICD_CTLR_ARE_NS (1 << 5) +#define GICD_CTLR_ARE_S (1 << 4) +#define GICD_CTLR_ENGRP1S (1 << 2) +#define GICD_CTLR_ENGRP1NS (1 << 1) +#define GICD_CTLR_ENGRP0 (1 << 0) + +/* Macro to access the Redistributor Control Register (GICR_CTLR) */ +#define GICR_CTLR_UWP (1 << 31) +#define GICR_CTLR_DPG1S (1 << 26) +#define GICR_CTLR_DPG1NS (1 << 25) +#define GICR_CTLR_DPG0 (1 << 24) +#define GICR_CTLR_RWP (1 << 3) +#define GICR_CTLR_IR (1 << 2) +#define GICR_CTLR_CES (1 << 1) 
+#define GICR_CTLR_EnableLPI (1 << 0) + +/* Macro to access the Generic Interrupt Controller Interface (GICC) */ +#define GIC_CPU_CTRL(hw_base) HWREG32((hw_base) + 0x00U) +#define GIC_CPU_PRIMASK(hw_base) HWREG32((hw_base) + 0x04U) +#define GIC_CPU_BINPOINT(hw_base) HWREG32((hw_base) + 0x08U) +#define GIC_CPU_INTACK(hw_base) HWREG32((hw_base) + 0x0cU) +#define GIC_CPU_EOI(hw_base) HWREG32((hw_base) + 0x10U) +#define GIC_CPU_RUNNINGPRI(hw_base) HWREG32((hw_base) + 0x14U) +#define GIC_CPU_HIGHPRI(hw_base) HWREG32((hw_base) + 0x18U) +#define GIC_CPU_IIDR(hw_base) HWREG32((hw_base) + 0xFCU) + +/* Macro to access the Generic Interrupt Controller Distributor (GICD) */ +#define GIC_DIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U) +#define GIC_DIST_TYPE(hw_base) HWREG32((hw_base) + 0x004U) +#define GIC_DIST_IGROUP(hw_base, n) HWREG32((hw_base) + 0x080U + ((n) / 32U) * 4U) +#define GIC_DIST_ENABLE_SET(hw_base, n) HWREG32((hw_base) + 0x100U + ((n) / 32U) * 4U) +#define GIC_DIST_ENABLE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x180U + ((n) / 32U) * 4U) +#define GIC_DIST_PENDING_SET(hw_base, n) HWREG32((hw_base) + 0x200U + ((n) / 32U) * 4U) +#define GIC_DIST_PENDING_CLEAR(hw_base, n) HWREG32((hw_base) + 0x280U + ((n) / 32U) * 4U) +#define GIC_DIST_ACTIVE_SET(hw_base, n) HWREG32((hw_base) + 0x300U + ((n) / 32U) * 4U) +#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x380U + ((n) / 32U) * 4U) +#define GIC_DIST_PRI(hw_base, n) HWREG32((hw_base) + 0x400U + ((n) / 4U) * 4U) +#define GIC_DIST_TARGET(hw_base, n) HWREG32((hw_base) + 0x800U + ((n) / 4U) * 4U) +#define GIC_DIST_CONFIG(hw_base, n) HWREG32((hw_base) + 0xc00U + ((n) / 16U) * 4U) +#define GIC_DIST_SOFTINT(hw_base) HWREG32((hw_base) + 0xf00U) +#define GIC_DIST_CPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf10U + ((n) / 4U) * 4U) +#define GIC_DIST_SPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf20U + ((n) / 4U) * 4U) +#define GIC_DIST_ICPIDR2(hw_base) HWREG32((hw_base) + 0xfe8U) +#define GIC_DIST_IROUTER(hw_base, n) 
HWREG64((hw_base) + 0x6000U + (n) * 8U) + +/* SGI base address is at 64K offset from Redistributor base address */ +#define GIC_RSGI_OFFSET 0x10000 + +/* Macro to access the Generic Interrupt Controller Redistributor (GICR) */ +#define GIC_RDIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U) +#define GIC_RDIST_IIDR(hw_base) HWREG32((hw_base) + 0x004U) +#define GIC_RDIST_TYPER(hw_base) HWREG64((hw_base) + 0x008U) +#define GIC_RDIST_TSTATUSR(hw_base) HWREG32((hw_base) + 0x010U) +#define GIC_RDIST_WAKER(hw_base) HWREG32((hw_base) + 0x014U) +#define GIC_RDIST_SETLPIR(hw_base) HWREG32((hw_base) + 0x040U) +#define GIC_RDIST_CLRLPIR(hw_base) HWREG32((hw_base) + 0x048U) +#define GIC_RDIST_PROPBASER(hw_base) HWREG32((hw_base) + 0x070U) +#define GIC_RDIST_PENDBASER(hw_base) HWREG32((hw_base) + 0x078U) +#define GIC_RDIST_INVLPIR(hw_base) HWREG32((hw_base) + 0x0A0U) +#define GIC_RDIST_INVALLR(hw_base) HWREG32((hw_base) + 0x0B0U) +#define GIC_RDIST_SYNCR(hw_base) HWREG32((hw_base) + 0x0C0U) + +#define GIC_RDISTSGI_IGROUPR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x080U + (n) * 4U) +#define GIC_RDISTSGI_ISENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x100U) +#define GIC_RDISTSGI_ICENABLER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x180U) +#define GIC_RDISTSGI_ISPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x200U) +#define GIC_RDISTSGI_ICPENDR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x280U) +#define GIC_RDISTSGI_ISACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x300U) +#define GIC_RDISTSGI_ICACTIVER0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x380U) +#define GIC_RDISTSGI_IPRIORITYR(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0x400U + ((n) / 4U) * 4U) +#define GIC_RDISTSGI_ICFGR0(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC00U) +#define GIC_RDISTSGI_ICFGR1(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xC04U) +#define GIC_RDISTSGI_IGRPMODR0(hw_base, n) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xD00U + (n) * 4) +#define 
GIC_RDISTSGI_NSACR(hw_base) HWREG32((hw_base) + GIC_RSGI_OFFSET + 0xE00U) + +int arm_gic_get_active_irq(rt_uint64_t index) +{ + int irq; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + GET_GICV3_REG(ICC_IAR1_EL1, irq); + + irq = (irq & 0x1ffffff) + _gic_table[index].offset; + return irq; +} + +void arm_gic_ack(rt_uint64_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + RT_ASSERT(irq >= 0); + + __DSB(); + SET_GICV3_REG(ICC_EOIR1_EL1, irq); +} + +void arm_gic_mask(rt_uint64_t index, int irq) +{ + rt_uint64_t mask = 1 << (irq % 32); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + if (irq < 32) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + + GIC_RDISTSGI_ICENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask; + } + else + { + GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +void arm_gic_umask(rt_uint64_t index, int irq) +{ + rt_uint64_t mask = 1 << (irq % 32); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + if (irq < 32) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + + GIC_RDISTSGI_ISENABLER0(_gic_table[index].redist_hw_base[cpu_id]) = mask; + } + else + { + GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq) +{ + rt_uint64_t pend; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + if (irq >= 16) + { + pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1; + } + else + { + /* INTID 0-15 Software Generated Interrupt */ + pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff; + /* No CPU identification offered */ + if (pend != 0) + { + pend = 1; + } + else + { + pend = 0; + } + } + + return pend; +} + +void arm_gic_set_pending_irq(rt_uint64_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq 
- _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + if (irq >= 16) + { + GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1 << (irq % 32); + } + else + { + /* INTID 0-15 Software Generated Interrupt */ + /* Forward the interrupt to the CPU interface that requested it */ + GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000); + } +} + +void arm_gic_clear_pending_irq(rt_uint64_t index, int irq) +{ + rt_uint64_t mask; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + if (irq >= 16) + { + mask = 1 << (irq % 32); + GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask; + } + else + { + mask = 1 << ((irq % 4) * 8); + GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config) +{ + rt_uint64_t icfgr; + rt_uint64_t shift; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq); + shift = (irq % 16) << 1; + + icfgr &= (~(3 << shift)); + icfgr |= (config << shift); + + GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr; +} + +rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16) >> 1)); +} + +void arm_gic_clear_active(rt_uint64_t index, int irq) +{ + rt_uint64_t mask = 1 << (irq % 32); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask; +} + +/* Set up the cpu mask for the specific interrupt */ +void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask) +{ + rt_uint64_t old_tgt; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - 
_gic_table[index].offset; + RT_ASSERT(irq >= 0); + + old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq); + + old_tgt &= ~(0x0ff << ((irq % 4) * 8)); + old_tgt |= cpumask << ((irq % 4) * 8); + + GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt; +} + +rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff; +} + +void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority) +{ + rt_uint64_t mask; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + if (irq < 32) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + + mask = GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq); + mask &= ~(0xff << ((irq % 4) * 8)); + mask |= ((priority & 0xff) << ((irq % 4) * 8)); + GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) = mask; + } + else + { + mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq); + mask &= ~(0xff << ((irq % 4) * 8)); + mask |= ((priority & 0xff) << ((irq % 4) * 8)); + GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask; + } +} + +rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + if (irq < 32) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + + return (GIC_RDISTSGI_IPRIORITYR(_gic_table[index].redist_hw_base[cpu_id], irq) >> ((irq % 4) * 8)) & 0xff; + } + else + { + return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4) * 8)) & 0xff; + } +} + +void arm_gic_set_system_register_enable_mask(rt_uint64_t index, rt_uint64_t value) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + value &= 0xff; + /* set priority mask */ + SET_GICV3_REG(ICC_SRE_EL1, value); + __ISB(); +} + +rt_uint64_t 
arm_gic_get_system_register_enable_mask(rt_uint64_t index) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + rt_uint64_t value; + + GET_GICV3_REG(ICC_SRE_EL1, value); + return value; +} + +void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + priority &= 0xff; + /* set priority mask */ + SET_GICV3_REG(ICC_PMR_EL1, priority); +} + +rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + rt_uint64_t priority; + + GET_GICV3_REG(ICC_PMR_EL1, priority); + return priority; +} + +void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point) +{ + index = index; + binary_point &= 0x7; + + SET_GICV3_REG(ICC_BPR1_EL1, binary_point); +} + +rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index) +{ + rt_uint64_t binary_point; + + index = index; + GET_GICV3_REG(ICC_BPR1_EL1, binary_point); + return binary_point; +} + +rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq) +{ + rt_uint64_t pending, active; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + active = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1; + pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1; + + return ((active << 1) | pending); +} + +#ifdef RT_USING_SMP +void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode) +{ + const int cpu_mask_cpu_max_nr = sizeof(cpu_masks[0]) * 8; + rt_uint64_t int_id = (irq & 0xf) << 24; + rt_uint64_t irm = routing_mode << 40; /* Interrupt Routing Mode */ + + if (routing_mode == GICV3_ROUTED_TO_SPEC) + { + int cpu_id, cpu_mask_bit, i, cpu_masks_nr = RT_CPUS_NR / cpu_mask_cpu_max_nr; + rt_uint16_t target_list; + rt_uint64_t rs = 0; /* Range Selector */ + rt_uint64_t affinity_val, next_affinity_val; + + if (cpu_masks_nr * cpu_mask_cpu_max_nr != RT_CPUS_NR) + { + 
++cpu_masks_nr; + } + + for (i = cpu_id = 0; i < cpu_masks_nr;) + { + /* No cpu in this mask */ + if (cpu_masks[i] == 0) + { + ++i; + cpu_id += cpu_mask_cpu_max_nr; + continue; + } + + /* Get last cpu affinity value */ + affinity_val = rt_cpu_mpidr_early[cpu_id] & 0xff00ffff00ULL; + + /* Read 16 cpus information */ + for (cpu_mask_bit = 0; cpu_mask_bit < 16; ++cpu_mask_bit, ++cpu_id) + { + /* MPIDR_EL1: aff3[39:32], aff2[23:16], aff1[15:8] */ + next_affinity_val = rt_cpu_mpidr_early[cpu_id] & 0xff00ffff00ULL; + + /* Affinity value is different, read end */ + if (affinity_val != next_affinity_val) + { + break; + } + } + + /* Get all valid cpu mask */ + target_list = (0xffff >> (16 - cpu_mask_bit)) & cpu_masks[i]; + /* Clear read mask */ + cpu_masks[i] >>= cpu_mask_bit; + /* ICC_SGI1R_EL1: aff3[55:48], aff2[39:32], aff1[23:16] */ + affinity_val <<= 8; + + __DSB(); + /* Interrupts routed to the PEs specified by Aff3.Aff2.Aff1.. */ + SET_GICV3_REG(ICC_SGI1R_EL1, affinity_val | (rs << 44) | irm | int_id | target_list); + __ISB(); + + /* Check if reset the range selector */ + rs = affinity_val != next_affinity_val ? 0 : rs + 1; + } + } + else + { + __DSB(); + /* Interrupts routed to all PEs in the system, excluding "self". 
*/ + SET_GICV3_REG(ICC_SGI1R_EL1, irm | int_id); + __ISB(); + } +} +#endif /* RT_USING_SMP */ + +rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index) +{ + rt_uint64_t irq; + RT_ASSERT(index < ARM_GIC_MAX_NR); + + index = index; + GET_GICV3_REG(ICC_HPPIR1_EL1, irq); + + return irq; +} + +rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index) +{ + rt_uint64_t ret = 0; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + if (_gic_table[index].cpu_hw_base != RT_NULL) + { + ret = GIC_CPU_IIDR(_gic_table[index].cpu_hw_base); + } + + return ret; +} + +void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group) +{ + rt_uint64_t igroupr; + rt_uint64_t shift; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + RT_ASSERT(group <= 1); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq); + shift = (irq % 32); + igroupr &= (~(1U << shift)); + igroupr |= ((group & 0x1U) << shift); + + GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr; +} + +rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq) +{ + RT_ASSERT(index < ARM_GIC_MAX_NR); + + irq = irq - _gic_table[index].offset; + RT_ASSERT(irq >= 0); + + return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32)) & 0x1UL; +} + +static int arm_gicv3_wait_rwp(rt_uint64_t index, rt_uint64_t irq) +{ + rt_uint64_t rwp_bit; + rt_uint64_t base; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + if (irq < 32) + { + rt_int32_t cpu_id = rt_hw_cpu_id(); + + base = _gic_table[index].redist_hw_base[cpu_id]; + rwp_bit = GICR_CTLR_RWP; + } + else + { + base = _gic_table[index].dist_hw_base; + rwp_bit = GICD_CTLR_RWP; + } + + while (HWREG32(base) & rwp_bit) + { + } + + return 0; +} + +int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start) +{ + int i; + unsigned int gic_type; + rt_uint64_t main_cpu_affinity_val; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + _gic_table[index].dist_hw_base = dist_base; + _gic_table[index].offset = 
irq_start; + + + /* Find out how many interrupts are supported. */ + gic_type = GIC_DIST_TYPE(dist_base); + _gic_max_irq = ((gic_type & 0x1f) + 1) * 32; + + /* + * The GIC only supports up to 1020 interrupt sources. + * Limit this to either the architected maximum, or the + * platform maximum. + */ + if (_gic_max_irq > 1020) + { + _gic_max_irq = 1020; + } + if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */ + { + _gic_max_irq = ARM_GIC_NR_IRQS; + } + + GIC_DIST_CTRL(dist_base) = 0; + /* Wait for register write pending */ + arm_gicv3_wait_rwp(0, 32); + + /* Set all global interrupts to be level triggered, active low. */ + for (i = 32; i < _gic_max_irq; i += 16) + { + GIC_DIST_CONFIG(dist_base, i) = 0; + } + + arm_gicv3_wait_rwp(0, 32); + +#ifdef RT_USING_SMP + main_cpu_affinity_val = rt_cpu_mpidr_early[ARM_SPI_BIND_CPU_ID]; +#else + __asm__ volatile ("mrs %0, mpidr_el1":"=r"(main_cpu_affinity_val)); +#endif + + /* aff3[39:32], aff2[23:16], aff1[15:8], aff0[7:0] */ + main_cpu_affinity_val &= 0xff00ffffffULL; + + /* Set all global interrupts to this CPU only. */ + for (i = 32; i < _gic_max_irq; i++) + { + GIC_DIST_IROUTER(dist_base, i) = main_cpu_affinity_val | (GICV3_ROUTED_TO_SPEC << 31); + } + + arm_gicv3_wait_rwp(0, 32); + + /* Set priority on spi interrupts. */ + for (i = 32; i < _gic_max_irq; i += 4) + { + GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0; + } + + arm_gicv3_wait_rwp(0, 32); + /* Disable all interrupts. */ + for (i = 0; i < _gic_max_irq; i += 32) + { + GIC_DIST_PENDING_CLEAR(dist_base, i) = 0xffffffff; + GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffff; + } + + arm_gicv3_wait_rwp(0, 32); + /* All interrupts defaults to IGROUP1(IRQ). */ + for (i = 0; i < _gic_max_irq; i += 32) + { + GIC_DIST_IGROUP(dist_base, i) = 0xffffffff; + } + + arm_gicv3_wait_rwp(0, 32); + + /* + * The Distributor control register (GICD_CTLR) must be configured to enable the interrupt groups and to set the routing mode. 
+ * Enable Affinity routing (ARE bits) The ARE bits in GICD_CTLR control whether affinity routing is enabled. + * If affinity routing is not enabled, GICv3 can be configured for legacy operation. + * Whether affinity routing is enabled or not can be controlled separately for Secure and Non-secure state. + * GICD_CTLR contains separate enable bits for Group 0, Secure Group 1 and Non-secure Group 1: + * GICD_CTLR.EnableGrp1S enables distribution of Secure Group 1 interrupts. + * GICD_CTLR.EnableGrp1NS enables distribution of Non-secure Group 1 interrupts. + * GICD_CTLR.EnableGrp0 enables distribution of Group 0 interrupts. + */ + GIC_DIST_CTRL(dist_base) = GICD_CTLR_ARE_NS | GICD_CTLR_ENGRP1NS; + + return 0; +} + +int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base) +{ + int i; + int cpu_id = rt_hw_cpu_id(); + static int master_cpu_id = -1; + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + if (master_cpu_id < 0) + { + master_cpu_id = cpu_id; + rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, &master_cpu_id, sizeof(master_cpu_id)); + } + + if (!_gic_table[index].redist_hw_base[master_cpu_id]) + { + _gic_table[index].redist_hw_base[master_cpu_id] = redist_base; + } + redist_base = _gic_table[index].redist_hw_base[master_cpu_id]; + + redist_base += cpu_id * (2 << 16); + _gic_table[index].redist_hw_base[cpu_id] = redist_base; + + /* redistributor enable */ + GIC_RDIST_WAKER(redist_base) &= ~(1 << 1); + while (GIC_RDIST_WAKER(redist_base) & (1 << 2)) + { + } + + /* Disable all sgi and ppi interrupt */ + GIC_RDISTSGI_ICENABLER0(redist_base) = 0xffffffff; + arm_gicv3_wait_rwp(0, 0); + + /* Clear all interrupt pending */ + GIC_RDISTSGI_ICPENDR0(redist_base) = 0xffffffff; + + /* the corresponding interrupt is Group 1 or Non-secure Group 1. */ + GIC_RDISTSGI_IGROUPR0(redist_base, 0) = 0xffffffff; + GIC_RDISTSGI_IGRPMODR0(redist_base, 0) = 0xffffffff; + + /* Configure default priorities for SGI 0:15 and PPI 16:31.
*/ + for (i = 0; i < 32; i += 4) + { + GIC_RDISTSGI_IPRIORITYR(redist_base, i) = 0xa0a0a0a0U; + } + + /* Trigger level for PPI interrupts*/ + GIC_RDISTSGI_ICFGR1(redist_base) = 0; + return 0; +} + +int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base) +{ + rt_uint64_t value; + int cpu_id = rt_hw_cpu_id(); + + RT_ASSERT(index < ARM_GIC_MAX_NR); + + _gic_table[index].cpu_hw_base[cpu_id] = cpu_base; + + value = arm_gic_get_system_register_enable_mask(index); + value |= (1 << 0); + arm_gic_set_system_register_enable_mask(index, value); + SET_GICV3_REG(ICC_CTLR_EL1, 0); + + arm_gic_set_interface_prior_mask(index, 0xff); + + /* Enable group1 interrupt */ + value = 1; + SET_GICV3_REG(ICC_IGRPEN1_EL1, value); + + arm_gic_set_binary_point(0, 0); + + /* ICC_BPR0_EL1 determines the preemption group for both Group 0 and Group 1 interrupts. */ + value = 1; /* ICC_BPR0_EL1 determines the preemption group for both Group 0 and Group 1 interrupts.*/ + value |= 1 << 18; /* Targeted SGIs with affinity level 0 values of 0 - 255 are supported. */ + SET_GICV3_REG(ICC_CTLR_EL1, value); + + return 0; +} + +void arm_gic_dump_type(rt_uint64_t index) +{ + unsigned int gic_type; + + gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base); + rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n", + (GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4) & 0xf, + _gic_table[index].dist_hw_base, + _gic_max_irq, + gic_type & (1 << 10) ? 
"has" : "no", + gic_type); +} + +void arm_gic_dump(rt_uint64_t index) +{ + int i; + unsigned int val; + + val = arm_gic_get_high_pending_irq(0); + rt_kprintf("--- high pending priority: %d(%08x)\n", val, val); + + rt_kprintf("--- hw mask ---\n"); + for (i = 0; i < _gic_max_irq / 32; ++i) + { + rt_kprintf("0x%08x, ", GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, i * 32)); + } + + rt_kprintf("\b\b\n--- hw pending ---\n"); + for (i = 0; i < _gic_max_irq / 32; ++i) + { + rt_kprintf("0x%08x, ", GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, i * 32)); + } + + rt_kprintf("\b\b\n--- hw active ---\n"); + for (i = 0; i < _gic_max_irq / 32; ++i) + { + rt_kprintf("0x%08x, ", GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, i * 32)); + } + + rt_kprintf("\b\b\n"); +} + +long gic_dump(void) +{ + arm_gic_dump_type(0); + arm_gic_dump(0); + + return 0; +} +MSH_CMD_EXPORT(gic_dump, show gic status); + +#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) */ diff --git a/libcpu/aarch64/common/gicv3.h b/libcpu/aarch64/common/gicv3.h new file mode 100644 index 0000000000000000000000000000000000000000..cdc7720b3769a63a499a718c29b7c927df5cffca --- /dev/null +++ b/libcpu/aarch64/common/gicv3.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2006-2022, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2013-07-20 Bernard first version + * 2014-04-03 Grissiom many enhancements + * 2018-11-22 Jesven add rt_hw_ipi_send() + * add rt_hw_ipi_handler_install() + */ + +#ifndef __GICV3_H__ +#define __GICV3_H__ + +#include + +#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) + +#define GICV3_ROUTED_TO_ALL 1UL +#define GICV3_ROUTED_TO_SPEC 0UL + +int arm_gic_get_active_irq(rt_uint64_t index); +void arm_gic_ack(rt_uint64_t index, int irq); + +void arm_gic_mask(rt_uint64_t index, int irq); +void arm_gic_umask(rt_uint64_t index, int irq); + +rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq); +void 
arm_gic_set_pending_irq(rt_uint64_t index, int irq); +void arm_gic_clear_pending_irq(rt_uint64_t index, int irq); + +void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config); +rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq); + +void arm_gic_clear_active(rt_uint64_t index, int irq); + +void arm_gic_set_cpu(rt_uint64_t index, int irq, unsigned int cpumask); +rt_uint64_t arm_gic_get_target_cpu(rt_uint64_t index, int irq); + +void arm_gic_set_priority(rt_uint64_t index, int irq, rt_uint64_t priority); +rt_uint64_t arm_gic_get_priority(rt_uint64_t index, int irq); + +void arm_gic_set_interface_prior_mask(rt_uint64_t index, rt_uint64_t priority); +rt_uint64_t arm_gic_get_interface_prior_mask(rt_uint64_t index); + +void arm_gic_set_binary_point(rt_uint64_t index, rt_uint64_t binary_point); +rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index); + +rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq); + +#ifdef RT_USING_SMP +void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode); +#endif + +rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index); + +rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index); + +void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group); +rt_uint64_t arm_gic_get_group(rt_uint64_t index, int irq); + +int arm_gic_redist_address_set(rt_uint64_t index, rt_uint64_t redist_addr, int cpu_id); +int arm_gic_cpu_interface_address_set(rt_uint64_t index, rt_uint64_t interface_addr, int cpu_id); + +int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start); +int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base); +int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base); + +void arm_gic_dump_type(rt_uint64_t index); +void arm_gic_dump(rt_uint64_t index); + +#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV3) */ + +#endif + diff --git a/libcpu/aarch64/common/interrupt.c 
b/libcpu/aarch64/common/interrupt.c index 71f48db366ef867148938a72118e4e33403424dd..0d6b24341df5edc43715b8fb1402f1ab46ccaf17 100644 --- a/libcpu/aarch64/common/interrupt.c +++ b/libcpu/aarch64/common/interrupt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006-2018, RT-Thread Development Team + * Copyright (c) 2006-2022, RT-Thread Development Team * * SPDX-License-Identifier: Apache-2.0 * @@ -13,6 +13,7 @@ #include #include "interrupt.h" #include "gic.h" +#include "gicv3.h" /* exception and interrupt handler table */ struct rt_irq_desc isr_table[MAX_HANDLERS]; @@ -24,6 +25,10 @@ rt_ubase_t rt_interrupt_to_thread = 0; rt_ubase_t rt_thread_switch_interrupt_flag = 0; #endif +#ifndef RT_CPUS_NR +#define RT_CPUS_NR 1 +#endif + const unsigned int VECTOR_BASE = 0x00; extern void rt_cpu_vector_set_base(void *addr); extern void *system_vectors; @@ -85,6 +90,9 @@ void rt_hw_interrupt_init(void) #else rt_uint64_t gic_cpu_base; rt_uint64_t gic_dist_base; +#ifdef BSP_USING_GICV3 + rt_uint64_t gic_rdist_base; +#endif rt_uint64_t gic_irq_start; /* initialize vector table */ @@ -97,15 +105,23 @@ void rt_hw_interrupt_init(void) #ifdef RT_USING_USERSPACE gic_dist_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_dist_base(), 0x2000, MMU_MAP_K_DEVICE); gic_cpu_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_cpu_base(), 0x1000, MMU_MAP_K_DEVICE); +#ifdef BSP_USING_GICV3 + gic_rdist_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_redist_base(), + RT_CPUS_NR * (2 << 16), MMU_MAP_K_DEVICE); +#endif #else gic_dist_base = platform_get_gic_dist_base(); gic_cpu_base = platform_get_gic_cpu_base(); + gic_rdist_base = platform_get_gic_redist_base(); #endif gic_irq_start = GIC_IRQ_START; arm_gic_dist_init(0, gic_dist_base, gic_irq_start); arm_gic_cpu_init(0, gic_cpu_base); +#ifdef BSP_USING_GICV3 + arm_gic_redist_init(0, gic_rdist_base); +#endif #endif } @@ -359,7 +375,11 @@ rt_isr_handler_t rt_hw_interrupt_install(int vector, 
rt_isr_handler_t handler, #ifdef RT_USING_SMP void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask) { +#ifdef BSP_USING_GICV2 arm_gic_send_sgi(0, ipi_vector, cpu_mask, 0); +#elif defined(BSP_USING_GICV3) + arm_gic_send_affinity_sgi(0, ipi_vector, (unsigned int *)&cpu_mask, GICV3_ROUTED_TO_SPEC); +#endif } void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler) @@ -368,4 +388,3 @@ void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler) rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER"); } #endif -