diff --git a/include/bao.h b/include/bao.h new file mode 100644 index 0000000..8419ba8 --- /dev/null +++ b/include/bao.h @@ -0,0 +1,466 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Provides some definitions for the Bao Hypervisor modules + * + * Copyright (c) Bao Project and Contributors. All rights reserved. + * + * Authors: + * João Peixoto + * José Martins + * David Cerdeira + */ + +#ifndef __BAO_DRV_H +#define __BAO_DRV_H + +#include +#include + +#define BAO_NAME_MAX_LEN 16 +#define BAO_IO_MAX_DMS 16 + +#define BAO_IOEVENTFD_FLAG_DATAMATCH BIT(1) +#define BAO_IOEVENTFD_FLAG_DEASSIGN BIT(2) +#define BAO_IRQFD_FLAG_DEASSIGN 1U +#define BAO_IO_CLIENT_DESTROYING 0U + +/* IPC through shared-memory hypercall ID */ +#define BAO_IPCSHMEM_HYPERCALL_ID 0x1 + +/* Remote I/O Hypercall ID */ +#define BAO_REMIO_HYPERCALL_ID 0x2 + +/** + * struct bao_virtio_request - Parameters of a Bao VirtIO request + * @dm_id: Device model ID + * @addr: MMIO register address accessed + * @op: Operation type (WRITE = 0, READ, ASK, NOTIFY) + * @value: Value to write or read + * @access_width: Access width (VirtIO MMIO supports 4-byte aligned accesses) + * @request_id: Request ID of the I/O request + */ +struct bao_virtio_request { + __u64 dm_id; + __u64 addr; + __u64 op; + __u64 value; + __u64 access_width; + __u64 request_id; +}; + +/** + * struct bao_ioeventfd - Parameters of an ioeventfd request + * @fd: Eventfd file descriptor associated with the I/O request + * @flags: Logical OR of BAO_IOEVENTFD_FLAG_* + * @addr: Start address of the I/O range + * @len: Length of the I/O range + * @reserved: Reserved, must be 0 + * @data: Data for matching (used if data matching is enabled) + */ +struct bao_ioeventfd { + __u32 fd; + __u32 flags; + __u64 addr; + __u32 len; + __u32 reserved; + __u64 data; +}; + +/** + * struct bao_irqfd - Parameters of an IRQFD request + * @fd: File descriptor of the eventfd + * @flags: Flags associated with the eventfd + */ +struct bao_irqfd { + __s32 fd; + __u32 
flags; +}; + +/** + * struct bao_dm_info - Parameters of a Bao device model + * @id: Virtual ID of the DM + * @shmem_addr: Base address of the shared memory + * @shmem_size: Size of the shared memory + * @irq: IRQ number + * @fd: File descriptor of the DM + */ +struct bao_dm_info { + __u32 id; + __u64 shmem_addr; + __u64 shmem_size; + __u32 irq; + __s32 fd; +}; + +/* + * The ioctl type for Bao, documented in + * Documentation/userspace-api/ioctl/ioctl-number.rst + */ +#define BAO_IOCTL_TYPE 0xA6 + +/* + * Bao userspace IOCTL commands + * Follows Linux kernel convention, see Documentation/driver-api/ioctl.rst + */ +#define BAO_IOCTL_DM_GET_INFO _IOWR(BAO_IOCTL_TYPE, 0x01, struct bao_dm_info) +#define BAO_IOCTL_IO_CLIENT_ATTACH \ + _IOWR(BAO_IOCTL_TYPE, 0x02, struct bao_virtio_request) +#define BAO_IOCTL_IO_REQUEST_COMPLETE \ + _IOW(BAO_IOCTL_TYPE, 0x03, struct bao_virtio_request) +#define BAO_IOCTL_IOEVENTFD _IOW(BAO_IOCTL_TYPE, 0x04, struct bao_ioeventfd) +#define BAO_IOCTL_IRQFD _IOW(BAO_IOCTL_TYPE, 0x05, struct bao_irqfd) + +/** + * struct bao_remio_hypercall_ctx - REMIO hypercall context + * @dm_id: Device model identifier + * @addr: Target address + * @op: Operation code + * @value: Value to read/write + * @access_width: Access width in bytes + * @request_id: Request identifier + * @npend_req: Number of pending requests + */ +struct bao_remio_hypercall_ctx { + u64 dm_id; + u64 addr; + u64 op; + u64 value; + u64 access_width; + u64 request_id; + u64 npend_req; +}; + +struct bao_dm; +struct bao_io_client; + +typedef int (*bao_io_client_handler_t)(struct bao_io_client *client, + struct bao_virtio_request *req); + +/** + * enum bao_io_op - Bao hypervisor I/O operation types + * @BAO_IO_WRITE: Write operation + * @BAO_IO_READ: Read operation + * @BAO_IO_ASK: Request operation information (e.g., MMIO address) + * @BAO_IO_NOTIFY: Notify I/O completion + */ +enum bao_io_op { + BAO_IO_WRITE = 0, + BAO_IO_READ, + BAO_IO_ASK, + BAO_IO_NOTIFY, +}; + +/** + * struct 
bao_io_client - Bao I/O client + * @name: Client name + * @dm: The DM that the client belongs to + * @list: List node for this bao_io_client + * @is_control: If this client is the control client + * @flags: Flags (BAO_IO_CLIENT_*) + * @virtio_requests: List of free I/O requests + * @range_list: I/O ranges + * @handler: I/O request handler for this client + * @thread: Kernel thread executing the handler + * @wq: Wait queue used for thread parking + * @priv: Private data for the handler + */ +struct bao_io_client { + char name[BAO_NAME_MAX_LEN]; + struct bao_dm *dm; + struct list_head list; + bool is_control; + unsigned long flags; + struct list_head virtio_requests; + + /* protects virtio_requests list */ + struct mutex virtio_requests_lock; + + struct list_head range_list; + + /* protects range_list */ + struct rw_semaphore range_lock; + + bao_io_client_handler_t handler; + struct task_struct *thread; + wait_queue_head_t wq; + void *priv; +}; + +/** + * struct bao_dm - Bao backend device model (DM) + * @list: Entry within global list of all DMs + * @info: DM information (id, shmem_addr, shmem_size, irq, fd) + * @shmem_base_addr: The base address of the shared memory + * @ioeventfds: List of all ioeventfds + * @ioeventfd_client: Ioeventfd client + * @irqfds: List of all irqfds + * @irqfd_server: Workqueue responsible for irqfd handling + * @io_clients: List of all bao_io_client + * @control_client: Control client + * @refcount: Each open file holds a reference to the DM + */ +struct bao_dm { + struct list_head list; + struct bao_dm_info info; + void *shmem_base_addr; + + struct list_head ioeventfds; + + /* protects ioeventfds list */ + struct mutex ioeventfds_lock; + + struct bao_io_client *ioeventfd_client; + + struct list_head irqfds; + + /* protects irqfds list */ + struct mutex irqfds_lock; + + struct workqueue_struct *irqfd_server; + + /* protects io_clients list */ + struct rw_semaphore io_clients_lock; + + struct list_head io_clients; + struct bao_io_client 
*control_client; + + refcount_t refcount; +}; + +/** + * struct bao_io_range - Represents a range of I/O addresses + * @list: List node for linking multiple ranges + * @start: Start address of the range + * @end: End address of the range (inclusive) + */ +struct bao_io_range { + struct list_head list; + u64 start; + u64 end; +}; + +/* Global list of all Bao device models */ +extern struct list_head bao_dm_list; + +/* Lock protecting access to bao_dm_list */ +extern rwlock_t bao_dm_list_lock; + +/** + * bao_dm_create - Create a backend device model (DM) + * @info: DM information (id, shmem_addr, shmem_size, irq, fd) + * + * Return: Pointer to the created DM on success, NULL on error. + */ +struct bao_dm *bao_dm_create(struct bao_dm_info *info); + +/** + * bao_dm_destroy - Destroy a backend device model (DM) + * @dm: DM to be destroyed + */ +void bao_dm_destroy(struct bao_dm *dm); + +/** + * bao_dm_get_info - Retrieve information of a DM + * @info: Structure to be filled; id field must contain the DM ID + * + * Return: True on success, false on error. + */ +bool bao_dm_get_info(struct bao_dm_info *info); + +/** + * bao_io_client_create - Create a backend I/O client + * @dm: DM this client belongs to + * @handler: I/O client handler for requests + * @data: Private data passed to the handler + * @is_control: True if this is the control client + * @name: Name of the I/O client + * + * Return: Pointer to the created I/O client, NULL on failure. + */ +struct bao_io_client *bao_io_client_create(struct bao_dm *dm, + bao_io_client_handler_t handler, + void *data, bool is_control, + const char *name); + +/** + * bao_io_clients_destroy - Destroy all I/O clients of a DM + * @dm: DM whose I/O clients are to be destroyed + */ +void bao_io_clients_destroy(struct bao_dm *dm); + +/** + * bao_io_client_attach - Attach a thread to an I/O client + * @client: I/O client to attach + * + * The thread will wait for I/O requests on this client. 
+ * + * Return: 0 on success, negative error code on failure. + */ +int bao_io_client_attach(struct bao_io_client *client); + +/** + * bao_io_client_range_add - Add an I/O range to monitor in a client + * @client: I/O client + * @start: Start address of the range + * @end: End address of the range (inclusive) + * + * Return: 0 on success, negative error code on failure. + */ +int bao_io_client_range_add(struct bao_io_client *client, u64 start, u64 end); + +/** + * bao_io_client_range_del - Remove an I/O range from a client + * @client: I/O client + * @start: Start address of the range + * @end: End address of the range (inclusive) + */ +void bao_io_client_range_del(struct bao_io_client *client, u64 start, u64 end); + +/** + * bao_io_client_request - Retrieve the oldest I/O request from a client + * @client: I/O client + * @req: Pointer to virtio request structure to fill + * + * Return: 0 on success, negative error code if no request is available. + */ +int bao_io_client_request(struct bao_io_client *client, + struct bao_virtio_request *req); + +/** + * bao_io_client_push_request - Push an I/O request into a client + * @client: I/O client + * @req: I/O request to push + * + * Return: True if a request was pushed, false otherwise. + */ +bool bao_io_client_push_request(struct bao_io_client *client, + struct bao_virtio_request *req); + +/** + * bao_io_client_pop_request - Pop the oldest I/O request from a client + * @client: I/O client + * @req: Buffer to store the popped request + * + * Return: True if a request was popped, false if the list was empty. + */ +bool bao_io_client_pop_request(struct bao_io_client *client, + struct bao_virtio_request *req); + +/** + * bao_io_client_find - Find the I/O client for a given request + * @dm: DM that the I/O request belongs to + * @req: I/O request to locate + * + * Return: Pointer to the I/O client handling the request, NULL if none found. 
+ */ +struct bao_io_client *bao_io_client_find(struct bao_dm *dm, + struct bao_virtio_request *req); + +/** + * bao_ioeventfd_client_init - Initialize the Ioeventfd client for a DM + * @dm: DM that the Ioeventfd client belongs to + * + * Return: 0 on success, negative error code on failure. + */ +int bao_ioeventfd_client_init(struct bao_dm *dm); + +/** + * bao_ioeventfd_client_destroy - Destroy the Ioeventfd client for a DM + * @dm: DM that the Ioeventfd client belongs to + */ +void bao_ioeventfd_client_destroy(struct bao_dm *dm); + +/** + * bao_ioeventfd_client_config - Configure an Ioeventfd client + * @dm: DM that the Ioeventfd client belongs to + * @config: Ioeventfd configuration to apply + * + * Return: 0 on success, negative error code on failure. + */ +int bao_ioeventfd_client_config(struct bao_dm *dm, + struct bao_ioeventfd *config); + +/** + * bao_irqfd_server_init - Initialize the Irqfd server for a DM + * @dm: DM that the Irqfd server belongs to + * + * Return: 0 on success, negative error code on failure. + */ +int bao_irqfd_server_init(struct bao_dm *dm); + +/** + * bao_irqfd_server_destroy - Destroy the Irqfd server for a DM + * @dm: DM that the Irqfd server belongs to + */ +void bao_irqfd_server_destroy(struct bao_dm *dm); + +/** + * bao_irqfd_server_config - Configure an Irqfd server + * @dm: DM that the Irqfd server belongs to + * @config: Irqfd configuration to apply + * + * Return: 0 on success, negative error code on failure. + */ +int bao_irqfd_server_config(struct bao_dm *dm, struct bao_irqfd *config); + +/** + * bao_io_dispatcher_init - Initialize the I/O Dispatcher for a DM + * @dm: DM to initialize on the I/O Dispatcher + * + * Return: 0 on success, negative error code on failure. 
+ */ +int bao_io_dispatcher_init(struct bao_dm *dm); + +/** + * bao_io_dispatcher_destroy - Destroy the I/O Dispatcher for a DM + * @dm: DM to destroy on the I/O Dispatcher + */ +void bao_io_dispatcher_destroy(struct bao_dm *dm); + +/** + * bao_dispatch_io - Acquire and dispatch I/O requests from the Bao Hypervisor + * @dm: DM whose I/O clients will handle the requests + * + * Return: 0 on success, negative error code on failure. + */ +int bao_dispatch_io(struct bao_dm *dm); + +/** + * bao_io_dispatcher_pause - Pause the I/O Dispatcher for a DM + * @dm: DM to pause + */ +void bao_io_dispatcher_pause(struct bao_dm *dm); + +/** + * bao_io_dispatcher_resume - Resume the I/O Dispatcher for a DM + * @dm: DM to resume + */ +void bao_io_dispatcher_resume(struct bao_dm *dm); + +/** + * bao_intc_init - Register the interrupt controller for a DM + * @dm: DM that the interrupt controller belongs to + * + * Return: 0 on success, negative error code on failure. + */ +int bao_intc_init(struct bao_dm *dm); + +/** + * bao_intc_destroy - Unregister the interrupt controller for a DM + * @dm: DM that the interrupt controller belongs to + */ +void bao_intc_destroy(struct bao_dm *dm); + +/** + * bao_intc_setup_handler - Setup the interrupt controller handler + * @handler: Function pointer to the interrupt handler + * @dm: DM that the interrupt controller belongs to + */ +void bao_intc_setup_handler(void (*handler)(struct bao_dm *dm)); + +/** + * bao_intc_remove_handler - Remove the interrupt controller handler + */ +void bao_intc_remove_handler(void); + +#endif /* __BAO_DRV_H */ diff --git a/include/hypercall.h b/include/hypercall.h new file mode 100644 index 0000000..7701ee7 --- /dev/null +++ b/include/hypercall.h @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hypercall API for Bao Hypervisor + * + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ * + * Authors: + * João Peixoto + */ + +#ifndef __BAO_HYPERCALL_H +#define __BAO_HYPERCALL_H + +#include "bao.h" + +#if defined(CONFIG_ARM64) || defined(CONFIG_ARM) +#include +#elif defined(CONFIG_RISCV) +#include +#endif + +/* Remote I/O Hypercall ID */ +#define REMIO_HC_ID 0x2 + +#if defined(CONFIG_ARM64) +static inline unsigned long bao_ipcshmem_hypercall(unsigned long ipcshmem_id) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, + ARM_SMCCC_OWNER_VENDOR_HYP, + BAO_IPCSHMEM_HYPERCALL_ID), + ipcshmem_id, 0, 0, 0, 0, 0, 0, &res); + + return res.a0; +} + +static inline unsigned long +bao_remio_hypercall(struct bao_remio_hypercall_ctx *ctx) +{ + register u64 x0 asm("x0") = + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, + ARM_SMCCC_OWNER_VENDOR_HYP, BAO_REMIO_HYPERCALL_ID); + register u64 x1 asm("x1") = ctx->dm_id; + register u64 x2 asm("x2") = ctx->addr; + register u64 x3 asm("x3") = ctx->op; + register u64 x4 asm("x4") = ctx->value; + register u64 x5 asm("x5") = ctx->request_id; + register u64 x6 asm("x6") = 0; + + asm volatile("hvc 0\n\t" + : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), + "+r"(x5), "+r"(x6) + : + : "memory"); + + ctx->addr = x1; + ctx->op = x2; + ctx->value = x3; + ctx->access_width = x4; + ctx->request_id = x5; + ctx->npend_req = x6; + + return x0; +} +#elif defined(CONFIG_ARM) +static inline unsigned long bao_ipcshmem_hypercall(unsigned long ipcshmem_id) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, + ARM_SMCCC_OWNER_VENDOR_HYP, + BAO_IPCSHMEM_HYPERCALL_ID), + ipcshmem_id, 0, 0, 0, 0, 0, 0, &res); + + return res.a0; +} + +static inline unsigned long +bao_remio_hypercall(struct bao_remio_hypercall_ctx *ctx) +{ + register u32 r0 asm("r0") = + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, + ARM_SMCCC_OWNER_VENDOR_HYP, BAO_REMIO_HYPERCALL_ID); + register u32 
r1 asm("r1") = ctx->dm_id; + register u32 r2 asm("r2") = ctx->addr; + register u32 r3 asm("r3") = ctx->op; + register u32 r4 asm("r4") = ctx->value; + register u32 r5 asm("r5") = ctx->request_id; + register u32 r6 asm("r6") = 0; + + asm volatile("hvc 0\n\t" + : "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), + "+r"(r5), "+r"(r6) + : + : "memory"); + + ctx->addr = r1; + ctx->op = r2; + ctx->value = r3; + ctx->access_width = r4; + ctx->request_id = r5; + ctx->npend_req = r6; + + return r0; +} +#elif defined(CONFIG_RISCV) +#define BAO_SBI_EXT_ID 0x08000ba0 + +static inline unsigned long bao_ipcshmem_hypercall(unsigned long ipcshmem_id) +{ + struct sbiret ret; + + ret = sbi_ecall(BAO_SBI_EXT_ID, BAO_IPCSHMEM_HYPERCALL_ID, ipcshmem_id, + 0, 0, 0, 0, 0); + + return ret.error; +} + +static inline unsigned long +bao_remio_hypercall(struct bao_remio_hypercall_ctx *ctx) +{ + register uintptr_t a0 asm("a0") = (uintptr_t)(ctx->dm_id); + register uintptr_t a1 asm("a1") = (uintptr_t)(ctx->addr); + register uintptr_t a2 asm("a2") = (uintptr_t)(ctx->op); + register uintptr_t a3 asm("a3") = (uintptr_t)(ctx->value); + register uintptr_t a4 asm("a4") = (uintptr_t)(ctx->request_id); + register uintptr_t a5 asm("a5") = (uintptr_t)(0); + register uintptr_t a6 asm("a6") = (uintptr_t)(BAO_REMIO_HYPERCALL_ID); + register uintptr_t a7 asm("a7") = (uintptr_t)(BAO_SBI_EXT_ID); + + asm volatile("ecall" + : "+r"(a0), "+r"(a1), "+r"(a2), "+r"(a3), "+r"(a4), + "+r"(a5), "+r"(a6), "+r"(a7) + : + : "memory"); + + ctx->addr = a2; + ctx->op = a3; + ctx->value = a4; + ctx->access_width = a5; + ctx->request_id = a6; + ctx->npend_req = a7; + + return a0; +} +#endif + +#endif /* __BAO_HYPERCALL_H */ diff --git a/iodispatcher/Makefile index 91aab90..ae78e93 100644 --- a/iodispatcher/Makefile +++ b/iodispatcher/Makefile @@ -1,3 +1,4 @@ # Object files and module definition obj-m += bao-iodispatcher.o -iodispatcher-y := dm.o driver.o 
intc.o io_client.o io_dispatcher.o ioctls.o ioeventfd.o irqfd.o +bao-iodispatcher-y := dm.o driver.o intc.o io_client.o io_dispatcher.o ioeventfd.o irqfd.o +ccflags-y += -I$(src)/../include \ No newline at end of file diff --git a/iodispatcher/README.md index e6555ff..90b6bb6 100644 --- a/iodispatcher/README.md +++ b/iodispatcher/README.md @@ -17,13 +17,13 @@ export KERN_DIR=path/to/your/linux make iodispatcher ``` -3. Copy the `iodispatcher.ko` file to your target filesystem as `bao_iodispatcher.ko`. +3. Copy the `bao-iodispatcher.ko` file to your target filesystem. ### Run instructions 1. When the Backend VM boots up, insert the kernel module: ``` -insmod bao_iodispatcher.ko +insmod bao-iodispatcher.ko ``` -2. From now on, you should be able to see the `/dev/bao-io-dispatcher` device node. \ No newline at end of file +2. From now on, you should be able to see the `/dev/bao-io-dispatcher` device node. diff --git a/iodispatcher/bao.h deleted file mode 100644 index 68fd483..0000000 --- a/iodispatcher/bao.h +++ /dev/null @@ -1,419 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Provides the Bao Hypervisor IOCTLs and global structures - * - * Copyright (c) Bao Project and Contributors. All rights reserved. 
- * - * Authors: - * João Peixoto - */ - -#ifndef _BAO_H -#define _BAO_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define BAO_IO_WRITE 0x0 -#define BAO_IO_READ 0x1 -#define BAO_IO_ASK 0x2 -#define BAO_IO_NOTIFY 0x3 - -#define BAO_NAME_MAX_LEN 16 -#define BAO_IO_REQUEST_MAX 64 -#define BAO_IO_MAX_DMS 16 - -/** - * Contains the specific parameters of a Bao VirtIO request - * @dm_id: Device Model ID - * @addr: Gives the MMIO register address that was accessed - * @op: Write, Read, Ask or Notify operation - * @value: Value to write or read - * @access_width: Access width (VirtIO MMIO only allows 4-byte wide and alligned - * accesses) - * @request_id: Request ID - */ -struct bao_virtio_request { - __u64 dm_id; - __u64 addr; - __u64 op; - __u64 value; - __u64 access_width; - __u64 request_id; -}; - -/** - * Contains the specific parameters of a ioeventfd request - * @fd: The fd of eventfd associated with a hsm_ioeventfd - * @flags: Logical-OR of BAO_IOEVENTFD_FLAG_* - * @addr: The start address of IO range of ioeventfd - * @len: The length of IO range of ioeventfd - * @reserved: Reserved and should be 0 - * @data: Data for data matching - */ -struct bao_ioeventfd { - __u32 fd; - __u32 flags; - __u64 addr; - __u32 len; - __u32 reserved; - __u64 data; -}; - -/** - * Contains the specific parameters of a irqfd request - * @fd: The file descriptor of the eventfd - * @flags: The flags of the eventfd - */ -struct bao_irqfd { - __s32 fd; - __u32 flags; -}; - -/** - * Contains the specific parameters of a Bao DM - * @id: The virtual ID of the DM - * @shmem_addr: The base address of the shared memory - * @shmem_size: The size of the shared memory - * @irq: The IRQ number - * @fd: The file descriptor of the DM - */ -struct bao_dm_info { - __u32 id; - __u64 shmem_addr; - __u64 shmem_size; - __u32 irq; - __s32 fd; -}; - -/* The ioctl type, listed in Documentation/userspace-api/ioctl/ioctl-number.rst - */ 
-#define BAO_IOCTL_TYPE 0xA6 - -/* - * Common IOCTL IDs definition for Bao userspace - * Follows the convention of the Linux kernel, listed in - * Documentation/driver-api/ioctl.rst - */ -#define BAO_IOCTL_DM_GET_INFO _IOWR(BAO_IOCTL_TYPE, 0x01, struct bao_dm_info) -#define BAO_IOCTL_IO_CLIENT_ATTACH _IOWR(BAO_IOCTL_TYPE, 0x02, struct bao_virtio_request) -#define BAO_IOCTL_IO_REQUEST_COMPLETE _IOW(BAO_IOCTL_TYPE, 0x03, struct bao_virtio_request) -#define BAO_IOCTL_IOEVENTFD _IOW(BAO_IOCTL_TYPE, 0x04, struct bao_ioeventfd) -#define BAO_IOCTL_IRQFD _IOW(BAO_IOCTL_TYPE, 0x05, struct bao_irqfd) - -#define BAO_IOEVENTFD_FLAG_DATAMATCH (1 << 1) -#define BAO_IOEVENTFD_FLAG_DEASSIGN (1 << 2) -#define BAO_IRQFD_FLAG_DEASSIGN 1U - -#define BAO_IO_CLIENT_DESTROYING 0U - -#define BAO_DM_FLAG_DESTROYING 0U -#define BAO_DM_FLAG_CLEARING_IOREQ 1U - -struct bao_dm; -struct bao_io_client; - -typedef int (*bao_io_client_handler_t)(struct bao_io_client* client, struct bao_virtio_request* req); - -/** - * Bao I/O client - * @name: Client name - * @dm: The DM that the client belongs to - * @list: List node for this bao_io_client - * @is_control: If this client is the control client - * @flags: Flags (BAO_IO_CLIENT_*) - * @virtio_requests: Array of all I/O requests that are free to process - * @virtio_requests_lock: Lock to protect virtio_requests list - * @range_list: I/O ranges - * @range_lock: Semaphore to protect range_list - * @handler: I/O requests handler of this client - * @thread: The thread which executes the handler - * @wq: The wait queue for the handler thread parking - * @priv: Data for the thread - */ -struct bao_io_client { - char name[BAO_NAME_MAX_LEN]; - struct bao_dm* dm; - struct list_head list; - bool is_control; - unsigned long flags; - struct list_head virtio_requests; - struct mutex virtio_requests_lock; - struct list_head range_list; - struct rw_semaphore range_lock; - bao_io_client_handler_t handler; - struct task_struct* thread; - wait_queue_head_t wq; - void* 
priv; -}; - -/** - * Bao backend device model (DM) - * @list: Entry within global list of all DMs - * @info: DM information (id, shmem_addr, shmem_size, irq, fd) - * @shmem_base_addr: The base address of the shared memory (only used for - * unmapping purposes) - * @flags: Flags (BAO_IO_DISPATCHER_DM_*) - * @ioeventfds: List to link all bao_ioeventfd - * @ioeventfds_lock: Lock to protect ioeventfds list - * @ioeventfd_client: Ioevenfd client - * @irqfds: List to link all bao_irqfd - * @irqfds_lock: Lock to protect irqfds list - * @irqfd_server: Irqfd server - * @io_clients_lock: Semaphore to protect io_clients - * @io_clients: List to link all bao_io_client - * @control_client: Control client - */ -struct bao_dm { - struct list_head list; - struct bao_dm_info info; - void* shmem_base_addr; - unsigned long flags; - struct list_head ioeventfds; - struct mutex ioeventfds_lock; - struct bao_io_client* ioeventfd_client; - struct list_head irqfds; - struct mutex irqfds_lock; - struct workqueue_struct* irqfd_server; - struct rw_semaphore io_clients_lock; - struct list_head io_clients; - struct bao_io_client* control_client; -}; - -/** - * Bao I/O request range - * @list: List node for this range - * @start: The start address of the range - * @end: The end address of the range - * - */ -struct bao_io_range { - struct list_head list; - u64 start; - u64 end; -}; - -extern struct list_head bao_dm_list; -extern rwlock_t bao_dm_list_lock; - -/** - * Create the backend DM - * @info: The DM information (id, shmem_addr, shmem_size, irq, fd) - * @return dm on success, NULL on error - */ -struct bao_dm* bao_dm_create(struct bao_dm_info* info); - -/** - * Destroy the backend DM - * @dm: The DM to be destroyed - */ -void bao_dm_destroy(struct bao_dm* dm); - -/** - * Get the DM information - * @info: The DM information to be filled (id field contains the DM ID) - * @return true on success, false on error - */ -bool bao_dm_get_info(struct bao_dm_info* info); - -/** - * DM ioctls handler 
- * @filp: The open file pointer - * @cmd: The ioctl command - * @ioctl_param: The ioctl parameter - */ -long bao_dm_ioctl(struct file* filp, unsigned int cmd, unsigned long ioctl_param); - -/** - * Create an I/O client - * @dm: The DM that this client belongs to - * @handler: The I/O client handler for the I/O requests - * @data: Private data for the handler - * @is_control: If it is the control client - * @name: The name of I/O client - */ -struct bao_io_client* bao_io_client_create(struct bao_dm* dm, bao_io_client_handler_t handler, - void* data, bool is_control, const char* name); - -/** - * Destroy the I/O clients of the DM - * @dm: The DM that the I/O clients belong to - */ -void bao_io_clients_destroy(struct bao_dm* dm); - -/** - * Attach the thread to the I/O client to wait for I/O requests - * @client: The I/O client to handle the I/O request - */ -int bao_io_client_attach(struct bao_io_client* client); - -/** - * Add an I/O range monitor into an I/O client - * @client: The I/O client that the range will be added - * @start: The start address of the range - * @end: The end address of the range - */ -int bao_io_client_range_add(struct bao_io_client* client, u64 start, u64 end); - -/** - * Delete an I/O range monitor from an I/O client - * @client: The I/O client that the range will be deleted - * @start: The start address of the range - * @end: The end address of the range - */ -void bao_io_client_range_del(struct bao_io_client* client, u64 start, u64 end); - -/** - * Retrieve the oldest I/O request from the I/O client - * @client: The I/O client - * @req: The virtio request to be retrieved - * @return 0 on success, <0 on failure - */ -int bao_io_client_request(struct bao_io_client* client, struct bao_virtio_request* req); - -/** - * Push an I/O request into the I/O client request list - * @client: The I/O Client that the I/O request belongs to - * @req: The I/O request to be pushed - */ -void bao_io_client_push_request(struct bao_io_client* client, struct 
bao_virtio_request* req); - -/** - * Pop an I/O request from the I/O client request list - * @client: The I/O client that the I/O request belongs to - * @req: The I/O request to be popped - * @return true if the I/O request was popped, false otherwise - */ -bool bao_io_client_pop_request(struct bao_io_client* client, struct bao_virtio_request* req); - -/** - * Find the I/O client that the I/O request belongs to - * @dm: The DM that the I/O request belongs to - * @req: The I/O request - * @return The I/O client that the I/O request belongs to, or NULL if there is - * no client - */ -struct bao_io_client* bao_io_client_find(struct bao_dm* dm, struct bao_virtio_request* req); - -/** - * Initialize the Ioeventfd client - * @dm: The DM that the Ioeventfd client belongs to - */ -int bao_ioeventfd_client_init(struct bao_dm* dm); - -/** - * Destroy the Ioeventfd client - * @dm: The DM that the Ioeventfd client belongs to - */ -void bao_ioeventfd_client_destroy(struct bao_dm* dm); - -/** - * Configure the Ioeventfd client - * @dm: The DM that the Ioeventfd client belongs to - * @config: The ioeventfd configuration - */ -int bao_ioeventfd_client_config(struct bao_dm* dm, struct bao_ioeventfd* config); - -/** - * Initialize the Irqfd server - * @dm: The DM that the Irqfd server belongs to - */ -int bao_irqfd_server_init(struct bao_dm* dm); - -/** - * Destroy the Irqfd server - * @dm: The DM that the Irqfd server belongs to - */ -void bao_irqfd_server_destroy(struct bao_dm* dm); - -/** - * Configure the Irqfd server - * @dm: The DM that the Irqfd server belongs to - * @config: The irqfd configuration - */ -int bao_irqfd_server_config(struct bao_dm* dm, struct bao_irqfd* config); - -/** - * Initialize the I/O Dispatcher - * @dm: The DM to be initialized on the I/O Dispatcher - */ -int bao_io_dispatcher_init(struct bao_dm* dm); - -/** - * Destroy the I/O Dispatcher - * @dm: The DM to be destroyed on the I/O Dispatcher - */ -void bao_io_dispatcher_destroy(struct bao_dm* dm); - 
-/** - * Setup the I/O Dispatcher - */ -int bao_io_dispatcher_setup(void); - -/** - * Remove the I/O Dispatcher - */ -void bao_io_dispatcher_remove(void); - -/** - * Acquires the I/O requests from the Bao Hypervisor and dispatches them to the - * respective I/O client - * @dm: The DM that the I/O clients belongs to - * @return: 0 on success, <0 on failure - */ -int bao_dispatch_io(struct bao_dm* dm); - -/** - * Pause the I/O Dispatcher - * @dm: The DM that will be paused - */ -void bao_io_dispatcher_pause(struct bao_dm* dm); - -/** - * Resume the I/O Dispatcher - * @dm: The DM that will be resumed - */ -void bao_io_dispatcher_resume(struct bao_dm* dm); - -/** - * Register the interrupt controller - * @dm: The DM that the interrupt controller belongs to - */ -int bao_intc_register(struct bao_dm* dm); - -/** - * Unregister the interrupt controller - * @dm: The DM that the interrupt controller belongs to - */ -void bao_intc_unregister(struct bao_dm* dm); - -/** - * Setup the interrupt controller handler - * @handler: The interrupt handler - * @dm: The DM that the interrupt controller belongs to - */ -void bao_intc_setup_handler(void (*handler)(struct bao_dm* dm)); - -/** - * Remove the interrupt controller handler - */ -void bao_intc_remove_handler(void); - -/** - * I/O Dispatcher kernel module ioctls handler - * @filp: The open file pointer - * @cmd: The ioctl command - * @ioctl_param: The ioctl parameter - */ -long bao_io_dispatcher_driver_ioctl(struct file* filp, unsigned int cmd, unsigned long ioctl_param); - -#endif /* _BAO_H */ diff --git a/iodispatcher/dm.c b/iodispatcher/dm.c index 7323d93..976a5bf 100644 --- a/iodispatcher/dm.c +++ b/iodispatcher/dm.c @@ -5,28 +5,40 @@ * Copyright (c) Bao Project and Contributors. All rights reserved. 
* * Authors: - * João Peixoto + * João Peixoto + * José Martins + * David Cerdeira */ -#include "bao.h" -#include "hypercall.h" -#include -#include +#include +#include #include -#include #include -#include -#include +#include +#include -/* List of all Backend DMs */ +/* + * List of all backend device models (DMs) + */ LIST_HEAD(bao_dm_list); /* - * bao_dm_list is read in a worker thread which dispatch I/O requests and - * is wrote in DM creation ioctl. This rwlock mechanism is used to protect it. + * Lock to protect bao_dm_list */ DEFINE_RWLOCK(bao_dm_list_lock); +static void bao_dm_get(struct bao_dm* dm) +{ + refcount_inc(&dm->refcount); +} + +static void bao_dm_put(struct bao_dm* dm) +{ + if (refcount_dec_and_test(&dm->refcount)) { + kfree(dm); + } +} + static int bao_dm_open(struct inode* inode, struct file* filp) { return 0; @@ -35,27 +47,147 @@ static int bao_dm_open(struct inode* inode, struct file* filp) static int bao_dm_release(struct inode* inode, struct file* filp) { struct bao_dm* dm = filp->private_data; - kfree(dm); + + if (WARN_ON_ONCE(!dm)) { + return -ENODEV; + } + + filp->private_data = NULL; + bao_dm_put(dm); + return 0; } +static long bao_dm_ioctl(struct file* filp, unsigned int cmd, unsigned long arg) +{ + struct bao_dm* dm = filp->private_data; + int rc; + + if (WARN_ON_ONCE(!dm)) { + return -ENODEV; + } + + switch (cmd) { + case BAO_IOCTL_IO_CLIENT_ATTACH: { + struct bao_virtio_request* req; + + req = memdup_user((void __user*)arg, sizeof(*req)); + if (IS_ERR(req)) { + rc = PTR_ERR(req); + break; + } + + if (!dm->control_client) { + rc = -ENOENT; + goto out_free; + } + + rc = bao_io_client_attach(dm->control_client); + if (rc) { + goto out_free; + } + + rc = bao_io_client_request(dm->control_client, req); + if (rc) { + goto out_free; + } + + if (copy_to_user((void __user*)arg, req, sizeof(*req))) { + rc = -EFAULT; + goto out_free; + } + + rc = 0; + +out_free: + kfree(req); + break; + } + case BAO_IOCTL_IO_REQUEST_COMPLETE: { + struct 
bao_virtio_request* req; + struct bao_remio_hypercall_ctx ctx; + + req = memdup_user((void __user*)arg, sizeof(*req)); + if (IS_ERR(req)) { + rc = PTR_ERR(req); + break; + } + + ctx.dm_id = req->dm_id; + ctx.addr = req->addr; + ctx.op = req->op; + ctx.value = req->value; + ctx.access_width = req->access_width; + ctx.request_id = req->request_id; + + rc = bao_remio_hypercall(&ctx); + kfree(req); + + break; + } + case BAO_IOCTL_IOEVENTFD: { + struct bao_ioeventfd ioeventfd; + + if (copy_from_user(&ioeventfd, (void __user*)arg, sizeof(struct bao_ioeventfd))) { + return -EFAULT; + } + + rc = bao_ioeventfd_client_config(dm, &ioeventfd); + break; + } + case BAO_IOCTL_IRQFD: { + struct bao_irqfd irqfd; + + if (copy_from_user(&irqfd, (void __user*)arg, sizeof(struct bao_irqfd))) { + return -EFAULT; + } + + rc = bao_irqfd_server_config(dm, &irqfd); + break; + } + default: + rc = -ENOTTY; + break; + } + + return rc; +} + /** - * @brief IOCTL handler for the backend DM mmap operation - * @note This function is used to map the previosuly allocated kernel memory - * region of the backend DM to the userspace virtual address space - * @filp: The file pointer of the DM - * @vma: Contains the information about the virtual address range that is used - * to access - * @return: 0 on success, <0 on failure + * bao_dm_mmap - mmap backend DM shared memory to userspace + * @filp: File pointer for the DM device + * @vma: Virtual memory area for mapping + * + * Return: 0 on success, negative errno on failure */ static int bao_dm_mmap(struct file* filp, struct vm_area_struct* vma) { struct bao_dm* dm = filp->private_data; + unsigned long vsize; + unsigned long offset; + phys_addr_t phys; + + if (WARN_ON_ONCE(!dm)) { + return -ENODEV; + } + + vsize = vma->vm_end - vma->vm_start; + offset = vma->vm_pgoff << PAGE_SHIFT; + + if (!vsize || offset) { + return -EINVAL; + } + + if (vsize > dm->info.shmem_size) { + return -EINVAL; + } - unsigned long vsize = vma->vm_end - vma->vm_start; + phys = 
dm->info.shmem_addr; + if (!PAGE_ALIGNED(phys)) { + return -EINVAL; + } - if (remap_pfn_range(vma, vma->vm_start, dm->info.shmem_addr >> PAGE_SHIFT, vsize, - vma->vm_page_prot)) { + if (remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, vsize, vma->vm_page_prot)) { return -EFAULT; } @@ -63,17 +195,22 @@ static int bao_dm_mmap(struct file* filp, struct vm_area_struct* vma) } /** - * @brief IOCTL handler for the backend DM llseek operation - * @file: The file pointer of the DM - * @offset: The offset to seek - * @whence: The seek operation - * @return: >=0 on success, <0 on failure + * bao_dm_llseek - Adjust file offset for backend DM device + * @file: File pointer for the DM device + * @offset: Offset to seek + * @whence: Reference point (SEEK_SET, SEEK_CUR, SEEK_END) + * + * Return: New file position on success, negative errno on failure */ static loff_t bao_dm_llseek(struct file* file, loff_t offset, int whence) { struct bao_dm* bao = file->private_data; loff_t new_pos; + if (WARN_ON_ONCE(!bao)) { + return -ENODEV; + } + switch (whence) { case SEEK_SET: new_pos = offset; @@ -82,23 +219,21 @@ static loff_t bao_dm_llseek(struct file* file, loff_t offset, int whence) new_pos = file->f_pos + offset; break; case SEEK_END: - new_pos = bao->info.shmem_addr + bao->info.shmem_size + offset; + new_pos = bao->info.shmem_size + offset; break; default: return -EINVAL; } - // Ensure new_pos is within the valid range of the total shared memory - if (new_pos < 0 || (new_pos > (bao->info.shmem_addr + bao->info.shmem_size + offset))) { + if (new_pos < 0 || new_pos > bao->info.shmem_size) { return -EINVAL; } file->f_pos = new_pos; - return new_pos; } -static struct file_operations bao_dm_fops = { +static const struct file_operations bao_dm_fops = { .owner = THIS_MODULE, .open = bao_dm_open, .release = bao_dm_release, @@ -110,162 +245,178 @@ static struct file_operations bao_dm_fops = { struct bao_dm* bao_dm_create(struct bao_dm_info* info) { struct bao_dm* dm; + struct bao_dm* 
tmp; char name[BAO_NAME_MAX_LEN]; - // verify if already exists a DM with the same virtual ID - read_lock(&bao_dm_list_lock); - list_for_each_entry(dm, &bao_dm_list, list) - { - if (dm->info.id == info->id) { - read_unlock(&bao_dm_list_lock); - return NULL; - } + if (WARN_ON(!info)) { + return NULL; } - read_unlock(&bao_dm_list_lock); - // allocate memory for the DM - dm = kzalloc(sizeof(struct bao_dm), GFP_KERNEL); + dm = kzalloc(sizeof(*dm), GFP_KERNEL); if (!dm) { - pr_err("%s: kzalloc failed\n", __FUNCTION__); return NULL; } - // initialize the DM structure + INIT_LIST_HEAD(&dm->list); INIT_LIST_HEAD(&dm->io_clients); init_rwsem(&dm->io_clients_lock); - // set the DM fields + refcount_set(&dm->refcount, 1); dm->info = *info; - // initialize the I/O request client bao_io_dispatcher_init(dm); - // add the DM to the list - write_lock_bh(&bao_dm_list_lock); - list_add(&dm->list, &bao_dm_list); - write_unlock_bh(&bao_dm_list_lock); - - // create the Control client snprintf(name, sizeof(name), "bao-ioctlc%u", dm->info.id); dm->control_client = bao_io_client_create(dm, NULL, NULL, true, name); + if (!dm->control_client) { + pr_err("%s: failed to create control client for DM %u\n", __func__, dm->info.id); + goto err_remove_dm; + } - // initialize the Ioeventfd client - bao_ioeventfd_client_init(dm); + if (bao_ioeventfd_client_init(dm)) { + pr_err("%s: failed to initialize ioeventfd for DM %u\n", __func__, dm->info.id); + goto err_destroy_io_clients; + } - // initialize the Irqfd server - bao_irqfd_server_init(dm); + if (bao_irqfd_server_init(dm)) { + pr_err("%s: failed to initialize irqfd for DM %u\n", __func__, dm->info.id); + goto err_destroy_io_clients; + } - // map the memory region to the kernel virtual address space dm->shmem_base_addr = memremap(dm->info.shmem_addr, dm->info.shmem_size, MEMREMAP_WB); - if (dm->shmem_base_addr == NULL) { - pr_err("%s: failed to map memory region for dm %d\n", __FUNCTION__, dm->info.id); - return NULL; + if (!dm->shmem_base_addr) 
{ + pr_err("%s: failed to map memory region for DM %u\n", __func__, dm->info.id); + goto err_destroy_irqfd; + } + + write_lock(&bao_dm_list_lock); + list_for_each_entry(tmp, &bao_dm_list, list) + { + if (tmp->info.id == info->id) { + write_unlock(&bao_dm_list_lock); + goto err_unmap; + } } + list_add(&dm->list, &bao_dm_list); + write_unlock(&bao_dm_list_lock); return dm; + +err_unmap: + memunmap(dm->shmem_base_addr); + +err_destroy_irqfd: + bao_irqfd_server_destroy(dm); + +err_destroy_io_clients: + bao_io_clients_destroy(dm); + +err_remove_dm: + kfree(dm); + + return NULL; } void bao_dm_destroy(struct bao_dm* dm) { - // mark as destroying - set_bit(BAO_DM_FLAG_DESTROYING, &dm->flags); + if (WARN_ON_ONCE(!dm)) { + return; + } - // remove the DM from the list - write_lock_bh(&bao_dm_list_lock); + write_lock(&bao_dm_list_lock); list_del_init(&dm->list); - write_unlock_bh(&bao_dm_list_lock); + write_unlock(&bao_dm_list_lock); - // clear the global fields dm->info.id = 0; dm->info.shmem_addr = 0; dm->info.shmem_size = 0; dm->info.irq = 0; - // unmap the memory region - memunmap(dm->shmem_base_addr); + if (dm->shmem_base_addr) { + memunmap(dm->shmem_base_addr); + } - // release the DM file descriptor - put_unused_fd(dm->info.fd); + if (dm->info.fd >= 0) { + put_unused_fd(dm->info.fd); + } - // destroy the Irqfd server bao_irqfd_server_destroy(dm); - - // destroy the I/O clients bao_io_clients_destroy(dm); - - // destroy the I/O dispatcher bao_io_dispatcher_destroy(dm); - // clear the destroying flag - clear_bit(BAO_DM_FLAG_DESTROYING, &dm->flags); - - // free the DM - kfree(dm); + bao_dm_put(dm); } /** - * Create an anonymous inode for the DM abstraction - * @note: The anonymous inode is used to expose the DM to userspace - * and allow the frontend DM to request services from the backend - * DM directly through the file descriptor This function should be called after - * the DM is created and invoked by the frontend DM (userspace process) to - * create the anonymous 
inode inside the process file descriptor table - * @dm: The DM to create the anonymous inode - * @return: >=0 on success, <0 on failure + * bao_dm_create_anonymous_inode - Create an anonymous inode for a backend DM + * @dm: The backend device model (DM) + * + * Creates an anonymous inode that exposes the backend DM to userspace. + * The frontend DM can use the returned file descriptor to request + * services from the backend DM directly. + * + * Return: File descriptor on success, negative errno on failure */ static int bao_dm_create_anonymous_inode(struct bao_dm* dm) { char name[BAO_NAME_MAX_LEN]; struct file* file; - int rc = 0; + int fd; - // create a new file descriptor for the DM - rc = get_unused_fd_flags(O_CLOEXEC); - if (rc < 0) { - pr_err("%s: get_unused_fd_flags failed\n", __FUNCTION__); - return rc; + if (WARN_ON_ONCE(!dm)) { + return -EINVAL; } - // create a name for the DM file descriptor - snprintf(name, sizeof(name), "bao-dm%u", dm->info.id); + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + return fd; + } - // create a new anonymous inode for the DM abstraction - // the `bao_dm_fops` defines the behavior of this "file" and - // the `dm` is the private data + snprintf(name, sizeof(name), "bao-dm%u", dm->info.id); + bao_dm_get(dm); file = anon_inode_getfile(name, &bao_dm_fops, dm, O_RDWR); if (IS_ERR(file)) { - pr_err("%s: anon_inode_getfile failed\n", __FUNCTION__); - put_unused_fd(rc); - return rc; + bao_dm_put(dm); + put_unused_fd(fd); + return PTR_ERR(file); } - // associate the file descriptor `rc` with the struct file object `file` - // in the file descriptor table of the current process - // (expose the file descriptor `rc` to userspace) - fd_install(rc, file); - - // update the DM file descriptor - dm->info.fd = rc; + fd_install(fd, file); + dm->info.fd = fd; - return rc; + return fd; } bool bao_dm_get_info(struct bao_dm_info* info) { struct bao_dm* dm; - bool rc = false; + bool found = false; + + if (WARN_ON_ONCE(!info)) { + return 
false; + } + read_lock(&bao_dm_list_lock); list_for_each_entry(dm, &bao_dm_list, list) { if (dm->info.id == info->id) { - info->shmem_addr = dm->info.shmem_addr; - info->shmem_size = dm->info.shmem_size; - info->irq = dm->info.irq; - info->fd = bao_dm_create_anonymous_inode(dm); - rc = true; + bao_dm_get(dm); + found = true; break; } } + read_unlock(&bao_dm_list_lock); - return rc; + if (!found) { + return false; + } + + info->shmem_addr = dm->info.shmem_addr; + info->shmem_size = dm->info.shmem_size; + info->irq = dm->info.irq; + info->fd = bao_dm_create_anonymous_inode(dm); + + bao_dm_put(dm); + + return true; } diff --git a/iodispatcher/driver.c b/iodispatcher/driver.c index ef5bb7c..728a5e2 100644 --- a/iodispatcher/driver.c +++ b/iodispatcher/driver.c @@ -3,220 +3,161 @@ * Bao Hypervisor I/O Dispatcher Kernel Driver * * Copyright (c) Bao Project and Contributors. All rights reserved. - * - * Authors: - * João Peixoto */ -#include "bao.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include -#include -#include -#include -#include - -#define DEV_NAME "bao-io-dispatcher" - -static dev_t bao_iodispatcher_devt; -struct class* bao_iodispatcher_cl; +#include +#include -/** - * Bao I/O Dispatcher driver structure - * @cdev: The character device - * @dev: The device - */ struct bao_iodispatcher_drv { - struct cdev cdev; - struct device* dev; + struct miscdevice miscdev; }; -/** - * Open the I/O Dispatcher device - * @inode: The inode of the I/O Dispatcher - * @filp: The file pointer of the I/O Dispatcher - */ -static int bao_io_dispatcher_driver_open_fops(struct inode* inode, struct file* filp) +static int bao_io_dispatcher_driver_open(struct inode* inode, struct file* filp) { - struct bao_iodispatcher_drv* bao_iodispatcher_drv = - container_of(inode->i_cdev, struct bao_iodispatcher_drv, cdev); - filp->private_data = bao_iodispatcher_drv; + struct miscdevice* misc = 
filp->private_data; + struct bao_iodispatcher_drv* drv; - kobject_get(&bao_iodispatcher_drv->dev->kobj); + drv = container_of(misc, struct bao_iodispatcher_drv, miscdev); + filp->private_data = drv; return 0; } -/** - * Release the I/O Dispatcher device - * @inode: The inode of the I/O Dispatcher - * @filp: The file pointer of the I/O Dispatcher - */ -static int bao_io_dispatcher_driver_release_fops(struct inode* inode, struct file* filp) +static int bao_io_dispatcher_driver_release(struct inode* inode, struct file* filp) { - struct bao_iodispatcher_drv* bao_iodispatcher_drv = - container_of(inode->i_cdev, struct bao_iodispatcher_drv, cdev); filp->private_data = NULL; - - kobject_put(&bao_iodispatcher_drv->dev->kobj); - return 0; } -static long bao_io_dispatcher_driver_ioctl_fops(struct file* filp, unsigned int cmd, - unsigned long ioctl_param) +static long bao_io_dispatcher_driver_ioctl(struct file* filp, unsigned int cmd, unsigned long arg) { - return bao_io_dispatcher_driver_ioctl(filp, cmd, ioctl_param); + struct bao_dm_info* info; + + switch (cmd) { + case BAO_IOCTL_DM_GET_INFO: + info = memdup_user((void __user*)arg, sizeof(*info)); + if (IS_ERR(info)) { + return PTR_ERR(info); + } + + if (!bao_dm_get_info(info)) { + kfree(info); + return -ENOENT; + } + + if (copy_to_user((void __user*)arg, info, sizeof(*info))) { + kfree(info); + return -EFAULT; + } + + kfree(info); + return 0; + + default: + return -ENOTTY; + } } -static struct file_operations bao_io_dispatcher_driver_fops = { +static const struct file_operations bao_io_dispatcher_driver_fops = { .owner = THIS_MODULE, - .open = bao_io_dispatcher_driver_open_fops, - .release = bao_io_dispatcher_driver_release_fops, - .unlocked_ioctl = bao_io_dispatcher_driver_ioctl_fops, + .open = bao_io_dispatcher_driver_open, + .release = bao_io_dispatcher_driver_release, + .unlocked_ioctl = bao_io_dispatcher_driver_ioctl, }; -/** - * Register the driver with the kernel - * @pdev: Platform device pointer - */ -static int 
bao_io_dispatcher_driver_register(struct platform_device* pdev) +static int bao_io_dispatcher_driver_probe(struct platform_device* pdev) { - int ret, irq; - struct module* owner = THIS_MODULE; - struct resource* r; - dev_t devt; - resource_size_t reg_size; - struct bao_iodispatcher_drv* bao_io_dispatcher_drv; + struct device* dev = &pdev->dev; + struct bao_iodispatcher_drv* drv; struct bao_dm* dm; struct bao_dm_info dm_info; + struct resource* r; + int ret; + int irq; + int i; + resource_size_t reg_size; - // setup the I/O Dispatcher system - ret = bao_io_dispatcher_setup(); - if (ret) { - dev_err(&pdev->dev, "setup I/O Dispatcher failed!\n"); - return ret; - } - - // allocate memory for the Bao I/O Dispatcher structure - bao_io_dispatcher_drv = - devm_kzalloc(&pdev->dev, sizeof(struct bao_iodispatcher_drv), GFP_KERNEL); - - if (bao_io_dispatcher_drv == NULL) { - ret = -ENOMEM; - goto err_io_dispatcher; + drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); + if (!drv) { + return -ENOMEM; } - for (int i = 0; i < BAO_IO_MAX_DMS; i++) { - // get the memory region from the device tree + for (i = 0; i < BAO_IO_MAX_DMS; i++) { r = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!r) { break; } - // get the interrupt number from the device tree irq = platform_get_irq(pdev, i); if (irq < 0) { - dev_err(&pdev->dev, "Failed to read interrupt number at index %d\n", i); + dev_err(dev, "failed to get IRQ at index %d\n", i); ret = irq; - goto err_io_dispatcher; + goto err_unregister_dms; } - // get the memory region size reg_size = resource_size(r); - // set the device model information dm_info.id = i; dm_info.shmem_addr = (unsigned long)r->start; dm_info.shmem_size = (unsigned long)reg_size; dm_info.irq = irq; dm_info.fd = 0; - // create the device model dm = bao_dm_create(&dm_info); - if (dm == NULL) { - dev_err(&pdev->dev, "failed to create Bao I/O Dispatcher device model %d\n", i); - ret = -ENOMEM; - goto err_io_dispatcher; + if (!dm) { + dev_err(dev, "failed to create Bao 
DM %d\n", i); + ret = -EINVAL; + goto err_unregister_dms; } - // register the interrupt - ret = bao_intc_register(dm); + ret = bao_intc_init(dm); if (ret) { - dev_err(&pdev->dev, "failed to register interrupt %d\n", irq); + dev_err(dev, "failed to register interrupt %d\n", irq); goto err_unregister_dms; } } - cdev_init(&bao_io_dispatcher_drv->cdev, &bao_io_dispatcher_driver_fops); - bao_io_dispatcher_drv->cdev.owner = owner; + drv->miscdev.minor = MISC_DYNAMIC_MINOR; + drv->miscdev.name = "bao-io-dispatcher"; + drv->miscdev.fops = &bao_io_dispatcher_driver_fops; + drv->miscdev.parent = dev; - devt = MKDEV(MAJOR(bao_iodispatcher_devt), 0); - ret = cdev_add(&bao_io_dispatcher_drv->cdev, devt, 1); + ret = misc_register(&drv->miscdev); if (ret) { + dev_err(dev, "failed to register misc device: %d\n", ret); goto err_unregister_irqs; } - bao_io_dispatcher_drv->dev = - device_create(bao_iodispatcher_cl, &pdev->dev, devt, bao_io_dispatcher_drv, DEV_NAME); - if (IS_ERR(bao_io_dispatcher_drv->dev)) { - ret = PTR_ERR(bao_io_dispatcher_drv->dev); - goto err_cdev; - } - dev_set_drvdata(bao_io_dispatcher_drv->dev, bao_io_dispatcher_drv); + platform_set_drvdata(pdev, drv); + dev_info(dev, "Bao I/O dispatcher device registered\n"); return 0; -err_cdev: - cdev_del(&bao_io_dispatcher_drv->cdev); -err_unregister_irqs: { - list_for_each_entry(dm, &bao_dm_list, list) - { - bao_intc_unregister(dm); - } -} -err_unregister_dms: { - list_for_each_entry(dm, &bao_dm_list, list) - { - bao_dm_destroy(dm); - } -} -err_io_dispatcher: - bao_io_dispatcher_remove(); +err_unregister_irqs: + list_for_each_entry(dm, &bao_dm_list, list) bao_intc_destroy(dm); + +err_unregister_dms: + list_for_each_entry(dm, &bao_dm_list, list) bao_dm_destroy(dm); - dev_err(&pdev->dev, "failed initialization\n"); return ret; } -/** - * Unregister the driver from the kernel - * @pdev: Platform device pointer - */ -static void bao_io_dispatcher_driver_unregister(struct platform_device* pdev) +static void 
bao_io_dispatcher_driver_remove(struct platform_device* pdev) { + struct bao_iodispatcher_drv* drv = platform_get_drvdata(pdev); struct bao_dm* dm; + struct bao_dm* tmp; - // remove the I/O Dispatcher system - bao_io_dispatcher_remove(); + if (drv) { + misc_deregister(&drv->miscdev); + } - list_for_each_entry(dm, &bao_dm_list, list) + list_for_each_entry_safe(dm, tmp, &bao_dm_list, list) { - // destroy the device model + bao_intc_destroy(dm); bao_dm_destroy(dm); - // unregister the interrupt - bao_intc_unregister(dm); } } @@ -226,45 +167,18 @@ static const struct of_device_id bao_io_dispatcher_driver_dt_ids[] = { MODULE_DEVICE_TABLE(of, bao_io_dispatcher_driver_dt_ids); static struct platform_driver bao_io_dispatcher_driver = { - .probe = bao_io_dispatcher_driver_register, - .remove = bao_io_dispatcher_driver_unregister, - .driver = - { - .name = "bao-io-dispatcher", - .of_match_table = of_match_ptr(bao_io_dispatcher_driver_dt_ids), - .owner = THIS_MODULE, - }, + .probe = bao_io_dispatcher_driver_probe, + .remove = bao_io_dispatcher_driver_remove, + .driver = { + .name = "bao-io-dispatcher", + .of_match_table = bao_io_dispatcher_driver_dt_ids, + }, }; -static int __init bao_io_dispatcher_driver_init(void) -{ - int ret; - - if ((bao_iodispatcher_cl = class_create(DEV_NAME)) == NULL) { - ret = -1; - pr_err("unable to class_create " DEV_NAME " device\n"); - return ret; - } - - ret = alloc_chrdev_region(&bao_iodispatcher_devt, 0, BAO_IO_MAX_DMS, DEV_NAME); - if (ret < 0) { - pr_err("unable to alloc_chrdev_region " DEV_NAME " device\n"); - return ret; - } - - return platform_driver_register(&bao_io_dispatcher_driver); -} - -static void __exit bao_io_dispatcher_driver_exit(void) -{ - platform_driver_unregister(&bao_io_dispatcher_driver); - unregister_chrdev(bao_iodispatcher_devt, DEV_NAME); - class_destroy(bao_iodispatcher_cl); -} - -module_init(bao_io_dispatcher_driver_init); -module_exit(bao_io_dispatcher_driver_exit); +module_platform_driver(bao_io_dispatcher_driver); 
-MODULE_AUTHOR("João Peixoto "); MODULE_LICENSE("GPL"); +MODULE_AUTHOR("João Peixoto "); +MODULE_AUTHOR("David Cerdeira "); +MODULE_AUTHOR("José Martins "); MODULE_DESCRIPTION("Bao Hypervisor I/O Dispatcher Kernel Driver"); diff --git a/iodispatcher/hypercall.h b/iodispatcher/hypercall.h deleted file mode 100644 index 58c4f7f..0000000 --- a/iodispatcher/hypercall.h +++ /dev/null @@ -1,150 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Hypercall API for Bao Hypervisor - * - * Copyright (c) Bao Project and Contributors. All rights reserved. - * - * Authors: - * João Peixoto - */ - -#ifndef __BAO_HYPERCALL_H -#define __BAO_HYPERCALL_H - -#include "bao.h" - -#if defined(CONFIG_ARM64) || defined(CONFIG_ARM) -#include -#elif CONFIG_RISCV -#include -#endif - -/* Remote I/O Hypercall ID */ -#define REMIO_HC_ID 0x2 - -/** - * Remote I/O Hypercall return structure - * @hyp_ret: The generic return value of Bao's hypercall - * @remio_hyp_ret: The return value of the Remote I/O Hypercall - * @pending_requests: The number of pending requests (only used in the Remote - * I/O Ask Hypercall) - */ -struct remio_hypercall_ret { - u64 hyp_ret; - u64 remio_hyp_ret; - u64 pending_requests; -}; - -#if defined(CONFIG_ARM64) -/** - * asm_bao_hypercall_remio() - Performs a Remote I/O Hypercall - * @request: VirtIO request structure - * @return: Remote I/O Hypercall return structure - */ -static inline struct remio_hypercall_ret asm_bao_hypercall_remio(struct bao_virtio_request* request) -{ - struct remio_hypercall_ret ret; - register int x0 asm("x0") = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, - ARM_SMCCC_OWNER_VENDOR_HYP, REMIO_HC_ID); - register u64 x1 asm("x1") = request->dm_id; - register u64 x2 asm("x2") = request->addr; - register u64 x3 asm("x3") = request->op; - register u64 x4 asm("x4") = request->value; - register u64 x5 asm("x5") = request->request_id; - register u64 x6 asm("x6") = 0; - - asm volatile("hvc 0\n\t" : "=r"(x0), "=r"(x1), "=r"(x2), "=r"(x3), 
"=r"(x4), "=r"(x5), - "=r"(x6) : "r"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5) : "memory"); - - ret.hyp_ret = 0; - ret.remio_hyp_ret = x0; - ret.pending_requests = x6; - - request->addr = x1; - request->op = x2; - request->value = x3; - request->access_width = x4; - request->request_id = x5; - - return ret; -} -#elif defined(CONFIG_ARM) -/** - * asm_bao_hypercall_remio() - Performs a Remote I/O Hypercall - * @request: VirtIO request structure - * @return: Remote I/O Hypercall return structure - */ -static inline struct remio_hypercall_ret asm_bao_hypercall_remio(struct bao_virtio_request* request) -{ - struct remio_hypercall_ret ret; - register int x0 asm("r0") = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, - ARM_SMCCC_OWNER_VENDOR_HYP, REMIO_HC_ID); - register u32 x1 asm("r1") = request->dm_id; - register u32 x2 asm("r2") = request->addr; - register u32 x3 asm("r3") = request->op; - register u32 x4 asm("r4") = request->value; - register u32 x5 asm("r5") = request->request_id; - register u32 x6 asm("r6") = 0; - - asm volatile("hvc 0\n\t" : "=r"(x0), "=r"(x1), "=r"(x2), "=r"(x3), "=r"(x4), "=r"(x5), - "=r"(x6) : "r"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5) : "memory"); - - ret.hyp_ret = 0; - ret.remio_hyp_ret = x0; - ret.pending_requests = x6; - - request->addr = x1; - request->op = x2; - request->value = x3; - request->access_width = x4; - request->request_id = x5; - - return ret; -} -#elif defined(CONFIG_RISCV) -/** - * asm_bao_hypercall_remio() - Performs a Remote I/O Hypercall - * @request: VirtIO request structure - * @return: Remote I/O Hypercall return structure - */ -static inline struct remio_hypercall_ret asm_bao_hypercall_remio(struct bao_virtio_request* request) -{ - struct remio_hypercall_ret ret; - register uintptr_t a0 asm("a0") = (uintptr_t)(request->dm_id); - register uintptr_t a1 asm("a1") = (uintptr_t)(request->addr); - register uintptr_t a2 asm("a2") = (uintptr_t)(request->op); - register uintptr_t a3 asm("a3") = 
(uintptr_t)(request->value); - register uintptr_t a4 asm("a4") = (uintptr_t)(request->request_id); - register uintptr_t a5 asm("a5") = (uintptr_t)(0); - register uintptr_t a6 asm("a6") = (uintptr_t)(REMIO_HC_ID); - register uintptr_t a7 asm("a7") = (uintptr_t)(0x08000ba0); - - asm volatile("ecall" - : "+r"(a0), "+r"(a1), "+r"(a2), "+r"(a3), "+r"(a4), "+r"(a5), "+r"(a6), "+r"(a7) - : "r"(a0), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5), "r"(a6), "r"(a7) : "memory"); - - ret.hyp_ret = a0; - ret.remio_hyp_ret = a1; - ret.pending_requests = a7; - - request->addr = a2; - request->op = a3; - request->value = a4; - request->access_width = a5; - request->request_id = a6; - - return ret; -} -#endif - -/** - * bao_hypercall_remio() - Performs a Remote I/O Hypercall - * @request: VirtIO request structure - * @return: Remote I/O Hypercall return structure - */ -static inline struct remio_hypercall_ret bao_hypercall_remio(struct bao_virtio_request* request) -{ - return asm_bao_hypercall_remio(request); -} - -#endif /* __BAO_HYPERCALL_H */ diff --git a/iodispatcher/intc.c b/iodispatcher/intc.c index adcec6e..4c4f8ad 100644 --- a/iodispatcher/intc.c +++ b/iodispatcher/intc.c @@ -5,24 +5,28 @@ * Copyright (c) Bao Project and Contributors. All rights reserved. * * Authors: - * João Peixoto + * João Peixoto + * José Martins + * David Cerdeira */ -#include "bao.h" +#include #include -#include -#include -#include -#include -// handler for the interrupt +/* Top-level handler registered by the Bao interrupt controller */ static void (*bao_intc_handler)(struct bao_dm* dm); +/** + * bao_interrupt_handler - Top-level interrupt handler for Bao DM + * @irq: Interrupt number + * @dev: Pointer to the Bao device model (struct bao_dm) + * + * Invokes the registered Bao interrupt controller handler, if any. 
+ */ static irqreturn_t bao_interrupt_handler(int irq, void* dev) { struct bao_dm* dm = (struct bao_dm*)dev; - // if the handler is set, call it if (bao_intc_handler) { bao_intc_handler(dm); } @@ -40,14 +44,24 @@ void bao_intc_remove_handler(void) bao_intc_handler = NULL; } -int bao_intc_register(struct bao_dm* dm) +int bao_intc_init(struct bao_dm* dm) { char name[BAO_NAME_MAX_LEN]; - snprintf(name, BAO_NAME_MAX_LEN, "bao-iodintc%d", dm->info.id); + + if (WARN_ON_ONCE(!dm)) { + return -EINVAL; + } + + scnprintf(name, sizeof(name), "bao-iodintc%d", dm->info.id); + return request_irq(dm->info.irq, bao_interrupt_handler, 0, name, dm); } -void bao_intc_unregister(struct bao_dm* dm) +void bao_intc_destroy(struct bao_dm* dm) { + if (WARN_ON_ONCE(!dm)) { + return; + } + free_irq(dm->info.irq, dm); } diff --git a/iodispatcher/io_client.c b/iodispatcher/io_client.c index fa89417..731e3eb 100644 --- a/iodispatcher/io_client.c +++ b/iodispatcher/io_client.c @@ -5,19 +5,21 @@ * Copyright (c) Bao Project and Contributors. All rights reserved. * * Authors: - * João Peixoto + * João Peixoto + * José Martins + * David Cerdeira */ -#include "bao.h" -#include "hypercall.h" -#include +#include +#include #include -#include /** - * Contains the specific parameters of a Bao I/O request - * @list: List node for this request - * @virtio_request: The I/O request + * struct bao_io_request - Bao I/O request structure + * @list: List node linking all requests + * @virtio_request: The VirtIO request payload + * + * Represents a single I/O request for a Bao I/O client. */ struct bao_io_request { struct list_head list; @@ -25,92 +27,112 @@ struct bao_io_request { }; /** - * Check if there are pending requests - * @client: The I/O client - * @return: True if there are pending requests, false otherwise + * bao_io_client_has_pending_requests - Check if an I/O client has pending requests + * @client: The bao_io_client to check + * + * Return: True if has pending I/O requests, false otherwise. 
*/ static inline bool bao_io_client_has_pending_requests(struct bao_io_client* client) { + if (WARN_ON_ONCE(!client)) { + return false; + } + return !list_empty(&client->virtio_requests); } /** - * Check if the I/O client is being destroyed - * @client: The I/O client - * @return: bool + * bao_io_client_is_destroying - Check if an I/O client is being destroyed + * @client: The bao_io_client to check + * + * Return: True if the client is being destroyed, false otherwise. */ static inline bool bao_io_client_is_destroying(struct bao_io_client* client) { + if (WARN_ON_ONCE(!client)) { + return true; + } + return test_bit(BAO_IO_CLIENT_DESTROYING, &client->flags); } -void bao_io_client_push_request(struct bao_io_client* client, struct bao_virtio_request* req) +bool bao_io_client_push_request(struct bao_io_client* client, struct bao_virtio_request* req) { struct bao_io_request* io_req; - // allocate the I/O request object + if (WARN_ON_ONCE(!client || !req)) { + return false; + } + io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); + if (!io_req) { + return false; + } - // copy the request to the I/O request object io_req->virtio_request = *req; - // add the request to the end of the requests list mutex_lock(&client->virtio_requests_lock); list_add_tail(&io_req->list, &client->virtio_requests); mutex_unlock(&client->virtio_requests_lock); + + return true; } bool bao_io_client_pop_request(struct bao_io_client* client, struct bao_virtio_request* ret) { struct bao_io_request* req; - // pop the first request from the list + if (WARN_ON_ONCE(!client || !ret)) { + return false; + } + mutex_lock(&client->virtio_requests_lock); - req = list_first_entry_or_null(&client->virtio_requests, struct bao_io_request, list); - mutex_unlock(&client->virtio_requests_lock); - if (req == NULL) { + req = list_first_entry_or_null(&client->virtio_requests, struct bao_io_request, list); + if (!req) { + mutex_unlock(&client->virtio_requests_lock); return false; } - // copy the request to the return 
value + list_del(&req->list); *ret = req->virtio_request; - // delete the request from the list - mutex_lock(&client->virtio_requests_lock); - list_del(&req->list); mutex_unlock(&client->virtio_requests_lock); - // free the request kfree(req); return true; } /** - * Destroy an I/O client - * @client: The I/O client to be destroyed + * bao_io_client_destroy - Destroy an I/O client + * @client: The bao_io_client to destroy */ static void bao_io_client_destroy(struct bao_io_client* client) { - struct bao_io_client *range, *next; - struct bao_dm* dm = client->dm; + struct bao_io_client* range; + struct bao_io_client* next; + struct bao_dm* dm; + + if (WARN_ON_ONCE(!client)) { + return; + } + + dm = client->dm; - // pause the I/O requests dispatcher bao_io_dispatcher_pause(dm); - // set the destroying flag set_bit(BAO_IO_CLIENT_DESTROYING, &client->flags); - // stop the client if (client->is_control) { wake_up_interruptible(&client->wq); } else { bao_ioeventfd_client_destroy(dm); - kthread_stop(client->thread); + if (client->thread) { + kthread_stop(client->thread); + } } - // remove the I/O ranges down_write(&client->range_lock); list_for_each_entry_safe(range, next, &client->range_list, list) { @@ -119,20 +141,18 @@ static void bao_io_client_destroy(struct bao_io_client* client) } up_write(&client->range_lock); - // remove the I/O client down_write(&dm->io_clients_lock); if (client->is_control) { dm->control_client = NULL; } else { dm->ioeventfd_client = NULL; } + list_del(&client->list); up_write(&dm->io_clients_lock); - // resume the I/O requests dispatcher bao_io_dispatcher_resume(dm); - // free the allocated I/O client object kfree(client); } @@ -140,7 +160,10 @@ void bao_io_clients_destroy(struct bao_dm* dm) { struct bao_io_client *client, *next; - // destroy all the I/O clients + if (WARN_ON_ONCE(!dm)) { + return; + } + list_for_each_entry_safe(client, next, &dm->io_clients, list) { bao_io_client_destroy(client); @@ -149,26 +172,17 @@ void 
bao_io_clients_destroy(struct bao_dm* dm) int bao_io_client_attach(struct bao_io_client* client) { + if (WARN_ON_ONCE(!client)) { + return -EINVAL; + } + if (client->is_control) { - /* - * In the Control client, a user space thread waits on the waitqueue. - * The thread should wait until: - * - there are pending I/O requests to be processed - * - the I/O client is going to be destroyed - */ wait_event_interruptible(client->wq, bao_io_client_has_pending_requests(client) || bao_io_client_is_destroying(client)); if (bao_io_client_is_destroying(client)) { return -EPERM; } } else { - /* - * In the non-control client (e.g., Ioeventfd Client), a kernel space thread - * waits on the waitqueue. The thread should wait until: - * - there are pending I/O requests to be processed - * - the I/O client is going to be destroyed - * - the kernel thread is going to be stopped - */ wait_event_interruptible(client->wq, bao_io_client_has_pending_requests(client) || bao_io_client_is_destroying(client) || kthread_should_stop()); @@ -184,38 +198,53 @@ int bao_io_client_attach(struct bao_io_client* client) } /** - * Execution entity thread for a kernel I/O client (e.g., Ioeventfd client) - * @data: The I/O client + * bao_io_client_kernel_thread - Thread for processing a kernel I/O client + * @data: Pointer to the bao_io_client structure + * + * Return: 0 on completion */ static int bao_io_client_kernel_thread(void* data) { struct bao_io_client* client = data; struct bao_virtio_request req; - struct remio_hypercall_ret hret; - int ret = -EINVAL; - int stop = false; + struct bao_remio_hypercall_ctx ctx; + bool stop = false; + int ret; + + if (WARN_ON_ONCE(!client)) { + return -EINVAL; + } + + while (!stop && !kthread_should_stop()) { + ret = bao_io_client_attach(client); + if (ret < 0) { + stop = true; + break; + } - while (!stop) { - // attach the client - stop = bao_io_client_attach(client); while (bao_io_client_has_pending_requests(client) && !stop) { - // get the first kernel handled I/O 
request if (!bao_io_client_pop_request(client, &req)) { - return -EFAULT; + pr_err("%s: failed to pop I/O request\n", __func__); + stop = true; + break; } - // call the handler callback of the I/O client - // (e.g bao_ioeventfd_handler() for an ioeventfd client) + ret = client->handler(client, &req); if (ret < 0) { + pr_warn("%s: client handler returned %d\n", __func__, ret); break; } - // complete the request - else { - hret = bao_hypercall_remio(&req); - if (hret.hyp_ret != 0 || hret.remio_hyp_ret != 0) { - return -EFAULT; - } + ctx.dm_id = req.dm_id; + ctx.op = req.op; + ctx.addr = req.addr; + ctx.value = req.value; + ctx.access_width = req.access_width; + ctx.request_id = req.request_id; + + if (bao_remio_hypercall(&ctx)) { + stop = true; + break; } } } @@ -228,32 +257,33 @@ struct bao_io_client* bao_io_client_create(struct bao_dm* dm, bao_io_client_hand { struct bao_io_client* client; - // if the I/O client is implemenmted in the kernel, it must have a kernel - // handler (e.g., Ioevendfd client) + if (WARN_ON_ONCE(!dm || !name)) { + return NULL; + } + if (!handler && !is_control) { return NULL; } - // allocate the I/O client object client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { return NULL; } - // initialize the I/O client client->handler = handler; client->dm = dm; client->priv = data; client->is_control = is_control; if (name) { - strncpy(client->name, name, sizeof(client->name) - 1); + strscpy(client->name, name, sizeof(client->name)); } + INIT_LIST_HEAD(&client->virtio_requests); + mutex_init(&client->virtio_requests_lock); init_rwsem(&client->range_lock); INIT_LIST_HEAD(&client->range_list); init_waitqueue_head(&client->wq); - // if the I/O client is implemented in the kernel, create the kernel thread if (client->handler) { client->thread = kthread_run(bao_io_client_kernel_thread, client, "%s-kthread", client->name); @@ -263,34 +293,25 @@ struct bao_io_client* bao_io_client_create(struct bao_dm* dm, bao_io_client_hand } } - // add the I/O 
client to the I/O clients list down_write(&dm->io_clients_lock); if (is_control) { dm->control_client = client; } else { dm->ioeventfd_client = client; } + list_add(&client->list, &dm->io_clients); up_write(&dm->io_clients_lock); - // back up any pending requests that could potentially be lost - // (e.g., if the backend VM is initialized after the frontend VM) - if (is_control) { - while (bao_dispatch_io(dm) > 0) - ; - } - return client; } int bao_io_client_request(struct bao_io_client* client, struct bao_virtio_request* req) { - // check if the Control client exists - if (!client) { - return -EEXIST; + if (WARN_ON_ONCE(!client)) { + return -EINVAL; } - // pop the first request from the list if (!bao_io_client_pop_request(client, req)) { return -EFAULT; } @@ -302,22 +323,22 @@ int bao_io_client_range_add(struct bao_io_client* client, u64 start, u64 end) { struct bao_io_range* range; - // check if the range is valid + if (WARN_ON_ONCE(!client)) { + return -EINVAL; + } + if (end < start) { return -EINVAL; } - // allocate the range object range = kzalloc(sizeof(*range), GFP_KERNEL); if (!range) { return -ENOMEM; } - // initialize the range range->start = start; range->end = end; - // add the range to the list down_write(&client->range_lock); list_add(&range->list, &client->range_list); up_write(&client->range_lock); @@ -328,12 +349,16 @@ int bao_io_client_range_add(struct bao_io_client* client, u64 start, u64 end) void bao_io_client_range_del(struct bao_io_client* client, u64 start, u64 end) { struct bao_io_range* range; + struct bao_io_range* tmp; + + if (WARN_ON_ONCE(!client)) { + return; + } - // delete the range from the list down_write(&client->range_lock); - list_for_each_entry(range, &client->range_list, list) + list_for_each_entry_safe(range, tmp, &client->range_list, list) { - if (start == range->start && end == range->end) { + if (range->start == start && range->end == end) { list_del(&range->list); kfree(range); break; @@ -343,15 +368,19 @@ void 
bao_io_client_range_del(struct bao_io_client* client, u64 start, u64 end) } /** - * Check if the I/O request is in the range + * bao_io_request_in_range - Check if the I/O request is in the range * @range: The I/O request range * @req: The I/O request to be checked - * @return True if the I/O request is in the range, False otherwise + * + * Return: True if the I/O request is in the range, false otherwise */ static bool bao_io_request_in_range(struct bao_io_range* range, struct bao_virtio_request* req) { - // check if the I/O request is in the range - if ((req->addr >= range->start) && ((req->addr + req->access_width - 1) <= range->end)) { + if (WARN_ON_ONCE(!range || !req)) { + return false; + } + + if (req->addr >= range->start && (req->addr + req->access_width - 1) <= range->end) { return true; } @@ -360,30 +389,30 @@ static bool bao_io_request_in_range(struct bao_io_range* range, struct bao_virti struct bao_io_client* bao_io_client_find(struct bao_dm* dm, struct bao_virtio_request* req) { - struct bao_io_client *client, *found = NULL; + struct bao_io_client* client; + struct bao_io_client* found = NULL; struct bao_io_range* range; - // for all the I/O clients + if (WARN_ON_ONCE(!dm || !req)) { + return NULL; + } + list_for_each_entry(client, &dm->io_clients, list) { down_read(&client->range_lock); - // for all the ranges list_for_each_entry(range, &client->range_list, list) { - // check if the I/O request is in the range of a given client if (bao_io_request_in_range(range, req)) { found = client; break; } } up_read(&client->range_lock); + if (found) { break; } } - // if the I/O request is not in the range of any client, return the Control - // client otherwise, return the client that the I/O request belongs to (e.g., - // Ioeventfd client) return found ? 
found : dm->control_client; } diff --git a/iodispatcher/io_dispatcher.c b/iodispatcher/io_dispatcher.c index 68915b9..d1b44b3 100644 --- a/iodispatcher/io_dispatcher.c +++ b/iodispatcher/io_dispatcher.c @@ -5,47 +5,45 @@ * Copyright (c) Bao Project and Contributors. All rights reserved. * * Authors: - * João Peixoto + * João Peixoto + * José Martins + * David Cerdeira */ -#include "bao.h" -#include "hypercall.h" -#include -#include -#include -#include -#include -#include -#include -#include - -// Define a wrapper structure that contains both work_struct and the private -// data (bao_dm) +#include +#include + +/** + * struct bao_io_dispatcher_work - Work item for I/O dispatching + * @work: Work struct for scheduling on workqueue + * @dm: Pointer to the associated Bao device model + * + * Represents a single work item that dispatches I/O requests + * for a specific Bao device model. + */ struct bao_io_dispatcher_work { struct work_struct work; struct bao_dm* dm; }; +/* Array of I/O dispatcher work items, one per Bao DM */ static struct bao_io_dispatcher_work io_dispatcher_work[BAO_IO_MAX_DMS]; -/** - * Responsible for dispatching I/O requests for all I/O DMs - * This function is called by the workqueue - * @work: The work struct - */ -static void io_dispatcher(struct work_struct* work); -// Workqueue for the I/O requests +/* Workqueues dedicated to dispatching I/O requests for each Bao DM */ static struct workqueue_struct* bao_io_dispatcher_wq[BAO_IO_MAX_DMS]; void bao_io_dispatcher_destroy(struct bao_dm* dm) { - // if the workqueue exists + if (WARN_ON_ONCE(!dm)) { + return; + } + if (bao_io_dispatcher_wq[dm->info.id]) { - // pause the I/O Dispatcher bao_io_dispatcher_pause(dm); - // destroy the I/O Dispatcher workqueue + destroy_workqueue(bao_io_dispatcher_wq[dm->info.id]); - // remove the interrupt handler + bao_io_dispatcher_wq[dm->info.id] = NULL; + bao_intc_remove_handler(); } } @@ -53,116 +51,139 @@ void bao_io_dispatcher_destroy(struct bao_dm* dm) int 
bao_dispatch_io(struct bao_dm* dm) { struct bao_io_client* client; + struct bao_remio_hypercall_ctx ctx; struct bao_virtio_request req; - struct remio_hypercall_ret ret; - - // update the request - // the dm_id is the Virtual Remote I/O ID - req.dm_id = dm->info.id; - // BAO_IO_ASK will extract the I/O request from the Remote I/O system - req.op = BAO_IO_ASK; - // clear the other fields (convention) - req.addr = 0; - req.value = 0; - req.request_id = 0; - - // perform a Hypercall to get the I/O request from the Remote I/O system - // the ret.pending_requests value holds the number of requests that still need - // to be processed - ret = bao_hypercall_remio(&req); - - if (ret.hyp_ret != 0 || ret.remio_hyp_ret != 0) { + + if (WARN_ON_ONCE(!dm)) { + return -EINVAL; + } + + ctx.dm_id = dm->info.id; + ctx.op = BAO_IO_ASK; + ctx.addr = 0; + ctx.value = 0; + ctx.request_id = 0; + ctx.access_width = 0; + ctx.npend_req = 0; + + if (bao_remio_hypercall(&ctx)) { return -EFAULT; } - // find the I/O client that the I/O request belongs to + req.dm_id = ctx.dm_id; + req.op = ctx.op; + req.addr = ctx.addr; + req.value = ctx.value; + req.access_width = ctx.access_width; + req.request_id = ctx.request_id; + down_read(&dm->io_clients_lock); client = bao_io_client_find(dm, &req); if (!client) { up_read(&dm->io_clients_lock); - return -EEXIST; + return -ENODEV; } - // add the request to the end of the virtio_request list - bao_io_client_push_request(client, &req); + if (!bao_io_client_push_request(client, &req)) { + up_read(&dm->io_clients_lock); + return -EINVAL; + } - // wake up the handler thread which is waiting for requests on the wait queue wake_up_interruptible(&client->wq); up_read(&dm->io_clients_lock); - // return the number of request that still need to be processed - return ret.pending_requests; + return ctx.npend_req; } +/** + * io_dispatcher - Workqueue handler for dispatching I/O + * @work: Work struct representing this dispatch operation + * + * Handles all pending I/O 
requests for the associated Bao DM. + * Executed in process context by the workqueue. + */ static void io_dispatcher(struct work_struct* work) { - struct bao_io_dispatcher_work* bao_dm_work = - container_of(work, struct bao_io_dispatcher_work, work); - struct bao_dm* dm = bao_dm_work->dm; + struct bao_io_dispatcher_work* bao_dm_work; + struct bao_dm* dm; + + if (WARN_ON_ONCE(!work)) { + return; + } + + bao_dm_work = container_of(work, struct bao_io_dispatcher_work, work); + dm = bao_dm_work->dm; - // dispatch the I/O request for the device model - while (bao_dispatch_io(dm) > 0) - ; // while there are requests to be processed + if (WARN_ON_ONCE(!dm)) { + return; + } + + while (bao_dispatch_io(dm) > 0) { + cpu_relax(); + } } /** - * Interrupt Controller handler for the I/O requests - * @note: This function is called by the interrupt controller - * when an interrupt is triggered (when a new I/O request is available) - * @dm: The DM that triggered the interrupt + * io_dispatcher_intc_handler - Interrupt handler for I/O requests + * @dm: Bao device model that triggered the interrupt + * + * Invoked by the interrupt controller when a new I/O request is available. + * Queues the corresponding work item onto the I/O dispatcher workqueue + * for processing in process context. 
*/ static void io_dispatcher_intc_handler(struct bao_dm* dm) { - // add the work to the workqueue + if (WARN_ON_ONCE(!dm || !bao_io_dispatcher_wq[dm->info.id])) { + return; + } + queue_work(bao_io_dispatcher_wq[dm->info.id], &io_dispatcher_work[dm->info.id].work); } void bao_io_dispatcher_pause(struct bao_dm* dm) { - // remove the interrupt handler + if (WARN_ON_ONCE(!dm || !bao_io_dispatcher_wq[dm->info.id])) { + return; + } + bao_intc_remove_handler(); - // drain the workqueue (wait for all the work to finish) + drain_workqueue(bao_io_dispatcher_wq[dm->info.id]); } void bao_io_dispatcher_resume(struct bao_dm* dm) { - // setup the interrupt handler + if (WARN_ON_ONCE(!dm || !bao_io_dispatcher_wq[dm->info.id])) { + return; + } + bao_intc_setup_handler(io_dispatcher_intc_handler); - // add the work to the workqueue + queue_work(bao_io_dispatcher_wq[dm->info.id], &io_dispatcher_work[dm->info.id].work); } int bao_io_dispatcher_init(struct bao_dm* dm) { char name[BAO_NAME_MAX_LEN]; - snprintf(name, BAO_NAME_MAX_LEN, "bao-iodwq%u", dm->info.id); - // Create the I/O Dispatcher workqueue with high priority + if (WARN_ON_ONCE(!dm)) { + return -EINVAL; + } + + snprintf(name, sizeof(name), "bao-iodwq%u", dm->info.id); + + if (bao_io_dispatcher_wq[dm->info.id]) { + return -EBUSY; + } bao_io_dispatcher_wq[dm->info.id] = alloc_workqueue(name, WQ_HIGHPRI | WQ_MEM_RECLAIM, 1); if (!bao_io_dispatcher_wq[dm->info.id]) { return -ENOMEM; } - // Assign the custom data to the work io_dispatcher_work[dm->info.id].dm = dm; - - // Initialize the work_struct INIT_WORK(&io_dispatcher_work[dm->info.id].work, io_dispatcher); - // setup the interrupt handler bao_intc_setup_handler(io_dispatcher_intc_handler); return 0; } - -int bao_io_dispatcher_setup(void) -{ - // Do nothing - return 0; -} - -void bao_io_dispatcher_remove(void) -{ - // Do nothing -} diff --git a/iodispatcher/ioctls.c b/iodispatcher/ioctls.c deleted file mode 100644 index f9413f0..0000000 --- a/iodispatcher/ioctls.c +++ 
/dev/null @@ -1,117 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Bao Hypervisor IOCTLs Handler for the I/O Dispatcher kernel module - * - * Copyright (c) Bao Project and Contributors. All rights reserved. - * - * Authors: - * João Peixoto - */ - -#include "bao.h" -#include "hypercall.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include - -long bao_io_dispatcher_driver_ioctl(struct file* filp, unsigned int cmd, unsigned long ioctl_param) -{ - int rc = -EINVAL; - struct bao_dm_info* info; - - switch (cmd) { - case BAO_IOCTL_DM_GET_INFO: - info = memdup_user((void __user*)ioctl_param, sizeof(struct bao_dm_info)); - if (IS_ERR(info)) { - pr_err("%s: memdup_user failed\n", __FUNCTION__); - return PTR_ERR(info); - } - rc = bao_dm_get_info(info); - if (!rc) { - pr_err("%s: DM with id [%d] not found\n", __FUNCTION__, info->id); - kfree(info); - return -EINVAL; - } - if (copy_to_user((void __user*)ioctl_param, info, sizeof(struct bao_dm_info))) { - pr_err("%s: copy_to_user failed\n", __FUNCTION__); - kfree(info); - return -EFAULT; - } - break; - default: - pr_err("%s: unknown ioctl cmd [%d]\n", __FUNCTION__, cmd); - return -ENOTTY; - } - return rc; -} - -long bao_dm_ioctl(struct file* filp, unsigned int cmd, unsigned long ioctl_param) -{ - struct bao_virtio_request* req; - struct remio_hypercall_ret hret; - int rc = -EINVAL; - - // get the backend DM pointer from the file pointer private data - struct bao_dm* dm = filp->private_data; - - switch (cmd) { - case BAO_IOCTL_IO_CLIENT_ATTACH: - req = memdup_user((void __user*)ioctl_param, sizeof(struct bao_virtio_request)); - if (IS_ERR(req)) { - pr_err("%s: memdup_user failed\n", __FUNCTION__); - return PTR_ERR(req); - } - if (!dm->control_client) { - pr_err("%s: control client does not exist\n", __FUNCTION__); - return -EINVAL; - } - rc = bao_io_client_attach(dm->control_client); - if (rc == 0) { - rc = bao_io_client_request(dm->control_client, req); - if (copy_to_user((void 
__user*)ioctl_param, req, - sizeof(struct bao_virtio_request))) { - pr_err("%s: copy_to_user failed\n", __FUNCTION__); - return -EFAULT; - } - } - kfree(req); - break; - case BAO_IOCTL_IO_REQUEST_COMPLETE: - req = memdup_user((void __user*)ioctl_param, sizeof(struct bao_virtio_request)); - if (IS_ERR(req)) { - pr_err("%s: memdup_user failed\n", __FUNCTION__); - return PTR_ERR(req); - } - hret = bao_hypercall_remio(req); - rc = hret.hyp_ret | hret.remio_hyp_ret; - break; - case BAO_IOCTL_IOEVENTFD: - struct bao_ioeventfd ioeventfd; - if (copy_from_user(&ioeventfd, (void __user*)ioctl_param, - sizeof(struct bao_ioeventfd))) { - pr_err("%s: copy_from_user failed\n", __FUNCTION__); - return -EFAULT; - } - rc = bao_ioeventfd_client_config(dm, &ioeventfd); - break; - case BAO_IOCTL_IRQFD: - struct bao_irqfd irqfd; - if (copy_from_user(&irqfd, (void __user*)ioctl_param, sizeof(struct bao_irqfd))) { - pr_err("%s: copy_from_user failed\n", __FUNCTION__); - return -EFAULT; - } - rc = bao_irqfd_server_config(dm, &irqfd); - break; - default: - pr_err("%s: unknown ioctl cmd [%d]\n", __FUNCTION__, cmd); - rc = -ENOTTY; - break; - } - return rc; -} diff --git a/iodispatcher/ioeventfd.c b/iodispatcher/ioeventfd.c index d80732e..481ac8f 100644 --- a/iodispatcher/ioeventfd.c +++ b/iodispatcher/ioeventfd.c @@ -5,20 +5,24 @@ * Copyright (c) Bao Project and Contributors. All rights reserved. 
* * Authors: - * João Peixoto + * João Peixoto + * José Martins + * David Cerdeira */ -#include "bao.h" +#include #include /** - * Properties of a ioeventfd - * @list: List node of the ioeventfd - * @eventfd: Eventfd of the ioeventfd - * @addr: Address of I/O range - * @data: Data for matching - * @length: Length of I/O range - * @wildcard: Data matching or not + * struct ioeventfd - Properties of an I/O eventfd + * @list: List node linking this ioeventfd + * @eventfd: Associated eventfd context + * @addr: Start address of the I/O range + * @data: Data used for matching (if not wildcard) + * @length: Length of the I/O range + * @wildcard: True if data matching is not required + * + * Represents an I/O eventfd registered for a Bao device model. */ struct ioeventfd { struct list_head list; @@ -30,40 +34,40 @@ struct ioeventfd { }; /** - * Shutdown the ioeventfd - * @dm: The DM that the ioeventfd belongs to - * @p: The ioeventfd to shutdown + * bao_ioeventfd_shutdown - Release and remove an ioeventfd + * @dm: Bao device model owning the ioeventfd + * @p: Ioeventfd to shut down */ static void bao_ioeventfd_shutdown(struct bao_dm* dm, struct ioeventfd* p) { lockdep_assert_held(&dm->ioeventfds_lock); - // unregister the ioeventfd + if (WARN_ON_ONCE(!p)) { + return; + } + eventfd_ctx_put(p->eventfd); - // remove the ioeventfd from the list list_del(&p->list); - // free the ioeventfd kfree(p); } /** - * Check if the configuration of ioeventfd is valid - * @config: The configuration of ioeventfd - * @return: bool + * bao_ioeventfd_config_valid - Validate ioeventfd configuration + * @config: Ioeventfd configuration + * + * Return: True if config is non-NULL, address+length does not wrap, + * and length is 1, 2, 4, or 8 bytes. 
*/ static bool bao_ioeventfd_config_valid(struct bao_ioeventfd* config) { - // check if the configuration is valid - if (!config) { + if (WARN_ON_ONCE(!config)) { return false; } - // check for overflow if (config->addr + config->len < config->addr) { return false; } - // vhost supports 1, 2, 4 and 8 bytes access if (!(config->len == 1 || config->len == 2 || config->len == 4 || config->len == 8)) { return false; } @@ -72,10 +76,12 @@ static bool bao_ioeventfd_config_valid(struct bao_ioeventfd* config) } /** - * Check if the ioeventfd is conflict with other ioeventfds - * @dm: The DM that the ioeventfd belongs to - * @ioeventfd: The ioeventfd to check - * @return: bool + * bao_ioeventfd_is_conflict - Check if an ioeventfd conflicts with existing ones + * @dm: Bao device model + * @ioeventfd: Ioeventfd to check + * + * Return: True if an existing ioeventfd matches address, eventfd, + * and optionally data. */ static bool bao_ioeventfd_is_conflict(struct bao_dm* dm, struct ioeventfd* ioeventfd) { @@ -83,28 +89,40 @@ static bool bao_ioeventfd_is_conflict(struct bao_dm* dm, struct ioeventfd* ioeve lockdep_assert_held(&dm->ioeventfds_lock); - // either one is wildcard, the data matching will be skipped - list_for_each_entry(p, &dm->ioeventfds, list) if (p->eventfd == ioeventfd->eventfd && - p->addr == ioeventfd->addr && - (p->wildcard || ioeventfd->wildcard || p->data == ioeventfd->data)) return true; + if (WARN_ON_ONCE(!dm || !ioeventfd)) { + return true; + } + + list_for_each_entry(p, &dm->ioeventfds, list) + { + if (p->eventfd == ioeventfd->eventfd && p->addr == ioeventfd->addr && + (p->wildcard || ioeventfd->wildcard || p->data == ioeventfd->data)) { + return true; + } + } return false; } /** - * Return the matched ioeventfd - * @dm: The DM to check - * @addr: The address of I/O request - * @data: The data of I/O request - * @len: The length of I/O request - * @return: The matched ioeventfd or NULL + * bao_ioeventfd_match - Find ioeventfd matching an I/O request + * 
@dm: Bao device model + * @addr: I/O request address + * @data: I/O request data + * @len: I/O request length + * + * Return: The matching ioeventfd, NULL if none matches. */ static struct ioeventfd* bao_ioeventfd_match(struct bao_dm* dm, u64 addr, u64 data, int len) { - struct ioeventfd* p = NULL; + struct ioeventfd* p; lockdep_assert_held(&dm->ioeventfds_lock); + if (WARN_ON_ONCE(!dm)) { + return NULL; + } + list_for_each_entry(p, &dm->ioeventfds, list) { if (p->addr == addr && p->length >= len && (p->wildcard || p->data == data)) { @@ -116,9 +134,17 @@ static struct ioeventfd* bao_ioeventfd_match(struct bao_dm* dm, u64 addr, u64 da } /** - * Assign an eventfd to a DM and create a ioeventfd associated with the eventfd - * @dm: The DM to assign the eventfd to - * @config: The configuration of the eventfd + * bao_ioeventfd_assign - Assign and create an eventfd for a DM + * @dm: Bao device model to assign the eventfd to + * @config: Configuration of the eventfd to create + * + * Creates a new ioeventfd associated with the given eventfd and + * adds it to the Bao DM. Validates the configuration, checks for + * conflicts with existing ioeventfds, and registers the corresponding + * I/O client address range. Supports optional data matching for + * virtio 1.0 notifications; if not set, wildcard matching is used. 
+ * + * Return: 0 on success, a negative error code on failure */ static int bao_ioeventfd_assign(struct bao_dm* dm, struct bao_ioeventfd* config) { @@ -126,126 +152,122 @@ static int bao_ioeventfd_assign(struct bao_dm* dm, struct bao_ioeventfd* config) struct ioeventfd* new; int rc = 0; - // check if the configuration is valid + if (WARN_ON_ONCE(!dm || !config)) { + return -EINVAL; + } + if (!bao_ioeventfd_config_valid(config)) { return -EINVAL; } - // get the eventfd from the file descriptor eventfd = eventfd_ctx_fdget(config->fd); if (IS_ERR(eventfd)) { return PTR_ERR(eventfd); } - // allocate a new ioeventfd object new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) { rc = -ENOMEM; - goto err; + goto err_put_eventfd; } - // initialize the ioeventfd INIT_LIST_HEAD(&new->list); new->addr = config->addr; new->length = config->len; new->eventfd = eventfd; - - /* - * BAO_IOEVENTFD_FLAG_DATAMATCH flag is set in virtio 1.0 support, the - * writing of notification register of each virtqueue may trigger the - * notification. There is no data matching requirement. 
- */ - if (config->flags & BAO_IOEVENTFD_FLAG_DATAMATCH) { + new->wildcard = !(config->flags & BAO_IOEVENTFD_FLAG_DATAMATCH); + if (!new->wildcard) { new->data = config->data; - } else { - new->wildcard = true; } mutex_lock(&dm->ioeventfds_lock); - // check if the ioeventfd is conflict with other ioeventfds if (bao_ioeventfd_is_conflict(dm, new)) { rc = -EEXIST; - goto err_unlock; + goto err_unlock_free; } - // register the I/O range monitor into the Ioeventfd client rc = bao_io_client_range_add(dm->ioeventfd_client, new->addr, new->addr + new->length - 1); if (rc < 0) { - goto err_unlock; + goto err_unlock_free; } - // add the ioeventfd to the list list_add_tail(&new->list, &dm->ioeventfds); mutex_unlock(&dm->ioeventfds_lock); - return rc; + return 0; -err_unlock: +err_unlock_free: mutex_unlock(&dm->ioeventfds_lock); kfree(new); -err: +err_put_eventfd: eventfd_ctx_put(eventfd); return rc; } /** - * Deassign an eventfd from a DM and destroy the ioeventfd associated with - * the eventfd. 
- * @dm: The DM to deassign the eventfd from - * @config: The configuration of the eventfd + * bao_ioeventfd_deassign - Deassign and destroy an eventfd from a DM + * @dm: Bao device model to deassign the eventfd from + * @config: Configuration of the eventfd to remove + * + * Return: 0 on success, a negative error code on failure */ static int bao_ioeventfd_deassign(struct bao_dm* dm, struct bao_ioeventfd* config) { struct ioeventfd* p; struct eventfd_ctx* eventfd; - // get the eventfd from the file descriptor + if (WARN_ON_ONCE(!dm || !config)) { + return -EINVAL; + } + eventfd = eventfd_ctx_fdget(config->fd); if (IS_ERR(eventfd)) { return PTR_ERR(eventfd); } mutex_lock(&dm->ioeventfds_lock); + list_for_each_entry(p, &dm->ioeventfds, list) { if (p->eventfd != eventfd) { continue; } - // delete the I/O range monitor from the Ioeventfd client + bao_io_client_range_del(dm->ioeventfd_client, p->addr, p->addr + p->length - 1); - // shutdown the ioeventfd + bao_ioeventfd_shutdown(dm, p); break; } - mutex_unlock(&dm->ioeventfds_lock); - // unregister the eventfd + mutex_unlock(&dm->ioeventfds_lock); eventfd_ctx_put(eventfd); + return 0; } /** - * Handle the Ioeventfd client I/O request - * This function is called by the I/O client kernel thread - * (bao_io_client_kernel_thread) - * @client: The Ioeventfd client that the I/O request belongs to - * @req: The I/O request to be handled + * bao_ioeventfd_handler - Handle an Ioeventfd client I/O request + * @client: Ioeventfd client associated with the request + * @req: I/O request to process + * + * Processes I/O requests from the Bao I/O client kernel thread + * (bao_io_client_kernel_thread). For READ operations, the value is + * ignored and set to 0 since virtio MMIO drivers only write to the + * `QueueNotify` field. WRITE operations are checked against the + * registered ioeventfds, and the corresponding eventfd is signaled + * if a match is found. 
+ * + * Return: 0 on success, a negative error code on failure */ static int bao_ioeventfd_handler(struct bao_io_client* client, struct bao_virtio_request* req) { struct ioeventfd* p; - /* - * I/O requests are dispatched by range check only, so a - * bao_io_client need process both READ and WRITE accesses - * of same range. READ accesses are safe to be ignored here - * because virtio MMIO drivers only write into the notify - * register (`QueueNotify` field) for notification. - * In fact, the read request won't exist since - * the `QueueNotify` field is WRITE ONLY from the driver - * and read only from the device. - */ + if (WARN_ON_ONCE(!client || !req)) { + return -EINVAL; + } + if (req->op == BAO_IO_READ) { req->value = 0; return 0; @@ -253,13 +275,11 @@ static int bao_ioeventfd_handler(struct bao_io_client* client, struct bao_virtio mutex_lock(&client->dm->ioeventfds_lock); - // find the matched ioeventfd p = bao_ioeventfd_match(client->dm, req->addr, req->value, req->access_width); - - // if matched, signal the eventfd if (p) { eventfd_signal(p->eventfd); } + mutex_unlock(&client->dm->ioeventfds_lock); return 0; @@ -267,17 +287,14 @@ static int bao_ioeventfd_handler(struct bao_io_client* client, struct bao_virtio int bao_ioeventfd_client_config(struct bao_dm* dm, struct bao_ioeventfd* config) { - // check if the DM and configuration are valid - if (WARN_ON(!dm || !config)) { + if (WARN_ON_ONCE(!dm || !config)) { return -EINVAL; } - // deassign the eventfd from the DM if (config->flags & BAO_IOEVENTFD_FLAG_DEASSIGN) { bao_ioeventfd_deassign(dm, config); } - // assign the eventfd to the DM return bao_ioeventfd_assign(dm, config); } @@ -285,16 +302,18 @@ int bao_ioeventfd_client_init(struct bao_dm* dm) { char name[BAO_NAME_MAX_LEN]; + if (WARN_ON_ONCE(!dm)) { + return -EINVAL; + } + mutex_init(&dm->ioeventfds_lock); INIT_LIST_HEAD(&dm->ioeventfds); - // create a new name for the Ioeventfd client based on type and DM ID snprintf(name, sizeof(name), "bao-ioevfdc%u", 
dm->info.id); - // create a new I/O client (Ioeventfd client) dm->ioeventfd_client = bao_io_client_create(dm, bao_ioeventfd_handler, NULL, false, name); if (!dm->ioeventfd_client) { - return -EINVAL; + return -ENOMEM; } return 0; @@ -302,10 +321,14 @@ int bao_ioeventfd_client_init(struct bao_dm* dm) void bao_ioeventfd_client_destroy(struct bao_dm* dm) { - struct ioeventfd *p, *next; + struct ioeventfd* p; + struct ioeventfd* next; + + if (WARN_ON_ONCE(!dm)) { + return; + } mutex_lock(&dm->ioeventfds_lock); - // shutdown all the ioeventfds list_for_each_entry_safe(p, next, &dm->ioeventfds, list) bao_ioeventfd_shutdown(dm, p); mutex_unlock(&dm->ioeventfds_lock); } diff --git a/iodispatcher/irqfd.c b/iodispatcher/irqfd.c index fd7ceac..041f7de 100644 --- a/iodispatcher/irqfd.c +++ b/iodispatcher/irqfd.c @@ -5,24 +5,27 @@ * Copyright (c) Bao Project and Contributors. All rights reserved. * * Authors: - * João Peixoto + * João Peixoto + * José Martins + * David Cerdeira */ -#include "bao.h" -#include "hypercall.h" +#include +#include #include #include #include -#include /** - * struct irqfd - Properties of irqfd - * @dm: Associated DM pointer - * @wait: Entry of wait-queue - * @shutdown: Async shutdown work - * @eventfd: Associated eventfd to poll - * @list: Entry within &bao_dm.irqfds of irqfds of a DM - * @pt: Structure for select/poll on the associated eventfd + * struct irqfd - Properties of an IRQ eventfd + * @dm: Associated Bao device model + * @wait: Wait queue entry for blocking/waking + * @shutdown: Work struct for async shutdown + * @eventfd: Eventfd used to signal interrupts + * @list: List node within &bao_dm.irqfds + * @pt: Poll table for select/poll on the eventfd + * + * Represents an IRQ eventfd registered to a Bao device model. 
*/ struct irqfd { struct bao_dm* dm; @@ -34,34 +37,37 @@ struct irqfd { }; /** - * Shutdown a irqfd - * @irqfd: The irqfd to shutdown + * bao_irqfd_shutdown - Release and remove an irqfd + * @irqfd: IRQ eventfd to shut down (lock must be held) */ static void bao_irqfd_shutdown(struct irqfd* irqfd) { u64 cnt; + + if (WARN_ON_ONCE(!irqfd || !irqfd->dm)) { + return; + } + lockdep_assert_held(&irqfd->dm->irqfds_lock); - // delete the irqfd from the list of irqfds list_del_init(&irqfd->list); - // remove the irqfd from the wait queue eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt); - // release the eventfd eventfd_ctx_put(irqfd->eventfd); - // free the irqfd kfree(irqfd); } /** - * Inject a notify hypercall into the Bao Hypervisor - * @id: The DM ID + * bao_irqfd_inject - Inject a notify hypercall into the Bao hypervisor + * @id: Bao DM ID + * + * Return: 0 on success, -EFAULT if the hypercall fails. */ static int bao_irqfd_inject(int id) { - struct bao_virtio_request request = { + struct bao_remio_hypercall_ctx ctx = { .dm_id = id, .addr = 0, .op = BAO_IO_NOTIFY, @@ -70,46 +76,44 @@ static int bao_irqfd_inject(int id) .request_id = 0, }; - // notify the Hypervisor about the event - struct remio_hypercall_ret ret = bao_hypercall_remio(&request); - - if (ret.hyp_ret != 0 || ret.remio_hyp_ret != 0) { + if (bao_remio_hypercall(&ctx)) { return -EFAULT; } + return 0; } /** - * Custom wake-up handling to be notified whenever underlying eventfd is - * signaled. - * @note: This function will be called by Linux kernel poll table (irqfd->pt) - * whenever the eventfd is signaled. - * @wait: Entry of wait-queue - * @mode: Mode - * @sync: Sync - * @key: Poll bits - * @return int + * bao_irqfd_wakeup - Custom wake-up handler for eventfd signaling + * @wait: Wait queue entry + * @mode: Mode flags + * @sync: Sync indicator + * @key: Poll bits (cast from void *) + * + * Called by the Linux kernel poll table when the underlying eventfd is signaled. 
+ * Injects a Bao notify hypercall on POLLIN or schedules shutdown on POLLHUP. + * + * Return: 0 on success, a negative error code on failure */ static int bao_irqfd_wakeup(wait_queue_entry_t* wait, unsigned int mode, int sync, void* key) { - unsigned long poll_bits = (unsigned long)key; struct irqfd* irqfd; struct bao_dm* dm; + unsigned long poll_bits; - // get the irqfd object from the wait queue - irqfd = container_of(wait, struct irqfd, wait); + if (WARN_ON_ONCE(!wait || !key)) { + return -EINVAL; + } - // get the DM from the irqfd + irqfd = container_of(wait, struct irqfd, wait); dm = irqfd->dm; + poll_bits = (unsigned long)key; - // check if the event is signaled if (poll_bits & POLLIN) { - // an event has been signaled, inject a irqfd bao_irqfd_inject(dm->info.id); } if (poll_bits & POLLHUP) { - // do shutdown work in thread to hold wqh->lock queue_work(dm->irqfd_server, &irqfd->shutdown); } @@ -117,38 +121,47 @@ static int bao_irqfd_wakeup(wait_queue_entry_t* wait, unsigned int mode, int syn } /** - * Register the file descriptor with the poll table and associate it with a wait - * queue that the kernel will monitor for events - * @file: The file to poll - * @wqh: The wait queue head - * @pt: The poll table + * bao_irqfd_poll_func - Register an IRQFD with a poll table + * @file: File to poll + * @wqh: Wait queue head + * @pt: Poll table + * + * Adds the irqfd's wait queue entry to the kernel wait queue for event monitoring. 
*/ static void bao_irqfd_poll_func(struct file* file, wait_queue_head_t* wqh, poll_table* pt) { struct irqfd* irqfd; - // get the irqfd from the file + if (WARN_ON_ONCE(!pt || !wqh)) { + return; + } + irqfd = container_of(pt, struct irqfd, pt); - // add the irqfd wait queue entry to the wait queue add_wait_queue(wqh, &irqfd->wait); } /** - * Shutdown a irqfd - * @work: The work to shutdown the irqfd + * irqfd_shutdown_work - Workqueue handler to shutdown an irqfd + * @work: Work struct for the shutdown operation + * + * Removes and frees the irqfd from the DM under lock if it is still linked. */ static void irqfd_shutdown_work(struct work_struct* work) { struct irqfd* irqfd; struct bao_dm* dm; - // get the irqfd from the work - irqfd = container_of(work, struct irqfd, shutdown); + if (WARN_ON_ONCE(!work)) { + return; + } - // get the DM from the irqfd + irqfd = container_of(work, struct irqfd, shutdown); dm = irqfd->dm; - // shutdown the irqfd + if (WARN_ON_ONCE(!dm)) { + return; + } + mutex_lock(&dm->irqfds_lock); if (!list_empty(&irqfd->list)) { bao_irqfd_shutdown(irqfd); @@ -157,110 +170,101 @@ static void irqfd_shutdown_work(struct work_struct* work) } /** - * Assign an eventfd to a DM and create the associated irqfd. 
- * @dm: The DM to assign the eventfd - * @args: The &struct bao_irqfd to assign + * bao_irqfd_assign - Assign an eventfd to a DM and create an irqfd + * @dm: Bao device model to assign the eventfd + * @args: Configuration of the irqfd to assign + * + * Return: 0 on success, a negative error code on failure */ static int bao_irqfd_assign(struct bao_dm* dm, struct bao_irqfd* args) { struct eventfd_ctx* eventfd = NULL; - struct irqfd *irqfd, *tmp; + struct irqfd* irqfd; + struct irqfd* tmp; __poll_t events; struct fd f; int ret = 0; - // allocate a new irqfd object + if (WARN_ON_ONCE(!dm || !args)) { + return -EINVAL; + } + irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); if (!irqfd) { return -ENOMEM; } - // initialize the irqfd irqfd->dm = dm; INIT_LIST_HEAD(&irqfd->list); INIT_WORK(&irqfd->shutdown, irqfd_shutdown_work); - // get a reference to the file descriptor f = fdget(args->fd); if (!fd_file(f)) { ret = -EBADF; - goto out; + goto out_free_irqfd; } - // get the eventfd from the file descriptor eventfd = eventfd_ctx_fileget(fd_file(f)); if (IS_ERR(eventfd)) { ret = PTR_ERR(eventfd); - goto fail; + goto out_fdput; } - - // assign the eventfd to the irqfd irqfd->eventfd = eventfd; - // define the custom callback for the wait queue to be notified whenever - // underlying eventfd is signaled (in this case we don't need to wake-up any - // task, just to be notified when the eventfd is signaled) init_waitqueue_func_entry(&irqfd->wait, bao_irqfd_wakeup); - - // define the custom poll function behavior init_poll_funcptr(&irqfd->pt, bao_irqfd_poll_func); - // add the irqfd to the list of irqfds of the DM mutex_lock(&dm->irqfds_lock); list_for_each_entry(tmp, &dm->irqfds, list) { - if (irqfd->eventfd != tmp->eventfd) { - continue; + if (irqfd->eventfd == tmp->eventfd) { + ret = -EBUSY; + mutex_unlock(&dm->irqfds_lock); + goto out_put_eventfd; } - ret = -EBUSY; - mutex_unlock(&dm->irqfds_lock); - goto fail; } list_add_tail(&irqfd->list, &dm->irqfds); 
mutex_unlock(&dm->irqfds_lock); - // check the pending event in this stage by calling vfs_poll function - // (this function will internally call the custom poll function already - // defined) any event signaled upon this stage will be handled by the custom - // poll function events = vfs_poll(fd_file(f), &irqfd->pt); - - // if the event is signaled, signal Bao Hypervisor if (events & EPOLLIN) { bao_irqfd_inject(dm->info.id); } - // release the file descriptor reference fdput(f); return 0; -fail: - if (eventfd && !IS_ERR(eventfd)) { - eventfd_ctx_put(eventfd); - } +out_put_eventfd: + eventfd_ctx_put(eventfd); +out_fdput: fdput(f); -out: +out_free_irqfd: kfree(irqfd); return ret; } /** - * Deassign an eventfd from a DM and destroy the associated irqfd. - * @dm: The DM to deassign the eventfd - * @args: The &struct bao_irqfd to deassign + * bao_irqfd_deassign - Deassign an eventfd and destroy the associated irqfd + * @dm: Bao device model to remove the irqfd from + * @args: Configuration of the irqfd to deassign + * + * Return: 0 on success, a negative error code on failure */ static int bao_irqfd_deassign(struct bao_dm* dm, struct bao_irqfd* args) { - struct irqfd *irqfd, *tmp; + struct irqfd* irqfd; + struct irqfd* tmp; struct eventfd_ctx* eventfd; - // get the eventfd from the file descriptor + if (WARN_ON_ONCE(!dm || !args)) { + return -EINVAL; + } + eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) { return PTR_ERR(eventfd); } - // find the irqfd associated with the eventfd and shutdown it mutex_lock(&dm->irqfds_lock); list_for_each_entry_safe(irqfd, tmp, &dm->irqfds, list) { @@ -271,7 +275,6 @@ static int bao_irqfd_deassign(struct bao_dm* dm, struct bao_irqfd* args) } mutex_unlock(&dm->irqfds_lock); - // release the eventfd eventfd_ctx_put(eventfd); return 0; @@ -279,17 +282,14 @@ static int bao_irqfd_deassign(struct bao_dm* dm, struct bao_irqfd* args) int bao_irqfd_server_config(struct bao_dm* dm, struct bao_irqfd* config) { - // check if the DM and 
configuration are valid - if (WARN_ON(!dm || !config)) { + if (WARN_ON_ONCE(!dm || !config)) { return -EINVAL; } - // deassign the eventfd if (config->flags & BAO_IRQFD_FLAG_DEASSIGN) { return bao_irqfd_deassign(dm, config); } - // assign the eventfd return bao_irqfd_assign(dm, config); } @@ -297,14 +297,16 @@ int bao_irqfd_server_init(struct bao_dm* dm) { char name[BAO_NAME_MAX_LEN]; + if (WARN_ON_ONCE(!dm)) { + return -EINVAL; + } + mutex_init(&dm->irqfds_lock); INIT_LIST_HEAD(&dm->irqfds); - // create a new name for the irqfd server based on type and DM ID snprintf(name, sizeof(name), "bao-ioirqfds%u", dm->info.id); - // allocate a new workqueue for the irqfd - dm->irqfd_server = alloc_workqueue(name, 0, 0); + dm->irqfd_server = alloc_workqueue(name, WQ_UNBOUND | WQ_HIGHPRI, 0); if (!dm->irqfd_server) { return -ENOMEM; } @@ -314,12 +316,18 @@ int bao_irqfd_server_init(struct bao_dm* dm) void bao_irqfd_server_destroy(struct bao_dm* dm) { - struct irqfd *irqfd, *next; + struct irqfd* irqfd; + struct irqfd* next; + + if (WARN_ON_ONCE(!dm)) { + return; + } + + if (dm->irqfd_server) { + destroy_workqueue(dm->irqfd_server); + } - // destroy the workqueue - destroy_workqueue(dm->irqfd_server); mutex_lock(&dm->irqfds_lock); - // shutdown all the irqfds list_for_each_entry_safe(irqfd, next, &dm->irqfds, list) bao_irqfd_shutdown(irqfd); mutex_unlock(&dm->irqfds_lock); } diff --git a/ipc/Makefile b/ipc/Makefile index f520bd1..59359d1 100644 --- a/ipc/Makefile +++ b/ipc/Makefile @@ -1,3 +1,4 @@ # Object files and module definition obj-m += bao-ipc.o -ipc-y := ipcshmem.o +bao-ipc-y := ipcshmem.o +ccflags-y += -I$(PWD)/../include \ No newline at end of file diff --git a/ipc/README.md b/ipc/README.md index 94d66a0..c76dece 100644 --- a/ipc/README.md +++ b/ipc/README.md @@ -16,13 +16,13 @@ export KERN_DIR=path/to/your/linux make ipc ``` -3. Copy the `ipc.ko` file to your target filesystem as `bao_ipc.ko`. +3. Copy the `bao-ipc.ko` file to your target filesystem. 
### Run instructions 1. When the Backend VM boots up, insert the kernel module: ``` -insmod bao_ipc.ko +insmod bao-ipc.ko ``` -2. From now on, you should be able to see the `/dev/baoipcX` device node if configured any Bao IPC object. \ No newline at end of file +2. From now on, you should be able to see the `/dev/baoipcX` device node if configured any Bao IPC object. diff --git a/ipc/ipcshmem.c b/ipc/ipcshmem.c index fa50df3..a24eb77 100644 --- a/ipc/ipcshmem.c +++ b/ipc/ipcshmem.c @@ -1,328 +1,256 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +// SPDX-License-Identifier: GPL-2.0 /* - * Bao Hypervisor IPC Through Shared-memory Sample Driver + * Bao Hypervisor IPC Through Shared-memory Driver * * Copyright (c) Bao Project and Contributors. All rights reserved. - * - * Authors: - * David Cerdeira and José Martins */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include -#include -#include -#include -#include -#include -#include +#include +#include #include +#include -#if defined(CONFIG_ARM64) || defined(CONFIG_ARM) -#include -#include -#elif CONFIG_RISCV -#include -#endif - -#define DEV_NAME "baoipc" -#define MAX_DEVICES 16 -#define NAME_LEN 32 - -static dev_t bao_ipcshmem_devt; -struct class* cl; +#define BAO_IPCSHMEM_NAME_LEN 16 struct bao_ipcshmem { - struct cdev cdev; - struct device* dev; - + struct miscdevice miscdev; int id; - char label[NAME_LEN]; + char label[BAO_IPCSHMEM_NAME_LEN]; void* read_base; size_t read_size; void* write_base; size_t write_size; - void* physical_base; + phys_addr_t physical_base; size_t shmem_size; + void* shmem_base_addr; }; -#ifdef CONFIG_ARM64 -static uint64_t bao_ipcshmem_notify(struct bao_ipcshmem* dev) +static int bao_ipcshmem_mmap(struct file* filp, struct vm_area_struct* vma) { - register uint64_t x0 asm("x0") = - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_VENDOR_HYP, 1); - register uint64_t x1 asm("x1") 
= dev->id; - register uint64_t x2 asm("x2") = 0; + struct bao_ipcshmem* bao = filp->private_data; + unsigned long vsize = vma->vm_end - vma->vm_start; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + phys_addr_t paddr; - asm volatile("hvc 0\t\n" : "=r"(x0) : "r"(x0), "r"(x1), "r"(x2)); + if (!vsize) { + return -EINVAL; + } - return x0; -} -#elif CONFIG_ARM -static uint32_t bao_ipcshmem_notify(struct bao_ipcshmem* dev) -{ - register uint32_t r0 asm("r0") = - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, ARM_SMCCC_OWNER_VENDOR_HYP, 1); - register uint32_t r1 asm("r1") = dev->id; - register uint32_t r2 asm("r2") = 0; + if (offset >= bao->shmem_size || vsize > bao->shmem_size - offset) { + return -EINVAL; + } - asm volatile("hvc #0\t\n" : "=r"(r0) : "r"(r0), "r"(r1), "r"(r2)); + paddr = bao->physical_base + offset; - return r0; -} -#elif CONFIG_RISCV -static uint64_t bao_ipcshmem_notify(struct bao_ipcshmem* dev) -{ - struct sbiret ret = sbi_ecall(0x08000ba0, 1, dev->id, 0, 0, 0, 0, 0); + if (!PAGE_ALIGNED(paddr)) { + return -EINVAL; + } - return ret.error; + return remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT, vsize, vma->vm_page_prot); } -#endif -static int bao_ipcshmem_mmap_fops(struct file* filp, struct vm_area_struct* vma) +static ssize_t bao_ipcshmem_read(struct file* filp, char __user* buf, size_t count, loff_t* ppos) { struct bao_ipcshmem* bao = filp->private_data; + size_t available; - unsigned long vsize = vma->vm_end - vma->vm_start; - - if (remap_pfn_range(vma, vma->vm_start, (unsigned long)bao->physical_base >> PAGE_SHIFT, vsize, - vma->vm_page_prot)) { - return -EFAULT; + if (*ppos >= bao->read_size) { + return 0; } - return 0; -} - -static ssize_t bao_ipcshmem_read_fops(struct file* filp, char* buf, size_t count, loff_t* ppos) -{ - struct bao_ipcshmem* bao_ipcshmem = filp->private_data; - unsigned long missing = 0; - size_t len = 0; - - len = strnlen(bao_ipcshmem->read_base, bao_ipcshmem->read_size); + available = 
bao->read_size - *ppos; + count = min(count, available); - if (*ppos >= len) { - return 0; - } - if ((len - *ppos) < count) { - count = len - *ppos; + if (copy_to_user(buf, bao->read_base + *ppos, count)) { + return -EFAULT; } - missing = copy_to_user(buf, bao_ipcshmem->read_base + *ppos, count); - if (missing != 0) { - count = count - missing; - } *ppos += count; - return count; } -static ssize_t bao_ipcshmem_write_fops(struct file* filp, const char* buf, size_t count, +static ssize_t bao_ipcshmem_write(struct file* filp, const char __user* buf, size_t count, loff_t* ppos) { - struct bao_ipcshmem* bao_ipcshmem = filp->private_data; - unsigned long missing = 0; + struct bao_ipcshmem* bao = filp->private_data; + size_t available; - if (*ppos >= bao_ipcshmem->write_size) { + if (*ppos >= bao->write_size) { return 0; } - if (count > bao_ipcshmem->write_size) { - count = bao_ipcshmem->write_size; - } - if ((*ppos + count) > bao_ipcshmem->write_size) { - count = bao_ipcshmem->write_size - *ppos; - } - missing = copy_from_user(bao_ipcshmem->write_base + *ppos, buf, count); - if (missing != 0) { - count = count - missing; + available = bao->write_size - *ppos; + count = min(count, available); + + if (copy_from_user(bao->write_base + *ppos, buf, count)) { + return -EFAULT; } + *ppos += count; - bao_ipcshmem_notify(bao_ipcshmem); + /* Notify Bao hypervisor */ + bao_ipcshmem_hypercall(bao->id); return count; } -static int bao_ipcshmem_open_fops(struct inode* inode, struct file* filp) +static int bao_ipcshmem_open(struct inode* inode, struct file* filp) { - struct bao_ipcshmem* bao_ipcshmem = container_of(inode->i_cdev, struct bao_ipcshmem, cdev); - filp->private_data = bao_ipcshmem; + struct bao_ipcshmem* bao; - kobject_get(&bao_ipcshmem->dev->kobj); + bao = container_of(filp->private_data, struct bao_ipcshmem, miscdev); + filp->private_data = bao; return 0; } -static int bao_ipcshmem_release_fops(struct inode* inode, struct file* filp) +static int 
bao_ipcshmem_release(struct inode* inode, struct file* filp) { - struct bao_ipcshmem* bao_ipcshmem = container_of(inode->i_cdev, struct bao_ipcshmem, cdev); filp->private_data = NULL; - - kobject_put(&bao_ipcshmem->dev->kobj); - return 0; } -static struct file_operations bao_ipcshmem_fops = { .owner = THIS_MODULE, - .read = bao_ipcshmem_read_fops, - .write = bao_ipcshmem_write_fops, - .mmap = bao_ipcshmem_mmap_fops, - .open = bao_ipcshmem_open_fops, - .release = bao_ipcshmem_release_fops }; +static const struct file_operations bao_ipcshmem_fops = { + .owner = THIS_MODULE, + .read = bao_ipcshmem_read, + .write = bao_ipcshmem_write, + .mmap = bao_ipcshmem_mmap, + .open = bao_ipcshmem_open, + .release = bao_ipcshmem_release, +}; -static int bao_ipcshmem_register(struct platform_device* pdev) +static int bao_ipcshmem_probe(struct platform_device* pdev) { - int ret = 0; - struct device* dev = &(pdev->dev); + struct device* dev = &pdev->dev; struct device_node* np = dev->of_node; - struct module* owner = THIS_MODULE; struct resource* r; - dev_t devt; - resource_size_t shmem_size; - u32 write_offset, read_offset, write_size, read_size; - bool rd_in_range, wr_in_range, disjoint; - void* shmem_base_addr = NULL; - int id = -1; struct bao_ipcshmem* bao; + resource_size_t shmem_size; + u32 write_offset; + u32 read_offset; + u32 write_size; + u32 read_size; + u32 id; + bool rd_in_range; + bool wr_in_range; + bool disjoint; + int ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - return -EINVAL; + if (!r) { + dev_err(dev, "missing shared memory resource\n"); + return -ENODEV; + } + + ret = of_property_read_u32(np, "id", &id); + if (ret) { + dev_err(dev, "missing or invalid 'id' property\n"); + return ret; } - of_property_read_u32_index(np, "read-channel", 0, &read_offset); - of_property_read_u32_index(np, "read-channel", 1, &read_size); - of_property_read_u32_index(np, "write-channel", 0, &write_offset); - of_property_read_u32_index(np, 
"write-channel", 1, &write_size); - rd_in_range = (r->start + read_offset + read_size) < r->end; - wr_in_range = (r->start + write_offset + write_size) < r->end; + ret = of_property_read_u32_index(np, "read-channel", 0, &read_offset); + if (ret) { + dev_err(dev, "failed to read 'read-channel' offset: %d\n", ret); + return ret; + } + + ret = of_property_read_u32_index(np, "read-channel", 1, &read_size); + if (ret) { + dev_err(dev, "failed to read 'read-channel' size: %d\n", ret); + return ret; + } + + ret = of_property_read_u32_index(np, "write-channel", 0, &write_offset); + if (ret) { + dev_err(dev, "failed to read 'write-channel' offset: %d\n", ret); + return ret; + } + + ret = of_property_read_u32_index(np, "write-channel", 1, &write_size); + if (ret) { + dev_err(dev, "failed to read 'write-channel' size: %d\n", ret); + return ret; + } + + shmem_size = resource_size(r); + + rd_in_range = (read_offset + read_size) <= shmem_size; + wr_in_range = (write_offset + write_size) <= shmem_size; disjoint = ((read_offset + read_size) <= write_offset) || ((write_offset + write_size) <= read_offset); if (!rd_in_range || !wr_in_range || !disjoint) { - dev_err(&pdev->dev, "invalid channel layout\n"); - dev_err(&pdev->dev, "rd_in_range = %d, wr_in_range = %d, disjoint = %d\n", rd_in_range, - wr_in_range, disjoint); + dev_err(dev, "invalid read/write channel ranges\n"); return -EINVAL; } - shmem_size = r->end - r->start + 1; - shmem_base_addr = memremap(r->start, shmem_size, MEMREMAP_WB); - if (shmem_base_addr == NULL) { + bao = devm_kzalloc(dev, sizeof(*bao), GFP_KERNEL); + if (!bao) { return -ENOMEM; } - of_property_read_u32(np, "id", &id); - if (id >= MAX_DEVICES) { - dev_err(&pdev->dev, "invalid id %d\n", id); - ret = -EINVAL; - goto err_unmap; + bao->shmem_base_addr = devm_memremap(dev, r->start, shmem_size, MEMREMAP_WB); + if (!bao->shmem_base_addr) { + dev_err(dev, "failed to remap shared memory\n"); + return -ENOMEM; } - bao = devm_kzalloc(&pdev->dev, sizeof(struct 
bao_ipcshmem), GFP_KERNEL); - if (bao == NULL) { - ret = -ENOMEM; - goto err_unmap; - } - snprintf(bao->label, NAME_LEN, "%s%d", DEV_NAME, id); bao->id = id; bao->read_size = read_size; bao->write_size = write_size; - bao->read_base = shmem_base_addr + read_offset; - bao->write_base = shmem_base_addr + write_offset; - bao->physical_base = (void*)r->start; + bao->read_base = (u8*)bao->shmem_base_addr + read_offset; + bao->write_base = (u8*)bao->shmem_base_addr + write_offset; + bao->physical_base = r->start; bao->shmem_size = shmem_size; - cdev_init(&bao->cdev, &bao_ipcshmem_fops); - bao->cdev.owner = owner; + scnprintf(bao->label, BAO_IPCSHMEM_NAME_LEN, "baoipc%d", id); + + bao->miscdev.minor = MISC_DYNAMIC_MINOR; + bao->miscdev.name = bao->label; + bao->miscdev.fops = &bao_ipcshmem_fops; + bao->miscdev.parent = dev; - devt = MKDEV(MAJOR(bao_ipcshmem_devt), id); - ret = cdev_add(&bao->cdev, devt, 1); + ret = misc_register(&bao->miscdev); if (ret) { - goto err_unmap; + dev_err(dev, "failed to register misc device: %d\n", ret); + return ret; } - bao->dev = device_create(cl, &pdev->dev, devt, bao, bao->label); - if (IS_ERR(bao->dev)) { - ret = PTR_ERR(bao->dev); - goto err_cdev; - } - dev_set_drvdata(bao->dev, bao); + platform_set_drvdata(pdev, bao); + dev_info(dev, "Bao IPC shared memory device '%s' registered\n", bao->label); return 0; - -err_cdev: - cdev_del(&bao->cdev); -err_unmap: - memunmap(shmem_base_addr); - - dev_err(&pdev->dev, "failed initialization\n"); - return ret; } -static void bao_ipcshmem_unregister(struct platform_device* pdev) +static void bao_ipcshmem_remove(struct platform_device* pdev) { - /* TODO */ - return; + struct bao_ipcshmem* bao = platform_get_drvdata(pdev); + + if (bao) { + misc_deregister(&bao->miscdev); + } } -static const struct of_device_id of_bao_ipcshmem_match[] = { { - .compatible = "bao,ipcshmem", - }, +static const struct of_device_id of_bao_ipcshmem_match[] = { { .compatible = "bao,ipcshmem" }, { /* sentinel */ } }; 
MODULE_DEVICE_TABLE(of, of_bao_ipcshmem_match); static struct platform_driver bao_ipcshmem_driver = { - .probe = bao_ipcshmem_register, - .remove = bao_ipcshmem_unregister, - .driver = { - .name = DEV_NAME, - .of_match_table = of_bao_ipcshmem_match, - }, + .probe = bao_ipcshmem_probe, + .remove = bao_ipcshmem_remove, + .driver = { + .name = "baoipc", + .of_match_table = of_bao_ipcshmem_match, + }, }; -static int __init bao_ipcshmem_init(void) -{ - int ret; - - if ((cl = class_create(DEV_NAME)) == NULL) { - ret = -1; - pr_err("unable to class_create " DEV_NAME " device\n"); - return ret; - } - - ret = alloc_chrdev_region(&bao_ipcshmem_devt, 0, MAX_DEVICES, DEV_NAME); - if (ret < 0) { - pr_err("unable to alloc_chrdev_region " DEV_NAME " device\n"); - return ret; - } - - return platform_driver_register(&bao_ipcshmem_driver); -} - -static void __exit bao_ipcshmem_exit(void) -{ - platform_driver_unregister(&bao_ipcshmem_driver); - unregister_chrdev(bao_ipcshmem_devt, DEV_NAME); - class_destroy(cl); -} - -module_init(bao_ipcshmem_init); -module_exit(bao_ipcshmem_exit); +module_platform_driver(bao_ipcshmem_driver); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Cerdeira"); -MODULE_AUTHOR("José Martins"); -MODULE_DESCRIPTION("bao ipc through shared-memory sample driver"); +MODULE_AUTHOR("David Cerdeira "); +MODULE_AUTHOR("José Martins "); +MODULE_AUTHOR("João Peixoto "); +MODULE_DESCRIPTION("Bao Hypervisor IPC Through Shared-memory Driver");