l4re-base-25.08.0

This commit is contained in:
2025-09-12 15:55:45 +02:00
commit d959eaab98
37938 changed files with 9382688 additions and 0 deletions

View File

@@ -0,0 +1,20 @@
# Build control file for the l4virtio include directory.
# PKGDIR points at the package root, L4DIR at the L4Re build system root;
# both may be overridden from the environment/command line (?=).
PKGDIR ?= ..
L4DIR ?= $(PKGDIR)/../..
# Additional files shipped by this directory beyond the defaults —
# presumably the public headers installed for clients and servers of the
# l4virtio transport (TODO confirm against the L4Re include.mk conventions).
EXTRA_TARGET += \
l4virtio \
virtqueue \
client/l4virtio \
client/virtio-block \
client/virtio-net \
server/virtio \
server/l4virtio \
server/virtio-block \
server/virtio-console \
server/virtio-console-device \
server/virtio-gpio-device \
server/virtio-i2c-device \
server/virtio-rng-device \
server/virtio-scmi-device
# Pull in the generic L4Re build rules that consume the variables above.
include $(L4DIR)/mk/include.mk

View File

@@ -0,0 +1,361 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020, 2022, 2024 Kernkonzept GmbH.
* Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
*
*/
#pragma once
#include <l4/sys/factory>
#include <l4/sys/semaphore>
#include <l4/re/dataspace>
#include <l4/re/env>
#include <l4/re/util/unique_cap>
#include <l4/re/util/object_registry>
#include <l4/re/error_helper>
#include <l4/util/atomic.h>
#include <l4/util/bitops.h>
#include <l4/l4virtio/l4virtio>
#include <l4/l4virtio/virtqueue>
#include <l4/sys/consts.h>
#include <cstring>
namespace L4virtio { namespace Driver {
/**
 * \brief Client-side implementation for a general virtio device.
 *
 * Wraps the L4virtio IPC channel to a device, manages the shared config
 * page, the device-notification IRQ and (optionally) a semaphore for
 * notifications from the device.
 */
class Device
{
public:
  /**
   * Contacts the device and starts the initial handshake.
   *
   * \param srvcap        Capability for device communication.
   * \param manage_notify Set up a semaphore for notifications from
   *                      the device. See below.
   *
   * \throws L4::Runtime_error if the initialisation fails
   *
   * This function contacts the server, sets up the notification
   * channels and the configuration dataspace. After this is done,
   * the caller can set up any dataspaces it needs. The initialisation
   * then needs to be finished by calling driver_acknowledge().
   *
   * Per default this function creates and registers a semaphore for receiving
   * notification from the device. This semaphore is used in the blocking
   * functions send_and_wait(), wait() and next_used().
   *
   * When `manage_notify` is false, then the caller may manually register
   * and handle notification interrupts from the device. This is for example
   * useful, when the client runs in an application with a server loop.
   */
  void driver_connect(L4::Cap<L4virtio::Device> srvcap, bool manage_notify = true)
  {
    _device = srvcap;

    // Device addresses are handed out linearly, starting above the first
    // superpage; see next_device_address().
    _next_devaddr = L4_SUPERPAGESIZE;

    auto *e = L4Re::Env::env();

    // Set up the virtio configuration page.
    _config_cap = L4Re::chkcap(L4Re::Util::make_unique_cap<L4Re::Dataspace>(),
                               "Allocate config dataspace capability");

    l4_addr_t ds_offset;
    L4Re::chksys(_device->device_config(_config_cap.get(), &ds_offset),
                 "Request virtio config page");

    if (ds_offset & ~L4_PAGEMASK)
      // Error text fixed: this branch fires when the offset is NOT aligned.
      L4Re::chksys(-L4_EINVAL, "Virtio config page is not page aligned.");

    L4Re::chksys(e->rm()->attach(&_config, L4_PAGESIZE,
                                 L4Re::Rm::F::Search_addr | L4Re::Rm::F::RW,
                                 L4::Ipc::make_cap_rw(_config_cap.get()), ds_offset,
                                 L4_PAGESHIFT),
                 "Attach config dataspace");

    // Sanity-check the shared config page before using it.
    if (memcmp(&_config->magic, "virt", 4) != 0)
      L4Re::chksys(-L4_ENODEV, "Device config has wrong magic value");

    if (_config->version != 2)
      L4Re::chksys(-L4_ENODEV, "Invalid virtio version, must be 2");

    // Standard virtio status handshake: reset, then ACKNOWLEDGE, then DRIVER.
    _device->set_status(0); // reset
    int status = L4VIRTIO_STATUS_ACKNOWLEDGE;
    _device->set_status(status);

    status |= L4VIRTIO_STATUS_DRIVER;
    _device->set_status(status);

    if (_config->fail_state())
      L4Re::chksys(-L4_EIO, "Device failure during initialisation.");

    // Set up the interrupt used to notify the device about events.
    // (only supporting one interrupt with index 0 at the moment)
    _host_irq = L4Re::chkcap(L4Re::Util::make_unique_cap<L4::Irq>(),
                             "Allocate host IRQ capability");

    L4Re::chksys(_device->device_notification_irq(0, _host_irq.get()),
                 "Request device notification interrupt.");

    // Set up the interrupt to get notifications from the device.
    // (only supporting one interrupt with index 0 at the moment)
    if (manage_notify)
      {
        _driver_notification =
          L4Re::chkcap(L4Re::Util::make_unique_cap<L4::Semaphore>(),
                       "Allocate notification capability");

        L4Re::chksys(l4_error(e->factory()->create(_driver_notification.get())),
                     "Create semaphore for notifications from device");

        L4Re::chksys(_device->bind(0, _driver_notification.get()),
                     "Bind driver notification interrupt");
      }
  }

  /**
   * Register a triggerable to receive notifications from the device.
   *
   * \param index      Index of the interrupt.
   * \param[out] irq   Triggerable to register for notifications.
   *
   * \return 0 on success, negative error code otherwise.
   */
  int bind_notification_irq(unsigned index, L4::Cap<L4::Triggerable> irq) const
  { return l4_error(_device->bind(index, irq)); }

  /// Return true if the device is in a fail state.
  bool fail_state() const { return _config->fail_state(); }

  /**
   * Check if a particular feature bit was negotiated with the device.
   * The result is only valid after driver_acknowledge() was called
   * (when the handshake with the device was completed).
   *
   * \param feat The feature bit.
   *
   * \retval true   The feature is supported by both driver and device.
   * \retval false  The feature is not supported by the driver and/or device.
   */
  bool feature_negotiated(unsigned int feat) const
  { return l4virtio_get_feature(_config->driver_features_map, feat); }

  /**
   * Finalize handshake with the device.
   *
   * Must be called after all queues have been set up and before the first
   * request is sent. It is still possible to add more shared dataspaces
   * after the handshake has been finished.
   *
   * \retval L4_EOK   Handshake finished successfully.
   * \retval -L4_EIO  Device entered a fail state during the handshake.
   *
   * \throws L4::Runtime_error on legacy devices or failed feature
   *         negotiation.
   */
  int driver_acknowledge()
  {
    // Only virtio 1.0 (non-legacy) devices are supported.
    if (!l4virtio_get_feature(_config->dev_features_map,
                              L4VIRTIO_FEATURE_VERSION_1))
      L4Re::chksys(-L4_ENODEV,
                   "Require Virtio 1.0 device; Legacy device not supported.");

    // Negotiate: keep only features both sides support.
    _config->driver_features_map[0] &= _config->dev_features_map[0];
    _config->driver_features_map[1] &= _config->dev_features_map[1];

    _device->set_status(_config->status | L4VIRTIO_STATUS_FEATURES_OK);

    // The device clears FEATURES_OK if it rejects the selection.
    if (!(_config->status & L4VIRTIO_STATUS_FEATURES_OK))
      L4Re::chksys(-L4_EINVAL, "Negotiation of device features.");

    _device->set_status(_config->status | L4VIRTIO_STATUS_DRIVER_OK);

    if (_config->fail_state())
      return -L4_EIO;

    return L4_EOK;
  }

  /**
   * Share a dataspace with the device.
   *
   * \param ds      Dataspace to share with the device.
   * \param offset  Offset in dataspace where the shared part starts.
   * \param size    Total size in bytes of the shared space.
   * \param devaddr Start of shared space in the device address space.
   *
   * \return 0 on success, negative error code otherwise.
   *
   * Although this function allows to share only a part of the given dataspace
   * for convenience, the granularity of sharing is always the dataspace level.
   * Thus, the remainder of the dataspace is not protected from the device.
   *
   * When communicating with the device, addresses must be given with respect
   * to the device address space. This is not the same as the virtual address
   * space of the client in order to not leak information about the address
   * space layout.
   */
  int register_ds(L4::Cap<L4Re::Dataspace> ds, l4_umword_t offset,
                  l4_umword_t size, l4_uint64_t *devaddr)
  {
    *devaddr = next_device_address(size);
    return _device->register_ds(L4::Ipc::make_cap_rw(ds), *devaddr, offset, size);
  }

  /**
   * Send the virtqueue configuration to the device.
   *
   * \param num        Number of queue to configure.
   * \param size       Size of rings in the queue, must be a power of 2.
   * \param desc_addr  Address of descriptor table (device address).
   * \param avail_addr Address of available ring (device address).
   * \param used_addr  Address of used ring (device address).
   *
   * \return 0 on success, negative error code otherwise.
   */
  int config_queue(int num, unsigned size, l4_uint64_t desc_addr,
                   l4_uint64_t avail_addr, l4_uint64_t used_addr)
  {
    auto *queueconf = &_config->queues()[num];
    queueconf->num = size;
    queueconf->desc_addr = desc_addr;
    queueconf->avail_addr = avail_addr;
    queueconf->used_addr = used_addr;
    queueconf->ready = 1;

    return _device->config_queue(num);
  }

  /**
   * Maximum queue size allowed by the device.
   *
   * \param num Number of queue for which to determine the maximum size.
   */
  int max_queue_size(int num) const
  {
    return _config->queues()[num].num_max;
  }

  /**
   * Send a request to the device and wait for it to be processed.
   *
   * \param queue  Queue that contains the request in its descriptor table.
   * \param descno Index of first entry in descriptor table.
   *
   * \retval L4_EOK      The request was processed.
   * \retval -L4_EINVAL  A different descriptor than the one sent came back.
   * \retval <0          IPC error while waiting for the notification.
   *
   * This function provides a simple mechanism to send requests
   * synchronously. It must not be used with other requests at the same
   * time as it directly waits for a notification on the device irq cap.
   *
   * \pre driver_connect() was called with manage_notify.
   */
  int send_and_wait(Virtqueue &queue, l4_uint16_t descno)
  {
    send(queue, descno);

    // wait for a reply, we assume that no other
    // request will get in the way.
    auto head = wait_for_next_used(queue);

    if (head < 0)
      return head;

    return (head == descno) ? L4_EOK : -L4_EINVAL;
  }

  /**
   * Wait for a notification from the device.
   *
   * \param index Notification slot to wait for.
   *
   * \return 0 on success, IPC error code otherwise;
   *         -L4_EEXIST for any slot other than 0 (only one is supported).
   *
   * \pre driver_connect() was called with manage_notify.
   */
  int wait(int index) const
  {
    if (index != 0)
      return -L4_EEXIST;

    return l4_ipc_error(_driver_notification->down(), l4_utcb());
  }

  /**
   * Wait for the next item to arrive in the used queue and return it.
   *
   * \param queue     A queue.
   * \param[out] len  (optional) Size of valid data in finished block.
   *                  Note that this is the value reported by the device,
   *                  which may set it to a value that is larger than the
   *                  original buffer size.
   *
   * \retval >=0 Descriptor number of item removed from used queue.
   * \retval <0  IPC error while waiting for notification.
   *
   * The call blocks until the next item is available in the used queue.
   *
   * \pre driver_connect() was called with manage_notify.
   */
  int wait_for_next_used(Virtqueue &queue, l4_uint32_t *len = nullptr) const
  {
    while (true)
      {
        int err = wait(0);

        if (err < 0)
          return err;

        auto head = queue.find_next_used(len);
        if (head != Virtqueue::Eoq) // spurious interrupt?
          return head;
      }
  }

  /**
   * Send a request to the device.
   *
   * \param queue  Queue that contains the request in its descriptor table.
   * \param descno Index of first entry in descriptor table.
   */
  void send(Virtqueue &queue, l4_uint16_t descno)
  {
    queue.enqueue_descriptor(descno);
    notify(queue);
  }

  /// Trigger the host IRQ unless the device suppressed notifications.
  void notify(Virtqueue &queue)
  {
    if (!queue.no_notify_host())
      _host_irq->trigger();
  }

private:
  /**
   * Get the next free address, covering the given area.
   *
   * \param size Size of requested area.
   *
   * Builds up a virtual address space for the device.
   * Simply give out the memory linearly, it is unlikely that a client
   * wants to map more than 4GB and it certainly shouldn't reallocate all the
   * time.
   */
  l4_uint64_t next_device_address(l4_umword_t size)
  {
    l4_umword_t ret;
    size = l4_round_page(size);
    do
      {
        ret = _next_devaddr;
        if (l4_umword_t(~0) - ret < size)
          L4Re::chksys(-L4_ENOMEM, "Out of device address space.");
      }
    // Atomic bump so concurrent callers get disjoint ranges.
    while (!l4util_cmpxchg(&_next_devaddr, ret, ret + size));

    return ret;
  }

protected:
  L4::Cap<L4virtio::Device> _device;                            ///< Device IPC channel.
  L4Re::Rm::Unique_region<L4virtio::Device::Config_hdr *> _config; ///< Shared config page.
  l4_umword_t _next_devaddr;                                    ///< Next free device address.
  L4Re::Util::Unique_cap<L4::Semaphore> _driver_notification;   ///< Device-to-driver notify.

private:
  L4Re::Util::Unique_cap<L4::Irq> _host_irq;                    ///< Driver-to-device notify.
  L4Re::Util::Unique_cap<L4Re::Dataspace> _config_cap;          ///< Backing of config page.
};
} }

View File

@@ -0,0 +1,389 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2022, 2024 Kernkonzept GmbH.
* Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
* Manuel von Oltersdorff-Kalettka <manuel.kalettka@kernkonzept.com>
*
*/
#pragma once
#include <l4/sys/factory>
#include <l4/sys/semaphore>
#include <l4/re/dataspace>
#include <l4/re/env>
#include <l4/re/util/unique_cap>
#include <l4/re/util/object_registry>
#include <l4/re/error_helper>
#include <l4/util/atomic.h>
#include <l4/util/bitops.h>
#include <l4/l4virtio/client/l4virtio>
#include <l4/l4virtio/l4virtio>
#include <l4/l4virtio/virtqueue>
#include <l4/l4virtio/virtio_block.h>
#include <l4/sys/consts.h>
#include <cstring>
#include <vector>
#include <functional>
namespace L4virtio { namespace Driver {
/**
 * Simple class for accessing a virtio block device synchronously.
 */
class Block_device : public Device
{
public:
  /// Completion callback; receives the request's virtio status byte.
  typedef std::function<void(unsigned char)> Callback;

private:
  enum { Header_size = sizeof(l4virtio_block_header_t) };

  // Book-keeping for an in-flight request: last descriptor of the chain
  // and the (optional) completion callback.
  struct Request
  {
    l4_uint16_t tail;
    Callback callback;

    Request() : tail(Virtqueue::Eoq), callback(0) {}
  };

public:
  /**
   * Handle to an ongoing request.
   */
  class Handle
  {
    friend Block_device;
    l4_uint16_t head;

    explicit Handle(l4_uint16_t descno) : head(descno) {}

  public:
    Handle() : head(Virtqueue::Eoq) {}
    bool valid() const { return head != Virtqueue::Eoq; }
  };

  /**
   * Establish a connection to the device and set up shared memory.
   *
   * \param srvcap  IPC capability of the channel to the server.
   * \param usermem Size of additional memory to share with device.
   * \param[out] userdata     Pointer to the region of user-usable memory.
   * \param[out] user_devaddr Address of user-usable memory in device address
   *                          space.
   * \param qds     External queue dataspace. If this capability is
   *                invalid, the function will attempt to allocate a
   *                dataspace on its own. Note that the external
   *                queue dataspace must be large enough.
   * \param fmask0  Feature bits 0..31 that the driver supports.
   * \param fmask1  Feature bits 32..63 that the driver supports.
   *
   * This function starts a handshake with the device and sets up the
   * virtqueues for communication and the additional data structures for
   * the block device. It will also allocate and share additional memory
   * that the caller then can use freely, i.e. normally this memory would
   * be used as a reception buffer. The caller may also decide to not make use
   * of this convenience function and request 0 bytes in usermem. Then it has
   * to allocate the block buffers for sending/receiving payload manually and
   * share them using register_ds().
   */
  void setup_device(L4::Cap<L4virtio::Device> srvcap, l4_size_t usermem,
                    void **userdata, Ptr<void> &user_devaddr,
                    L4::Cap<L4Re::Dataspace> qds = L4::Cap<L4Re::Dataspace>(),
                    l4_uint32_t fmask0 = -1U, l4_uint32_t fmask1 = -1U)
  {
    // Contact device.
    driver_connect(srvcap);

    if (_config->device != L4VIRTIO_ID_BLOCK)
      L4Re::chksys(-L4_ENODEV, "Device is not a block device.");

    if (_config->num_queues != 1)
      L4Re::chksys(-L4_EINVAL, "Invalid number of queues reported.");

    // Memory is shared in one large dataspace which contains queues,
    // space for header/status and additional user-defined memory.
    unsigned queuesz = max_queue_size(0);
    l4_size_t totalsz = l4_round_page(usermem);

    // Headers are aligned to their natural alignment behind the queue data.
    l4_uint64_t const header_offset =
      l4_round_size(_queue.total_size(queuesz),
                    l4util_bsr(alignof(l4virtio_block_header_t)));
    l4_uint64_t const status_offset = header_offset + queuesz * Header_size;
    l4_uint64_t const usermem_offset = l4_round_page(status_offset + queuesz);

    // reserve space for one header/status per descriptor
    // TODO Should be reduced to 1/3 but this way no freelist is needed.
    totalsz += usermem_offset;

    auto *e = L4Re::Env::env();

    if (!qds.is_valid())
      {
        _ds = L4Re::chkcap(L4Re::Util::make_unique_cap<L4Re::Dataspace>(),
                           "Allocate queue dataspace capability");
        L4Re::chksys(e->mem_alloc()->alloc(totalsz, _ds.get(),
                                           L4Re::Mem_alloc::Continuous
                                           | L4Re::Mem_alloc::Pinned),
                     "Allocate memory for virtio structures");
        _queue_ds = _ds.get();
      }
    else
      {
        if (qds->size() < totalsz)
          L4Re::chksys(-L4_EINVAL, "External queue dataspace too small.");
        _queue_ds = qds;
      }

    // Now sort out which region goes where in the dataspace.
    L4Re::chksys(e->rm()->attach(&_queue_region, totalsz,
                                 L4Re::Rm::F::Search_addr | L4Re::Rm::F::RW,
                                 L4::Ipc::make_cap_rw(_queue_ds), 0,
                                 L4_PAGESHIFT),
                 "Attach dataspace for virtio structures");

    l4_uint64_t devaddr;
    L4Re::chksys(register_ds(_queue_ds, 0, totalsz, &devaddr),
                 "Register queue dataspace with device");

    _queue.init_queue(queuesz, _queue_region.get());

    config_queue(0, queuesz, devaddr, devaddr + _queue.avail_offset(),
                 devaddr + _queue.used_offset());

    _header_addr = devaddr + header_offset;
    _headers = reinterpret_cast<l4virtio_block_header_t *>(_queue_region.get()
                                                           + header_offset);

    _status_addr = devaddr + status_offset;
    _status = _queue_region.get() + status_offset;

    user_devaddr = Ptr<void>(devaddr + usermem_offset);
    if (userdata)
      *userdata = _queue_region.get() + usermem_offset;

    // setup the callback mechanism
    _pending.assign(queuesz, Request());

    // Finish handshake with device.
    _config->driver_features_map[0] = fmask0;
    _config->driver_features_map[1] = fmask1;
    driver_acknowledge();
  }

  /**
   * Return a reference to the device configuration.
   */
  l4virtio_block_config_t const &device_config() const
  {
    return *_config->device_config<l4virtio_block_config_t>();
  }

  /**
   * Start the setup of a new request.
   *
   * \param sector   First sector to write to/read from.
   * \param type     Request type.
   * \param callback Function to call, when the request is finished.
   *                 May be 0 for synchronous requests.
   *
   * \return Handle for the new request; invalid if no descriptor was free.
   */
  Handle start_request(l4_uint64_t sector, l4_uint32_t type,
                       Callback callback)
  {
    l4_uint16_t descno = _queue.alloc_descriptor();
    if (descno == Virtqueue::Eoq)
      return Handle(Virtqueue::Eoq);

    L4virtio::Virtqueue::Desc &desc = _queue.desc(descno);
    Request &req = _pending[descno];

    // setup the header
    l4virtio_block_header_t &head = _headers[descno];
    head.type = type;
    head.ioprio = 0;
    head.sector = sector;

    // and put it in the descriptor
    desc.addr = Ptr<void>(_header_addr + descno * Header_size);
    desc.len = Header_size;
    desc.flags.raw = 0; // no write, no indirect

    req.tail = descno;
    req.callback = callback;

    return Handle(descno);
  }

  /**
   * Add a data block to a request that has already been set up.
   *
   * \param handle Handle to request previously set up with start_request().
   * \param addr   Address of data block in device address space.
   * \param size   Size of data block.
   *
   * \retval L4_OK      Block was successfully added.
   * \retval -L4_EAGAIN No descriptors available. Try again later.
   */
  int add_block(Handle handle, Ptr<void> addr, l4_uint32_t size)
  {
    l4_uint16_t descno = _queue.alloc_descriptor();
    if (descno == Virtqueue::Eoq)
      return -L4_EAGAIN;

    Request &req = _pending[handle.head];

    // Chain the new descriptor behind the current tail.
    L4virtio::Virtqueue::Desc &desc = _queue.desc(descno);
    L4virtio::Virtqueue::Desc &prev = _queue.desc(req.tail);

    prev.next = descno;
    prev.flags.next() = true;

    desc.addr = addr;
    desc.len = size;
    desc.flags.raw = 0;
    // NOTE(review): per the virtio spec, data buffers of IN (read, type 0)
    // requests are device-writable while OUT/FLUSH buffers are not; this
    // condition marks the buffer writable for type > 0 instead. Kept as-is
    // for compatibility with the matching L4 device — confirm intent.
    if (_headers[handle.head].type > 0) // write or flush request
      desc.flags.write() = true;

    req.tail = descno;

    return L4_EOK;
  }

  /**
   * Process request asynchronously.
   *
   * \param handle Handle to request to send to the device
   *
   * \retval L4_OK      Request was successfully scheduled.
   * \retval -L4_EAGAIN No descriptors available. Try again later.
   *
   * Sends a request to the driver that was previously set up
   * with start_request() and add_block(); completion is reported via
   * process_used_queue() and the request's callback.
   */
  int send_request(Handle handle)
  {
    // add the status bit
    auto descno = _queue.alloc_descriptor();
    if (descno == Virtqueue::Eoq)
      return -L4_EAGAIN;

    Request &req = _pending[handle.head];

    // Append the device-writable one-byte status buffer to the chain.
    L4virtio::Virtqueue::Desc &desc = _queue.desc(descno);
    L4virtio::Virtqueue::Desc &prev = _queue.desc(req.tail);

    prev.next = descno;
    prev.flags.next() = true;

    desc.addr = Ptr<void>(_status_addr + descno);
    desc.len = 1;
    desc.flags.raw = 0;
    desc.flags.write() = true;

    req.tail = descno;

    send(_queue, handle.head);

    return L4_EOK;
  }

  /**
   * Process request synchronously.
   *
   * \param handle Handle to request to process.
   *
   * \retval L4_EOK     Request processed successfully.
   * \retval -L4_EAGAIN No descriptors available. Try again later.
   * \retval -L4_EIO    IO error during request processing.
   * \retval -L4_ENOSYS Unsupported request.
   * \retval <0         Another unspecified error occurred.
   *
   * Sends a request to the driver that was previously set up
   * with start_request() and add_block() and waits for it to be
   * executed.
   */
  int process_request(Handle handle)
  {
    // add the status bit
    auto descno = _queue.alloc_descriptor();
    if (descno == Virtqueue::Eoq)
      return -L4_EAGAIN;

    // Append the device-writable one-byte status buffer to the chain.
    L4virtio::Virtqueue::Desc &desc = _queue.desc(descno);
    L4virtio::Virtqueue::Desc &prev = _queue.desc(_pending[handle.head].tail);

    prev.next = descno;
    prev.flags.next() = true;

    desc.addr = Ptr<void>(_status_addr + descno);
    desc.len = 1;
    desc.flags.raw = 0;
    desc.flags.write() = true;

    _pending[handle.head].tail = descno;

    int ret = send_and_wait(_queue, handle.head);
    // Read the status byte before the descriptors are recycled.
    unsigned char status = _status[descno];
    free_request(handle);

    if (ret < 0)
      return ret;

    switch (status)
      {
      case L4VIRTIO_BLOCK_S_OK:     return L4_EOK;
      case L4VIRTIO_BLOCK_S_IOERR:  return -L4_EIO;
      case L4VIRTIO_BLOCK_S_UNSUPP: return -L4_ENOSYS;
      }

    return -L4_EINVAL;
  }

  /**
   * Release the descriptor chain of a finished request.
   *
   * \param handle Handle of the request to free. Invalid handles are
   *               ignored.
   */
  void free_request(Handle handle)
  {
    // Fix: an invalid handle (head == Eoq == 0xFFFF) previously fell through
    // to the unconditional tail assignment below, indexing _pending out of
    // bounds. Bail out early instead.
    if (handle.head == Virtqueue::Eoq)
      return;

    if (_pending[handle.head].tail != Virtqueue::Eoq)
      _queue.free_descriptor(handle.head, _pending[handle.head].tail);
    _pending[handle.head].tail = Virtqueue::Eoq;
  }

  /**
   * Process and free all items in the used queue.
   *
   * If the request has a callback registered it is called after the
   * item has been removed from the queue.
   */
  void process_used_queue()
  {
    for (l4_uint16_t descno = _queue.find_next_used();
         descno != Virtqueue::Eoq;
         descno = _queue.find_next_used()
        )
      {
        if (descno >= _queue.num() || _pending[descno].tail == Virtqueue::Eoq)
          L4Re::chksys(-L4_ENOSYS, "Bad descriptor number");

        // Read the status byte before the descriptors are recycled.
        unsigned char status = _status[descno];
        free_request(Handle(descno));

        if (_pending[descno].callback)
          _pending[descno].callback(status);
      }
  }

protected:
  L4Re::Util::Unique_cap<L4Re::Dataspace> _ds;  ///< Owned queue dataspace (if allocated here).
  L4::Cap<L4Re::Dataspace> _queue_ds;           ///< Dataspace actually used for the queue.

private:
  L4Re::Rm::Unique_region<unsigned char *> _queue_region; ///< Local mapping of the dataspace.
  l4virtio_block_header_t *_headers;            ///< One request header per descriptor.
  unsigned char *_status;                       ///< One status byte per descriptor.
  l4_uint64_t _header_addr;                     ///< Device address of _headers.
  l4_uint64_t _status_addr;                     ///< Device address of _status.
  Virtqueue _queue;                             ///< The single request queue.
  std::vector<Request> _pending;                ///< Per-descriptor request state.
};
} }

View File

@@ -0,0 +1,299 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2022, 2024 Kernkonzept GmbH.
* Author(s): Stephan Gerhold <stephan.gerhold@kernkonzept.com>
*/
#pragma once
#include <cstring>
#include <functional>
#include <l4/cxx/exceptions>
#include <l4/cxx/minmax>
#include <l4/re/dataspace>
#include <l4/re/env>
#include <l4/re/error_helper>
#include <l4/re/util/unique_cap>
#include <l4/sys/consts.h>
#include <l4/l4virtio/client/l4virtio>
#include <l4/l4virtio/l4virtio>
#include <l4/l4virtio/virtio_net.h>
#include <l4/l4virtio/virtqueue>
namespace L4virtio { namespace Driver {
/**
 * Simple class for accessing a virtio net device.
 *
 * Sets up one RX and one TX queue with a fixed-size packet buffer per
 * descriptor in a single shared dataspace.
 */
class Virtio_net_device : public L4virtio::Driver::Device
{
public:
  /**
   * Structure for a network packet (header including data) with maximum size,
   * assuming that no extra features have been negotiated.
   */
  struct Packet
  {
    l4virtio_net_header_t hdr;
    l4_uint8_t data[1500 + 14]; /* MTU + Ethernet header */
  };

  /**
   * Return the maximum receive queue size allowed by the device.
   * wait_rx() will return a descriptor number that is smaller than this size.
   */
  int rx_queue_size() const
  { return max_queue_size(0); }

  /**
   * Return the maximum transmit queue size allowed by the device.
   * tx() will fail if the amount of queued packets exceeds this size.
   */
  int tx_queue_size() const
  { return max_queue_size(1); }

  /**
   * Establish a connection to the device and set up shared memory.
   *
   * \param srvcap IPC capability of the channel to the server.
   *
   * This function starts a handshake with the device and sets up the
   * virtqueues for communication and the additional data structures for
   * the network device.
   */
  void setup_device(L4::Cap<L4virtio::Device> srvcap)
  {
    // Contact device. Notifications are handled manually via an IRQ object,
    // so the base class must not create its notification semaphore.
    driver_connect(srvcap, false);

    if (_config->device != L4VIRTIO_ID_NET)
      L4Re::chksys(-L4_ENODEV, "Device is not a network device.");

    // Queue 0 is RX, queue 1 is TX.
    if (_config->num_queues < 2)
      L4Re::chksys(-L4_EINVAL, "Invalid number of queues reported.");

    auto rxqsz = rx_queue_size();
    auto txqsz = tx_queue_size();

    // Allocate memory for RX/TX queue and RX/TX packet buffers.
    // Layout: [rx queue][tx queue][rx packets][tx packets].
    auto rxqoff = 0;
    // NOTE(review): total_size(n) presumably already covers all n descriptors
    // of a queue; multiplying it by the queue size again looks like a large
    // over-allocation — confirm against Virtqueue::total_size().
    auto txqoff = l4_round_size(rxqoff + rxqsz * _rxq.total_size(rxqsz),
                                L4virtio::Virtqueue::Desc_align);
    auto rxpktoff = l4_round_size(txqoff + txqsz * _txq.total_size(txqsz),
                                  L4virtio::Virtqueue::Desc_align);
    auto txpktoff = rxpktoff + rxqsz * sizeof(Packet);
    auto totalsz = txpktoff + txqsz * sizeof(Packet);

    _queue_ds = L4Re::chkcap(L4Re::Util::make_unique_cap<L4Re::Dataspace>(),
                             "Allocate queue dataspace capability");
    auto *e = L4Re::Env::env();
    L4Re::chksys(e->mem_alloc()->alloc(totalsz, _queue_ds.get(),
                                       L4Re::Mem_alloc::Continuous
                                       | L4Re::Mem_alloc::Pinned),
                 "Allocate memory for virtio structures");

    // Map the dataspace locally ...
    L4Re::chksys(e->rm()->attach(&_queue_region, totalsz,
                                 L4Re::Rm::F::Search_addr | L4Re::Rm::F::RW,
                                 L4::Ipc::make_cap_rw(_queue_ds.get()), 0,
                                 L4_PAGESHIFT),
                 "Attach dataspace for virtio structures");

    // ... and share it with the device to obtain the device address.
    l4_uint64_t devaddr;
    L4Re::chksys(register_ds(_queue_ds.get(), 0, totalsz, &devaddr),
                 "Register queue dataspace with device");

    _rxq.init_queue(rxqsz, _queue_region.get() + rxqoff);
    _txq.init_queue(txqsz, _queue_region.get() + txqoff);

    config_queue(0, rxqsz, devaddr + rxqoff,
                 devaddr + rxqoff + _rxq.avail_offset(),
                 devaddr + rxqoff + _rxq.used_offset());
    config_queue(1, txqsz, devaddr + txqoff,
                 devaddr + txqoff + _txq.avail_offset(),
                 devaddr + txqoff + _txq.used_offset());

    _rxpkts = reinterpret_cast<Packet*>(_queue_region.get() + rxpktoff);
    _txpkts = reinterpret_cast<Packet*>(_queue_region.get() + txpktoff);

    // Prepare descriptors to save work later: descriptor i permanently
    // points at packet buffer i; RX buffers are device-writable.
    for (l4_uint16_t descno = 0; descno < rxqsz; ++descno)
      {
        auto &desc = _rxq.desc(descno);
        desc.addr = L4virtio::Ptr<void>(devaddr + rxpktoff +
                                        descno * sizeof(Packet));
        desc.len = sizeof(Packet);
        desc.flags.write() = 1;
      }
    for (l4_uint16_t descno = 0; descno < txqsz; ++descno)
      {
        auto &desc = _txq.desc(descno);
        desc.addr = L4virtio::Ptr<void>(devaddr + txpktoff +
                                        descno * sizeof(Packet));
        desc.len = sizeof(Packet);
      }

    // Setup notification IRQ (index 0, used by wait_rx()).
    _driver_notification_irq =
      L4Re::chkcap(L4Re::Util::make_unique_cap<L4::Irq>(),
                   "Allocate notification capability");
    L4Re::chksys(l4_error(e->factory()->create(_driver_notification_irq.get())),
                 "Create irq for notifications from device");
    L4Re::chksys(_device->bind(0, _driver_notification_irq.get()),
                 "Bind driver notification interrupt");

    // Finish handshake with device; only VERSION_1 and MAC are negotiated.
    l4virtio_set_feature(_config->driver_features_map,
                         L4VIRTIO_FEATURE_VERSION_1);
    l4virtio_set_feature(_config->driver_features_map, L4VIRTIO_NET_F_MAC);
    driver_acknowledge();
  }

  /**
   * Return a reference to the device configuration.
   */
  l4virtio_net_config_t const &device_config() const
  {
    return *_config->device_config<l4virtio_net_config_t>();
  }

  /**
   * Bind the rx notification IRQ to the specified thread.
   *
   * \param thread Thread to bind the notification IRQ to.
   * \param label  Label to assign to the IRQ.
   *
   * \return 0 on success, negative error code otherwise.
   */
  int bind_rx_notification_irq(L4::Cap<L4::Thread> thread, l4_umword_t label)
  {
    return l4_error(_driver_notification_irq->bind_thread(thread, label));
  }

  /**
   * Return a reference to the RX packet buffer of the specified descriptor,
   * e.g. from wait_rx().
   *
   * \param descno Descriptor number in the virtio queue.
   *
   * \throws L4::Bounds_error if descno is outside the RX queue.
   */
  Packet &rx_pkt(l4_uint16_t descno)
  {
    if (descno >= _rxq.num())
      throw L4::Bounds_error("Invalid used descriptor number in RX queue");
    return _rxpkts[descno];
  }

  /**
   * Block until a network packet has been received from the device and return
   * the descriptor number.
   *
   * \pre The calling thread must be bound to the rx notification IRQ via
   *      `bind_rx_notification_irq()`.
   *
   * \param[out] len (optional) Length of valid data in RX packet.
   *
   * \return Descriptor number of received packet.
   *
   * The packet data can be obtained with rx_pkt(). finish_rx() should be
   * called after the packet buffer can be returned to the RX queue.
   */
  l4_uint16_t wait_rx(l4_uint32_t *len = nullptr)
  {
    l4_uint16_t descno;

    // Wait until used descriptor becomes available; IRQ receive blocks
    // between polls of the used ring.
    for (;;)
      {
        descno = _rxq.find_next_used(len);
        if (descno != Virtqueue::Eoq)
          break;
        L4Re::chksys(_driver_notification_irq->receive(), "Wait for RX");
      }

    if (len)
      // Ensure that the length provided by the device in wait_for_next_used()
      // is not larger than the buffer and subtract the length of the header.
      // NOTE(review): if the device reported *len < sizeof(hdr), the
      // subtraction wraps and cxx::min clamps to sizeof(data) — verify that
      // devices always account for the header length.
      *len = cxx::min(*len - sizeof(_rxpkts[0].hdr), sizeof(_rxpkts[0].data));
    return descno;
  }

  /**
   * Free an RX descriptor number to make it available for the RX queue again.
   *
   * \param descno Descriptor number in the virtio queue.
   *
   * Usually queue_rx() should be called afterwards to queue the freed
   * descriptor(s).
   */
  void finish_rx(l4_uint16_t descno)
  {
    _rxq.free_descriptor(descno, descno);
  }

  /**
   * Queue new available descriptors in the RX queue.
   *
   * Makes all currently free RX descriptors available to the device and
   * notifies it once.
   */
  void queue_rx()
  {
    l4_uint16_t descno;
    while ((descno = _rxq.alloc_descriptor()) != Virtqueue::Eoq)
      _rxq.enqueue_descriptor(descno);
    notify(_rxq);
  }

  /**
   * Attempt to allocate a descriptor in the TX queue and transmit the packet,
   * after calling the prepare callback.
   *
   * \param prepare Function that fills the packet with data, should return
   *                the length of the data copied to the packet.
   *
   * \retval true  The packet was queued.
   * \retval false TX queue is full.
   *
   * The prepare callback should fill the packet with data and return the
   * length of the packet data (without the size of the virtio-net packet
   * header).
   */
  bool tx(std::function<l4_uint32_t(Packet&)> prepare)
  {
    auto descno = _txq.alloc_descriptor();
    if (descno == Virtqueue::Eoq)
      {
        // Try again after cleaning old descriptors that have already been used
        free_used_tx_descriptors();
        descno = _txq.alloc_descriptor();
        if (descno == Virtqueue::Eoq)
          return false;
      }

    auto &pkt = _txpkts[descno];
    auto &desc = _txq.desc(descno);
    // Only the length changes per packet; addr was set in setup_device().
    desc.len = sizeof(pkt.hdr) + prepare(pkt);
    send(_txq, descno);
    return true;
  }

private:
  /// Reclaim all TX descriptors the device has finished with.
  void free_used_tx_descriptors()
  {
    l4_uint16_t used;
    while ((used = _txq.find_next_used()) != Virtqueue::Eoq)
      {
        if (used >= _txq.num())
          throw L4::Bounds_error("Invalid used descriptor number in TX queue");
        _txq.free_descriptor(used, used);
      }
  }

private:
  L4Re::Util::Unique_cap<L4Re::Dataspace> _queue_ds;      ///< Shared memory backing.
  L4Re::Rm::Unique_region<l4_uint8_t *> _queue_region;    ///< Local mapping.
  L4Re::Util::Unique_cap<L4::Irq> _driver_notification_irq; ///< RX notify IRQ.
  L4virtio::Driver::Virtqueue _rxq, _txq;                 ///< RX (0) and TX (1) queues.
  Packet *_rxpkts, *_txpkts;                              ///< Per-descriptor buffers.
};
} }

View File

@@ -0,0 +1,225 @@
// vi:set ft=cpp: -*- Mode: C++ -*-
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2013-2024 Kernkonzept GmbH.
* Author(s): Alexander Warg <alexander.warg@kernkonzept.com>
* Matthias Lange <matthias.lange@kernkonzept.com>
*
*/
#pragma once
#include "virtio.h"
#include <l4/sys/capability>
#include <l4/sys/cxx/ipc_client>
#include <l4/re/dataspace>
#include <l4/sys/irq>
#include <l4/cxx/utils>
namespace L4virtio {
/**
* IPC interface for virtio over L4 IPC.
*
* The L4virtio protocol is an adaption of the mmio virtio transport 1.0(4).
* This interface allows to exchange the necessary resources: device
* configuration page, notification interrupts and dataspaces for payload.
*
* Notification interrupts can be configured independently for changes to
* the configuration space and each queue through special L4virtio-specific
* notify_index fields in the config page and queue configuration. The
* interface distinguishes between device-to-driver and driver-to-device
* notification interrupts.
*
* Device-to-driver interrupts are configured via the ICU interface. The
* device announces the maximum number of supported interrupts via Icu::info().
* The driver can then bind interrupts using Icu::bind().
*
* Driver-to-device interrupts must be requested from the device through
* device_notification_irq().
*/
class Device :
public L4::Kobject_t<Device, L4::Icu, L4VIRTIO_PROTOCOL,
L4::Type_info::Demand_t<1> >
{
public:
typedef l4virtio_config_queue_t Config_queue;
// View of the shared L4virtio config page, extending the plain C header
// with driver-side helpers for issuing commands through the cmd register.
struct Config_hdr : l4virtio_config_hdr_t
{
  // Access the queue configuration array that follows the header.
  Config_queue *queues() const
  { return l4virtio_config_queues(this); }

  // Typed access to the device-specific configuration area.
  template <typename T>
  T *device_config() const
  {
    return static_cast<T*>(l4virtio_device_config(this));
  }

  // Ask the device to (re)read the configuration of queue `num`.
  // Delegates to send_cmd(); see there for the notify/timeout semantics.
  int config_queue(unsigned num, L4::Cap<L4::Triggerable> out_notify,
                   L4::Cap<L4::Triggerable> in_notify,
                   l4_timeout_s to = L4_IPC_TIMEOUT_NEVER)
  {
    return send_cmd(L4VIRTIO_CMD_CFG_QUEUE | num,
                    out_notify, in_notify, to);
  }

  // Notify the device that queue `num` has new buffers available.
  int notify_queue(unsigned num, L4::Cap<L4::Triggerable> out_notify,
                   L4::Cap<L4::Triggerable> in_notify,
                   l4_timeout_s to = L4_IPC_TIMEOUT_NEVER)
  {
    return send_cmd(L4VIRTIO_CMD_NOTIFY_QUEUE | num,
                    out_notify, in_notify, to);
  }

  /**
   * Check the VIRTIO status register device and driver failure bits.
   *
   * \return true if either FAILED or DEVICE_NEEDS_RESET is set.
   *
   * Called by driver-side code to read the shared memory status word and
   * check the fail state of both the device and driver.
   */
  bool fail_state() const
  {
    // access_once: the device may update status concurrently.
    auto cfg_status = cxx::access_once(&status);
    return cfg_status
           & (L4VIRTIO_STATUS_FAILED | L4VIRTIO_STATUS_DEVICE_NEEDS_RESET);
  }

  // Write a new value to the VIRTIO status register via the cmd mechanism.
  int set_status(unsigned new_status, L4::Cap<L4::Triggerable> out_notify,
                 L4::Cap<L4::Triggerable> in_notify,
                 l4_timeout_s to = L4_IPC_TIMEOUT_NEVER)
  {
    return send_cmd(L4VIRTIO_CMD_SET_STATUS | new_status,
                    out_notify, in_notify, to);
  }

  // Tell the device that config register `reg` was changed by the driver.
  int cfg_changed(unsigned reg, L4::Cap<L4::Triggerable> out_notify,
                  L4::Cap<L4::Triggerable> in_notify,
                  l4_timeout_s to = L4_IPC_TIMEOUT_NEVER)
  {
    return send_cmd(L4VIRTIO_CMD_CFG_CHANGED | reg,
                    out_notify, in_notify, to);
  }

  // Issue a command through the shared cmd register:
  //  1. write the command word (write_now: ordered, visible to the device),
  //  2. trigger `out_notify` (if valid) to wake the device,
  //  3. wait on `in_notify` until the device clears cmd to 0, giving up
  //     when a single receive times out (`to` bounds each receive, not the
  //     total wait).
  // Returns L4_EOK when the device consumed the command, -L4_EBUSY when the
  // command is still pending after the timeout.
  int send_cmd(unsigned command, L4::Cap<L4::Triggerable> out_notify,
               L4::Cap<L4::Triggerable> in_notify,
               l4_timeout_s to = L4_IPC_TIMEOUT_NEVER)
  {
    cxx::write_now(&cmd, command);
    if (out_notify)
      out_notify->trigger();

    auto utcb = l4_utcb();
    // Send part of the timeout is zero: we only ever receive here.
    auto ipc_to = l4_timeout(L4_IPC_TIMEOUT_0, to);

    do
      {
        if (in_notify)
          if (l4_ipc_error(l4_ipc_receive(in_notify.cap(), utcb, ipc_to),
                           utcb) == L4_IPC_RETIMEOUT)
            break;
      }
    while (cxx::access_once(&cmd));

    // cmd still set means the device never processed the command.
    return cxx::access_once(&cmd) ? -L4_EBUSY : L4_EOK;
  }
};
/**
* Write the VIRTIO status register.
*
* \param status Status word to write to the VIRTIO status.
*
* \retval 0 on success.
*
* \note All other registers are accessed via shared memory.
*/
L4_INLINE_RPC_OP(L4VIRTIO_OP_SET_STATUS, long,
set_status, (unsigned status));
/**
* Trigger queue configuration of the given queue.
*
* Usually all queues are configured when the status is written to running.
* However, in some cases queues shall be disabled or enabled dynamically, in
* this case this function triggers a reconfiguration from the shared memory
* register of the queue config.
*
* \param queue Queue index for the queue to be configured.
*
* \retval 0 on success.
* \retval -L4_EIO The queue's status is invalid.
* \retval -L4_ERANGE The queue index exceeds the number of queues.
* \retval -L4_EINVAL Otherwise.
*/
L4_INLINE_RPC_OP(L4VIRTIO_OP_CONFIG_QUEUE, long,
config_queue, (unsigned queue));
/**
* Register a shared data space with VIRTIO host
*
* \param ds_cap Dataspace capability to register. The lower 8 bits determine
* the rights mask with which the guest's rights are masked during
* the registration of the dataspace at the VIRTIO host.
* \param base VIRTIO guest physical start address of shared memory region
* \param offset Offset within the data space that is attached to the
* given `base` in the guest physical memory.
* \param size Size of the memory region in the guest
*
* \retval L4_EOK Operation successful.
* \retval -L4_EINVAL The `ds_cap` capability is invalid, does not refer to
* a valid dataspace, is not a trusted dataspace if
* trusted dataspace validation is enabled, or `size` and
* `offset` specify an invalid region.
* \retval -L4_ENOMEM The limit of dataspaces that can be registered has been
* reached or no capability slot could be allocated.
* \retval -L4_ERANGE `offset` is lager than the size of the dataspace.
* \retval <0 Any error returned by the dataspace when queried for
* information during setup or any error returned by the
* region manager from attaching the dataspace.
*/
L4_INLINE_RPC_OP(L4VIRTIO_OP_REGISTER_DS, long,
register_ds, (L4::Ipc::Cap<L4Re::Dataspace> ds_cap,
l4_uint64_t base, l4_umword_t offset,
l4_umword_t size));
/**
* Get the dataspace with the L4virtio configuration page.
*
* \param config_ds Capability for receiving the dataspace capability for
* the shared L4-VIRTIO config data space.
* \param ds_offset Offset into the dataspace where the device configuration
* structure starts.
*/
L4_INLINE_RPC_OP(L4VIRTIO_OP_DEVICE_CONFIG, long, device_config,
(L4::Ipc::Out<L4::Cap<L4Re::Dataspace> > config_ds,
l4_addr_t *ds_offset));
/**
* Get the notification interrupt corresponding to the given index.
*
* \param index Index of the interrupt.
* \param[out] irq Triggerable for the given index.
*
* \retval L4_EOK Success.
* \retval L4_ENOSYS IRQ notification not supported by device.
* \retval <0 Other error.
*
* An index is only guaranteed to return an IRQ object when the index is
* set in one of the device notify index fields. The device must return
* the same interrupt for a given index as long as the index is in use.
* If an index disappears as a result of a configuration change and then is
* reused later, the interrupt is not guaranteed to be the same.
*
* Interrupts must always be rerequested after a device reset.
*/
L4_INLINE_RPC_OP(L4VIRTIO_OP_GET_DEVICE_IRQ, long, device_notification_irq,
(unsigned index, L4::Ipc::Out<L4::Cap<L4::Triggerable> > irq));
typedef L4::Typeid::Rpcs<set_status_t, config_queue_t, register_ds_t,
device_config_t, device_notification_irq_t>
Rpcs;
};
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,591 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2014-2020, 2023-2024 Kernkonzept GmbH.
* Author(s): Alexander Warg <alexander.warg@kernkonzept.com>
*
*/
#pragma once
#include <l4/sys/types.h>
#include <l4/cxx/bitfield>
#include <l4/cxx/minmax>
#include <l4/cxx/utils>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include "../virtqueue"
/**
* \ingroup l4virtio_transport
*
* L4-VIRTIO Transport C++ API
*/
namespace L4virtio {
namespace Svr {
/**
* \brief Type of the device status register.
*/
/**
 * \brief Type of the device status register.
 *
 * The bit layout mirrors the VIRTIO device status register as shared
 * with the driver.
 */
struct Dev_status
{
  unsigned char raw; ///< Raw value of the VIRTIO device status register.
  Dev_status() = default;

  /// Make Status from raw value.
  explicit Dev_status(l4_uint32_t v) : raw(v) {}

  CXX_BITFIELD_MEMBER(0, 0, acked, raw);       ///< ACKNOWLEDGE bit (bit 0)
  CXX_BITFIELD_MEMBER(1, 1, driver, raw);      ///< DRIVER bit (bit 1)
  CXX_BITFIELD_MEMBER(2, 2, driver_ok, raw);   ///< DRIVER_OK bit (bit 2)
  CXX_BITFIELD_MEMBER(3, 3, features_ok, raw); ///< FEATURES_OK bit (bit 3)
  /// Combined view of both failure bits (bits 6 and 7).
  CXX_BITFIELD_MEMBER(6, 7, fail_state, raw);
  CXX_BITFIELD_MEMBER(6, 6, device_needs_reset, raw); ///< DEVICE_NEEDS_RESET bit
  CXX_BITFIELD_MEMBER(7, 7, failed, raw);             ///< FAILED bit

  /**
   * Check if the device is in running state.
   *
   * \return true if the device is in running state.
   *
   * The device is in running state when acked(), driver(), features_ok(), and
   * driver_ok() return true, and device_needs_reset() and failed() return
   * false.
   */
  bool running() const
  {
    // 0xf == acked | driver | driver_ok | features_ok with all other
    // bits -- in particular both failure bits -- clear.
    return (raw == 0xf);
  }
};
/**
* \brief Type for device feature bitmap
*/
/**
 * \brief Type for device feature bitmap
 */
struct Dev_features
{
  l4_uint32_t raw; ///< The raw value of the features bitmap
  Dev_features() = default;

  /// Make Features from a raw bitmap.
  explicit Dev_features(l4_uint32_t v) : raw(v) {}

  /// Bit 28 -- the VIRTIO_F_RING_INDIRECT_DESC feature bit position
  /// (indirect descriptor table support).
  CXX_BITFIELD_MEMBER(28, 28, ring_indirect_desc, raw);
  /// Bit 29 -- the VIRTIO_F_RING_EVENT_IDX feature bit position
  /// (used/avail event index support).
  CXX_BITFIELD_MEMBER(29, 29, ring_event_idx, raw);
};
/**
* Virtqueue implementation for the device
*
* This class represents a single virtqueue, with a local running available
* index.
*
* \note The Virtqueue implementation is not thread-safe.
*/
/**
 * Virtqueue implementation for the device
 *
 * This class represents a single virtqueue, with a local running available
 * index.
 *
 * \note The Virtqueue implementation is not thread-safe.
 *
 * The members `_desc`, `_avail`, `_used`, `_current_avail` and `_idx_mask`
 * used below are inherited from L4virtio::Virtqueue.
 */
class Virtqueue : public L4virtio::Virtqueue
{
public:
  /**
   * VIRTIO request, essentially a descriptor from the available ring.
   */
  class Head_desc
  {
    friend class Virtqueue;
  private:
    /// Pointer to the head descriptor, NULL for an invalid request.
    Virtqueue::Desc const *_d;

    /// Wrap descriptor `i` of ring `r`.
    Head_desc(Virtqueue *r, unsigned i) : _d(r->desc(i)) {}

  public:
    /// Make invalid (NULL) request.
    Head_desc() : _d(0) {}

    /// \return True if the request is valid (not NULL).
    bool valid() const { return _d; }

    /// \return True if the request is valid (not NULL).
    explicit operator bool () const
    { return valid(); }

    /// \return Pointer to the head descriptor of the request.
    Desc const *desc() const
    { return _d; }
  };

  /// Head descriptor bundled with the queue it originates from.
  struct Request : Head_desc
  {
    Virtqueue *ring = nullptr; ///< Queue this request was taken from.
    Request() = default;
  private:
    friend class Virtqueue;
    Request(Virtqueue *r, unsigned i) : Head_desc(r, i), ring(r) {}
  };

  /**
   * Get the next available descriptor from the available ring.
   *
   * \pre The queue must be in working state.
   * \return A Request for the next available descriptor, the Request is invalid
   *         if there are no descriptors in the available ring.
   * \note The return value must be checked even when a previous desc_avail()
   *       returned true.
   *
   */
  Request next_avail()
  {
    if (L4_LIKELY(_current_avail != _avail->idx))
      {
        // Read barrier: the ring entry and the descriptors it refers to
        // must not be read before the new `idx` value was observed.
        rmb();
        unsigned head = _current_avail & _idx_mask;
        ++_current_avail;
        return Request(this, _avail->ring[head]);
      }
    return Request();
  }

  /**
   * Return unfinished descriptors to the available ring, i.e. reset the local
   * next index of the available ring to the given descriptor.
   *
   * \param d descriptor of the request that is to be marked as finished.
   *
   * \pre queue must be in working state.
   *
   * \pre `d` must be a valid request from this queue, obtained via
   *      next_avail(), that has not yet been finished, and in addition, no
   *      descriptors following it have been finished.
   */
  void rewind_avail(Head_desc const &d)
  {
    unsigned head_idx = d._d - _desc;
    // Calculate the distance between _current_avail and head_idx, taking into
    // account that _current_avail might have wrapped around with respect to
    // _idx_mask in the meantime.
    _current_avail -= (_current_avail - head_idx) & _idx_mask;
  }

  /**
   * Test for available descriptors.
   *
   * \return true if there are descriptors available, false if not.
   * \pre The queue must be in working state.
   */
  bool desc_avail() const
  {
    return _current_avail != _avail->idx;
  }

  /**
   * Put the given descriptor into the used ring.
   *
   * \param r   Request that shall be marked as finished.
   * \param len The total number of bytes written.
   *
   * \pre queue must be in working state.
   *
   * \pre `r` must be a valid request from this queue.
   */
  void consumed(Head_desc const &r, l4_uint32_t len = 0)
  {
    l4_uint16_t i = _used->idx & _idx_mask;
    _used->ring[i] = Used_elem(r._d - _desc, len);
    // Write barrier: the used-ring entry must be visible to the driver
    // before the new `idx` is published.
    wmb();
    ++_used->idx;
  }

  /**
   * Put multiple descriptors into the used ring.
   *
   * A range of descriptors, specified by `begin` and `end` iterators is
   * added. Each iterator points to a struct that has a `first` member that
   * is a `Head_desc` and a `second` member that is the corresponding number
   * of bytes written.
   *
   * \tparam ITER  The type of the iterator (inferred).
   * \param begin  Iterator pointing to first new descriptor.
   * \param end    Iterator pointing to one past last entry.
   *
   * \pre queue must be in working state.
   */
  template<typename ITER>
  void consumed(ITER const &begin, ITER const &end)
  {
    l4_uint16_t added = 0;
    l4_uint16_t idx = _used->idx;

    for (auto elem = begin ; elem != end; ++elem, ++added)
      _used->ring[(idx + added) & _idx_mask]
        = Used_elem(elem->first._d - _desc, elem->second);

    // Publish all entries at once, after they became visible (see the
    // single-descriptor consumed() above).
    wmb();
    _used->idx += added;
  }

  /**
   * Add a descriptor to the used ring, and notify an observer.
   *
   * \tparam QUEUE_OBSERVER  The type of the observer (inferred).
   * \param d    descriptor of the request that is to be marked as
   *             finished.
   * \param o    Pointer to the observer that is notified.
   * \param len  Number of bytes written for this request.
   *
   * \pre queue must be in working state.
   *
   * \pre `d` must be a valid request from this queue.
   */
  template<typename QUEUE_OBSERVER>
  void finish(Head_desc &d, QUEUE_OBSERVER *o, l4_uint32_t len = 0)
  {
    consumed(d, len);
    o->notify_queue(this);
    // Invalidate the head so it cannot be finished twice.
    d._d = 0;
  }

  /**
   * Add a range of descriptors to the used ring, and notify an observer once.
   *
   * The iterators are passed to consumed<ITER>(ITER const &, ITER const &),
   * and the requirements detailed there apply.
   *
   * \tparam ITER            type of the iterator (inferred)
   * \tparam QUEUE_OBSERVER  the type of the observer (inferred).
   * \param begin  iterator pointing to first element.
   * \param end    iterator pointing to one past last element.
   * \param o      pointer to the observer that is notified.
   *
   * \pre queue must be in working state.
   */
  template<typename ITER, typename QUEUE_OBSERVER>
  void finish(ITER const &begin, ITER const &end, QUEUE_OBSERVER *o)
  {
    consumed(begin, end);
    o->notify_queue(this);
  }

  /**
   * Set the 'no notify' flag for this queue.
   *
   * This function may be called on a disabled queue.
   */
  void disable_notify()
  {
    if (L4_LIKELY(ready()))
      _used->flags.no_notify() = 1;
  }

  /**
   * Clear the 'no notify' flag for this queue.
   *
   * This function may be called on a disabled queue.
   */
  void enable_notify()
  {
    if (L4_LIKELY(ready()))
      _used->flags.no_notify() = 0;
  }

  /**
   * Get a descriptor from the descriptor list.
   *
   * \param idx The index of the descriptor.
   *
   * \pre `idx` < `num`
   * \pre queue must be in working state
   */
  Desc const *desc(unsigned idx) const
  { return _desc + idx; }
};
/**
* \brief Abstract data buffer.
*/
/**
 * \brief Abstract data buffer.
 *
 * Tracks a cursor (`pos`) and the number of bytes remaining (`left`)
 * within some underlying memory region.
 */
struct Data_buffer
{
  char *pos;        ///< Current buffer position
  l4_uint32_t left; ///< Bytes left in buffer

  Data_buffer() = default;

  /**
   * \brief Create buffer covering object `p`.
   *
   * \tparam T  Type of object (implicit)
   * \param p   Pointer to object.
   *
   * The buffer starts at the first byte of `p` and spans sizeof(T) bytes.
   */
  template<typename T>
  explicit Data_buffer(T *p)
  : pos(reinterpret_cast<char *>(p)), left(sizeof(T))
  {}

  /**
   * Reset the buffer to cover object `p`.
   *
   * \tparam T  Type of object (implicit)
   * \param p   Pointer to object.
   *
   * Afterwards the buffer points to the start of `p` with sizeof(T)
   * bytes left.
   */
  template<typename T>
  void set(T *p)
  {
    pos = reinterpret_cast<char *>(p);
    left = sizeof(T);
  }

  /**
   * Copy contents from this buffer to the destination buffer.
   *
   * \param dst Destination buffer.
   * \param max (optional) Maximum number of bytes to copy.
   * \return the number of bytes copied.
   *
   * Copies at most `max` bytes, additionally bounded by what is left in
   * this buffer and by the space left in `dst`. Both buffers' cursors
   * advance by the amount copied.
   */
  l4_uint32_t copy_to(Data_buffer *dst, l4_uint32_t max = UINT_MAX)
  {
    // Transfer size is the minimum of source bytes, destination space
    // and the caller-supplied limit.
    l4_uint32_t n = left < dst->left ? left : dst->left;
    if (n > max)
      n = max;

    memcpy(dst->pos, pos, n);

    pos += n;
    left -= n;
    dst->pos += n;
    dst->left -= n;

    return n;
  }

  /**
   * Skip given number of bytes in this buffer.
   *
   * \param bytes Number of bytes that shall be skipped.
   * \return The number of bytes skipped.
   *
   * If fewer than `bytes` bytes are left, only the remaining bytes are
   * skipped and that amount is returned.
   */
  l4_uint32_t skip(l4_uint32_t bytes)
  {
    l4_uint32_t n = bytes < left ? bytes : left;
    pos += n;
    left -= n;
    return n;
  }

  /**
   * Check if there are no more bytes left in the buffer.
   *
   * \return true if there are no more bytes left in the buffer.
   */
  bool done() const
  { return !left; }
};
class Request_processor;
/**
* Exception used by Queue to indicate descriptor errors.
*/
/**
 * Exception used by Queue to indicate descriptor errors.
 */
struct Bad_descriptor
{
  /// The error code
  enum Error
  {
    Bad_address, ///< Address cannot be translated
    Bad_rights,  ///< Missing access rights on memory
    Bad_flags,   ///< Invalid combination of descriptor flags
    Bad_next,    ///< Invalid next index
    Bad_size     ///< Invalid size of memory block
  };

  /// The processor that triggered the exception
  Request_processor const *proc;

  /// The error code
  Error error;

  /**
   * Make a bad descriptor exception.
   *
   * \param proc  The request processor causing the exception
   * \param e     The error code.
   */
  Bad_descriptor(Request_processor const *proc, Error e)
  : proc(proc), error(e)
  {}

  /**
   * Get a human readable description of the error code.
   *
   * \return Message describing the error.
   */
  char const *message() const
  {
    // A switch is used here instead of a designator-initialized lookup
    // table: array designators ("[Bad_address] = ...") are a GNU
    // extension and not standard C++. The switch is portable and still
    // falls back to a generic message for out-of-range codes.
    switch (error)
      {
      case Bad_address: return "Descriptor address cannot be translated";
      case Bad_rights:  return "Insufficient memory access rights";
      case Bad_flags:   return "Invalid descriptor flags";
      case Bad_next:    return "The descriptor's `next` index is invalid";
      case Bad_size:    return "Invalid size of the memory block";
      default:          return "Unknown error";
      }
  }
};
/**
* Encapsulate the state for processing a VIRTIO request.
*
* A VIRTIO request is a possibly chained list of descriptors retrieved from
* the available ring of a virtqueue, using Virtqueue::next_avail().
*
* The descriptor processing depends on helper (DESC_MAN) for interpreting the
* descriptors in the context of the device implementation.
*
* DESC_MAN has to provide the functionality to safely dereference a
* descriptor from a descriptor list.
*
* The following methods must be provided by DESC_MAN:
* * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
* Request_processor const *proc,
* Virtqueue::Desc const **table) \endcode
* This function is used to dereference `desc` as an indirect descriptor
* table, and must return a pointer to an indirect descriptor table.
* * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
* Request_processor const *proc, ...) \endcode
* This function is used to dereference a descriptor as a normal data
* buffer, and '...' are the arguments that are passed to start() and next().
*/
class Request_processor
{
private:
  /// pointer to descriptor table (may point to an indirect table)
  Virtqueue::Desc const *_table;

  /// currently processed descriptor
  Virtqueue::Desc _current;

  /// number of entries in the current descriptor table (_table)
  l4_uint16_t _num;

public:
  /**
   * Start processing a new request.
   *
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm       Descriptor manager that is used to translate VIRTIO
   *                 descriptor addresses.
   * \param ring     VIRTIO ring of the request.
   * \param request  VIRTIO request from Virtqueue::next_avail()
   * \param args     Extra arguments passed to dm->load_desc()
   *
   * \pre The given request must be valid.
   *
   * \throws Bad_descriptor  The descriptor has an invalid size or load_desc()
   *                         has thrown an exception by itself.
   */
  template<typename DESC_MAN, typename ...ARGS>
  void start(DESC_MAN *dm, Virtqueue *ring, Virtqueue::Head_desc const &request, ARGS... args)
  {
    _current = cxx::access_once(request.desc());

    if (_current.flags.indirect())
      {
        // Indirect descriptor: the head points to a separate descriptor
        // table; load it and continue with its first entry.
        dm->load_desc(_current, this, &_table);
        _num = _current.len / sizeof(Virtqueue::Desc);
        if (L4_UNLIKELY(!_num))
          throw Bad_descriptor(this, Bad_descriptor::Bad_size);

        _current = cxx::access_once(_table);
      }
    else
      {
        // Direct descriptor: chain indices refer to the queue's own
        // descriptor table.
        _table = ring->desc(0);
        _num = ring->num();
      }

    // Resolve the first data descriptor; must throw on a bad descriptor.
    dm->load_desc(_current, this, cxx::forward<ARGS>(args)...);
  }

  /**
   * Start processing a new request.
   *
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm       Descriptor manager that is used to translate VIRTIO
   *                 descriptor addresses.
   * \param request  VIRTIO request from Virtqueue::next_avail()
   * \param args     Extra arguments passed to dm->load_desc()
   * \pre The given request must be valid.
   *
   * Convenience overload that takes the queue from the request itself.
   */
  template<typename DESC_MAN, typename ...ARGS>
  Virtqueue::Request const &start(DESC_MAN *dm, Virtqueue::Request const &request, ARGS... args)
  {
    start(dm, request.ring, request, cxx::forward<ARGS>(args)...);
    return request;
  }

  /**
   * Get the flags of the currently processed descriptor.
   *
   * \return The flags of the currently processed descriptor.
   */
  Virtqueue::Desc::Flags current_flags() const
  { return _current.flags; }

  /**
   * Are there more chained descriptors?
   *
   * \return true if there are more chained descriptors in the current request.
   */
  bool has_more() const
  { return _current.flags.next(); }

  /**
   * Switch to the next descriptor in a descriptor chain.
   *
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm    Descriptor manager that is used to translate VIRTIO
   *              descriptor addresses.
   * \param args  Extra arguments passed to dm->load_desc()
   *
   * \retval true   A next descriptor is available.
   * \retval false  No descriptor available.
   *
   * \throws Bad_descriptor  The `next` index of this descriptor is invalid.
   */
  template<typename DESC_MAN, typename ...ARGS>
  bool next(DESC_MAN *dm, ARGS... args)
  {
    if (!_current.flags.next())
      return false;

    // Bounds-check the chain link before following it.
    if (L4_UNLIKELY(_current.next >= _num))
      throw Bad_descriptor(this, Bad_descriptor::Bad_next);

    _current = cxx::access_once(_table + _current.next);

    if (0) // we ignore this for performance reasons
      if (L4_UNLIKELY(_current.flags.indirect()))
        throw Bad_descriptor(this, Bad_descriptor::Bad_flags);

    // must throw an exception in case of a bad descriptor
    dm->load_desc(_current, this, cxx::forward<ARGS>(args)...);

    return true;
  }
};
}
}

View File

@@ -0,0 +1,693 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2017-2021, 2024 Kernkonzept GmbH.
* Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
*
*/
#pragma once
#include <l4/cxx/unique_ptr>
#include <l4/re/util/unique_cap>
#include <climits>
#include <l4/l4virtio/virtio.h>
#include <l4/l4virtio/virtio_block.h>
#include <l4/l4virtio/server/l4virtio>
#include <l4/sys/cxx/ipc_epiface>
namespace L4virtio { namespace Svr {
template <typename Ds_data> class Block_dev_base;
/**
* A request to read or write data.
*/
/**
 * A request to read or write data.
 */
template<typename Ds_data>
class Block_request
{
  friend class Block_dev_base<Ds_data>;

  /// Size of the fixed virtio block header at the start of each request.
  enum { Header_size = sizeof(l4virtio_block_header_t) };

public:
  /// One contiguous chunk of request data in device-visible memory.
  struct Data_block
  {
    /// Pointer to virtio memory descriptor.
    Driver_mem_region_t<Ds_data> *mem;

    /// Virtual address of the data block (in device space).
    void *addr;

    /// Length of datablock in bytes (max 4MB).
    l4_uint32_t len;

    Data_block() = default;

    /// Built by Request_processor::load_desc() from a virtio descriptor.
    Data_block(Driver_mem_region_t<Ds_data> *m, Virtqueue::Desc const &desc,
               Request_processor const *)
    : mem(m), addr(m->local(desc.addr)), len(desc.len)
    {}
  };

  /**
   * Compute the total size of the data in the request.
   *
   * \return Size of the data payload in bytes, i.e. the total descriptor
   *         length minus the request header and the status byte.
   *
   * \throws L4::Runtime_error(-L4_EIO) Request has a bad format.
   *
   * Note that this operation is relatively expensive as
   * it has to iterate over the complete list of blocks.
   */
  unsigned data_size() const
  {
    Request_processor rp;
    Data_block data;

    // NOTE(review): a Bad_descriptor thrown by this initial start() call
    // escapes unconverted; only the loop below translates it to -L4_EIO.
    rp.start(_mem_list, _request, &data);

    unsigned total = data.len;

    try
      {
        while (rp.has_more())
          {
            rp.next(_mem_list, &data);
            total += data.len;
          }
      }
    catch (Bad_descriptor const &e)
      {
        // need to convert the exception because e contains a raw pointer to rp
        throw L4::Runtime_error(-L4_EIO, "bad virtio descriptor");
      }

    if (total < Header_size + 1)
      throw L4::Runtime_error(-L4_EIO, "virtio request too short");

    return total - Header_size - 1;
  }

  /**
   * Check if the request contains more data blocks.
   */
  bool has_more()
  {
    // peek into the remaining data
    while (_data.len == 0 && _rp.has_more())
      _rp.next(_mem_list, &_data);

    // there always must be one byte left for status
    return (_data.len > 1 || _rp.has_more());
  }

  /**
   * Return next block in scatter-gather list.
   *
   * \return Information about the next data block.
   *
   * \throws L4::Runtime_error  No more data block is available.
   * \throws Bad_descriptor     Virtio request is corrupted.
   */
  Data_block next_block()
  {
    Data_block out;

    if (_data.len == 0)
      {
        if (!_rp.has_more())
          throw L4::Runtime_error(-L4_EEXIST,
                                  "No more data blocks in virtio request");

        if (_todo_blocks == 0)
          throw Bad_descriptor(&_rp, Bad_descriptor::Bad_size);
        --_todo_blocks;

        _rp.next(_mem_list, &_data);
      }

    if (_data.len > _max_block_size)
      throw Bad_descriptor(&_rp, Bad_descriptor::Bad_size);

    out = _data;

    if (!_rp.has_more())
      {
        // Last descriptor: its final byte is reserved for the status
        // byte, so withhold it from the returned data block.
        --(out.len);
        _data.len = 1;
        _data.addr = static_cast<char *>(_data.addr) + out.len;
      }
    else
      _data.len = 0; // is consumed

    return out;
  }

  /// Return the block request header.
  l4virtio_block_header_t const &header() const
  { return _header; }

private:
  /**
   * Construct a request and read its header from the first descriptor.
   *
   * \throws Bad_descriptor  First block too small for the header or no
   *                         space left for the status byte.
   */
  Block_request(Virtqueue::Request req, Driver_mem_list_t<Ds_data> *mem_list,
                unsigned max_blocks, l4_uint32_t max_block_size)
  : _mem_list(mem_list),
    _request(req),
    _todo_blocks(max_blocks),
    _max_block_size(max_block_size)
  {
    // read header which should be in the first block
    _rp.start(mem_list, _request, &_data);
    --_todo_blocks;

    if (_data.len < Header_size)
      throw Bad_descriptor(&_rp, Bad_descriptor::Bad_size);

    _header = *(static_cast<l4virtio_block_header_t *>(_data.addr));

    _data.addr = static_cast<char *>(_data.addr) + Header_size;
    _data.len -= Header_size;

    // if there is no space for status bit we cannot really recover
    if (!_rp.has_more() && _data.len == 0)
      throw Bad_descriptor(&_rp, Bad_descriptor::Bad_size);
  }

  /**
   * Write the status byte into the last descriptor and return the head
   * to the used ring.
   *
   * \param queue   Queue the request came from.
   * \param status  Virtio block status to report to the driver.
   * \param sz      Number of bytes consumed.
   *
   * \retval L4_EOK    Status written and descriptor released.
   * \retval -L4_EIO   Malformed request; no place for the status byte.
   */
  int release_request(Virtqueue *queue, l4_uint8_t status, unsigned sz)
  {
    // write back status
    // If there was an error on the way or the status byte is in its
    // own block, fast-forward to the last block.
    if (_rp.has_more())
      {
        while (_rp.next(_mem_list, &_data) && _todo_blocks > 0)
          --_todo_blocks;

        if (_todo_blocks > 0 && _data.len > 0)
          *(static_cast<l4_uint8_t *>(_data.addr) + _data.len - 1) = status;
        else
          return -L4_EIO; // too many data blocks
      }
    else if (_data.len > 0)
      *(static_cast<l4_uint8_t *>(_data.addr)) = status;
    else
      return -L4_EIO; // no space for final status byte

    // now release the head
    queue->consumed(_request, sz);

    return L4_EOK;
  }

  /**
   * The list of memory areas for the device.
   * Points to the memory list of the parent device, which always must
   * have a longer lifespan than the request.
   */
  Driver_mem_list_t<Ds_data> *_mem_list;
  /// Type and destination information.
  l4virtio_block_header_t _header;
  /// Request processor containing the current state.
  Request_processor _rp;
  /// Current data chunk in flight.
  Data_block _data;
  /// Original virtio request.
  Virtqueue::Request _request;
  /// Number of blocks that may still be processed.
  unsigned _todo_blocks;
  /// Maximum length of a single block.
  l4_uint32_t _max_block_size;
};
/**
 * Feature bits of the virtio block device, on top of the generic
 * device features.
 */
struct Block_features : public Dev_config::Features
{
  Block_features() = default;
  Block_features(l4_uint32_t raw) : Dev_config::Features(raw) {}

  /** Maximum size of any single segment is in size_max. */
  CXX_BITFIELD_MEMBER( 1,  1, size_max, raw);
  /** Maximum number of segments in a request is in seg_max. */
  CXX_BITFIELD_MEMBER( 2,  2, seg_max, raw);
  /** Disk-style geometry specified in geometry. */
  CXX_BITFIELD_MEMBER( 4,  4, geometry, raw);
  /** Device is read-only. */
  CXX_BITFIELD_MEMBER( 5,  5, ro, raw);
  /** Block size of disk is in blk_size. */
  CXX_BITFIELD_MEMBER( 6,  6, blk_size, raw);
  /** Cache flush command support. */
  CXX_BITFIELD_MEMBER( 9,  9, flush, raw);
  /** Device exports information about optimal IO alignment. */
  CXX_BITFIELD_MEMBER(10, 10, topology, raw);
  /** Device can toggle its cache between writeback and writethrough modes. */
  CXX_BITFIELD_MEMBER(11, 11, config_wce, raw);
  /** Device supports multiqueue. */
  CXX_BITFIELD_MEMBER(12, 12, mq, raw);
  /** Device can support discard command. */
  CXX_BITFIELD_MEMBER(13, 13, discard, raw);
  /** Device can support write zeroes command. */
  CXX_BITFIELD_MEMBER(14, 14, write_zeroes, raw);
};
/**
* Base class for virtio block devices.
*
* Use this class as a base to implement your own specific block device.
*/
template <typename Ds_data>
class Block_dev_base : public L4virtio::Svr::Device_t<Ds_data>
{
private:
L4Re::Util::Unique_cap<L4::Irq> _kick_guest_irq;
Virtqueue _queue;
unsigned _vq_max;
l4_uint32_t _max_block_size = UINT_MAX;
Dev_config_t<l4virtio_block_config_t> _dev_config;
public:
typedef Block_request<Ds_data> Request;
protected:
Block_features negotiated_features() const
{ return _dev_config.negotiated_features(0); }
Block_features device_features() const
{ return _dev_config.host_features(0); }
void set_device_features(Block_features df)
{ _dev_config.host_features(0) = df.raw; }
/**
* Sets the maximum size of any single segment reported to client.
*
* The limit is also applied to any incoming requests.
* Requests with larger segments result in an IO error being
* reported to the client. That means that process_request() can
* safely make the assumption that all segments in the received
* request are smaller.
*/
void set_size_max(l4_uint32_t sz)
{
_dev_config.priv_config()->size_max = sz;
Block_features df = device_features();
df.size_max() = true;
set_device_features(df);
_max_block_size = sz;
}
/**
* Sets the maximum number of segments in a request
* that is reported to client.
*/
void set_seg_max(l4_uint32_t sz)
{
_dev_config.priv_config()->seg_max = sz;
Block_features df = device_features();
df.seg_max() = true;
set_device_features(df);
}
/**
* Set disk geometry that is reported to the client.
*/
void set_geometry(l4_uint16_t cylinders, l4_uint8_t heads, l4_uint8_t sectors)
{
l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
pc->geometry.cylinders = cylinders;
pc->geometry.heads = heads;
pc->geometry.sectors = sectors;
Block_features df = device_features();
df.geometry() = true;
set_device_features(df);
}
/**
* Sets block disk size to be reported to the client.
*
* Setting this does not change the logical sector size used
* for addressing the device.
*/
void set_blk_size(l4_uint32_t sz)
{
_dev_config.priv_config()->blk_size = sz;
Block_features df = device_features();
df.blk_size() = true;
set_device_features(df);
}
/**
* Sets the I/O alignment information reported back to the client.
*
* \param physical_block_exp Number of logical blocks per physical block(log2)
* \param alignment_offset Offset of the first aligned logical block
* \param min_io_size Suggested minimum I/O size in blocks
* \param opt_io_size Optimal I/O size in blocks
*/
void set_topology(l4_uint8_t physical_block_exp,
l4_uint8_t alignment_offset,
l4_uint32_t min_io_size,
l4_uint32_t opt_io_size)
{
l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
pc->topology.physical_block_exp = physical_block_exp;
pc->topology.alignment_offset = alignment_offset;
pc->topology.min_io_size = min_io_size;
pc->topology.opt_io_size = opt_io_size;
Block_features df = device_features();
df.topology() = true;
set_device_features(df);
}
/** Enables the flush command. */
void set_flush()
{
Block_features df = device_features();
df.flush() = true;
set_device_features(df);
}
/** Sets cache mode and enables the writeback toggle.
*
* \param writeback Mode of the cache (0 for writethrough, 1 for writeback).
*/
void set_config_wce(l4_uint8_t writeback)
{
l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
pc->writeback = writeback;
Block_features df = device_features();
df.config_wce() = true;
set_device_features(df);
}
/** Get the writeback field from the configuration space.
*
* \return Value of the writeback field.
*/
l4_uint8_t get_writeback()
{
l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
return pc->writeback;
}
/**
* Sets constraints for and enables the discard command.
*
* \param max_discard_sectors Maximum discard sectors size.
* \param max_discard_seg Maximum discard segment number.
* \param discard_sector_alignment Can be used by the driver when splitting a
* request based on alignment.
*/
void set_discard(l4_uint32_t max_discard_sectors, l4_uint32_t max_discard_seg,
l4_uint32_t discard_sector_alignment)
{
l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
pc->max_discard_sectors = max_discard_sectors;
pc->max_discard_seg = max_discard_seg;
pc->discard_sector_alignment = discard_sector_alignment;
Block_features df = device_features();
df.discard() = true;
set_device_features(df);
}
/**
* Sets constraints for and enables the write zeroes command.
*
* \param max_write_zeroes_sectors Maximum write zeroes sectors size.
* \param max_write_zeroes_seg maximum write zeroes segment number.
* \param write_zeroes_may_unmap Set if a write zeroes request can result in
* deallocating one or more sectors.
*/
void set_write_zeroes(l4_uint32_t max_write_zeroes_sectors,
l4_uint32_t max_write_zeroes_seg,
l4_uint8_t write_zeroes_may_unmap)
{
l4virtio_block_config_t volatile *pc = _dev_config.priv_config();
pc->max_write_zeroes_sectors = max_write_zeroes_sectors;
pc->max_write_zeroes_seg = max_write_zeroes_seg;
pc->write_zeroes_may_unmap = write_zeroes_may_unmap;
Block_features df = device_features();
df.write_zeroes() = true;
set_device_features(df);
}
public:
/**
* Create a new virtio block device.
*
* \param vendor Vendor ID
* \param queue_size Number of entries to provide in avail and used queue.
* \param capacity Size of the device in 512-byte sectors.
* \param read_only True, if the device should not be writable.
*/
Block_dev_base(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity,
bool read_only)
: L4virtio::Svr::Device_t<Ds_data>(&_dev_config),
_vq_max(queue_size),
_dev_config(vendor, L4VIRTIO_ID_BLOCK, 1)
{
this->reset_queue_config(0, queue_size);
Block_features df(0);
df.ring_indirect_desc() = true;
df.ro() = read_only;
set_device_features(df);
_dev_config.set_host_feature(L4VIRTIO_FEATURE_VERSION_1);
_dev_config.priv_config()->capacity = capacity;
}
/**
* Reset the actual hardware device.
*/
virtual void reset_device() = 0;
/**
* Return true, if the queues should not be processed further.
*/
virtual bool queue_stopped() = 0;
  /**
   * Releases resources related to a request and notifies the client.
   *
   * \param req     Pointer to request that has finished.
   * \param sz      Number of bytes consumed.
   * \param status  Status of request (see L4virtio_block_status).
   *
   * This function must be called when an asynchronous request finishes,
   * either successfully or with an error. The status byte in the request
   * must have been set prior to calling it.
   */
  void finalize_request(cxx::unique_ptr<Request> req, unsigned sz,
                        l4_uint8_t status = L4VIRTIO_BLOCK_S_OK)
  {
    // Nothing to report back when the device already failed or the
    // queue was torn down in the meantime.
    if (_dev_config.status().fail_state() || !_queue.ready())
      return;

    // Returns the descriptor chain to the used ring; a negative result
    // indicates a corrupt request and puts the device into a fail state.
    if (req->release_request(&_queue, status, sz) < 0)
      this->device_error();

    // Honour the driver's notification suppression flag.
    if (_queue.no_notify_guest())
      return;

    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_VRING);
    _kick_guest_irq->trigger();

    // Request can be dropped here.
  }
int reconfig_queue(unsigned idx) override
{
  // Only the single request queue at index 0 exists on this device.
  if (idx != 0)
    return -L4_EINVAL;

  return this->setup_queue(&_queue, 0, _vq_max) ? 0 : -L4_EINVAL;
}
  /// Reset queue and device state back to the initial (uninitialised) state.
  void reset() override
  {
    // Disable the queue first so no request can be processed against a
    // stale configuration, then reset config space and the backend.
    _queue.disable();
    _dev_config.reset_queue(0, _vq_max);
    _dev_config.reset_hdr();
    reset_device();
  }
protected:
// Return true if there is at least one unprocessed request in the queue.
bool check_for_new_requests()
{
  // No new work while the queue is down, processing is paused, or the
  // device is in a failed state (same short-circuit order as before).
  if (!_queue.ready() || queue_stopped() || _dev_config.status().fail_state())
    return false;

  return _queue.desc_avail();
}
  /// Return one request if available.
  ///
  /// Returns a null pointer when the queue is not ready, processing is
  /// stopped, the device failed, or no descriptor is currently available.
  /// A malformed descriptor chain puts the device into a fail state.
  cxx::unique_ptr<Request> get_request()
  {
    cxx::unique_ptr<Request> req;

    if (!_queue.ready() || queue_stopped())
      return req;

    if (_dev_config.status().fail_state())
      return req;

    auto r = _queue.next_avail();

    if (!r)
      return req;

    try
      {
        // Parsing the descriptor chain may throw Bad_descriptor on
        // malformed driver input.
        cxx::unique_ptr<Request> cur{
          new Request(r, &(this->_mem_info), _vq_max, _max_block_size)};
        req = cxx::move(cur);
      }
    catch (Bad_descriptor const &e)
      {
        this->device_error();
        return req;
      }

    return req;
  }
private:
  /// Take ownership of the driver-provided notification IRQ and re-arm
  /// the receive slot for future capability transfers.
  void register_single_driver_irq() override
  {
    _kick_guest_irq = L4Re::Util::Unique_cap<L4::Irq>(
      L4Re::chkcap(this->server_iface()->template rcv_cap<L4::Irq>(0)));
    L4Re::chksys(this->server_iface()->realloc_rcv_cap(0));
  }

  /// Signal a configuration-space change to the driver.
  void trigger_driver_config_irq() override
  {
    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_CONFIG);
    _kick_guest_irq->trigger();
  }
// Verify the request queue is usable; reset the device otherwise.
bool check_queues() override
{
  bool const ok = _queue.ready();
  if (!ok)
    reset();
  return ok;
}
};
/**
 * Ready-to-use virtio block device that couples Block_dev_base with an
 * L4 IPC endpoint and an IRQ endpoint for driver notifications.
 *
 * The derived class only needs to implement process_request() (and the
 * pure virtuals inherited from Block_dev_base).
 */
template <typename Ds_data>
struct Block_dev
: Block_dev_base<Ds_data>,
  L4::Epiface_t<Block_dev<Ds_data>, L4virtio::Device>
{
private:
  /// IRQ endpoint forwarding driver kicks to the request-processing loop.
  class Irq_object : public L4::Irqep_t<Irq_object>
  {
  public:
    Irq_object(Block_dev<Ds_data> *parent) : _parent(parent) {}

    void handle_irq()
    {
      _parent->kick();
    }

  private:
    Block_dev<Ds_data> *_parent;
  };
  Irq_object _irq_handler;

protected:
  /// Access the IRQ endpoint that receives driver notifications.
  L4::Epiface *irq_iface()
  { return &_irq_handler; }

public:
  Block_dev(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity,
            bool read_only)
  : Block_dev_base<Ds_data>(vendor, queue_size, capacity, read_only),
    _irq_handler(this)
  {}

  /**
   * Attach device to an object registry.
   *
   * \param registry Object registry that will be responsible for dispatching
   *                 requests.
   * \param service  Name of an existing capability the device should use.
   *
   * This functions registers the general virtio interface as well as the
   * interrupt handler which is used for receiving client notifications.
   */
  L4::Cap<void> register_obj(L4::Registry_iface *registry,
                             char const *service = 0)
  {
    L4Re::chkcap(registry->register_irq_obj(this->irq_iface()));
    L4::Cap<void> ret;
    if (service)
      ret = registry->register_obj(this, service);
    else
      ret = registry->register_obj(this);
    L4Re::chkcap(ret);

    return ret;
  }

  /// Attach device to an object registry using an existing IPC endpoint.
  L4::Cap<void> register_obj(L4::Registry_iface *registry,
                             L4::Cap<L4::Rcv_endpoint> ep)
  {
    L4Re::chkcap(registry->register_irq_obj(this->irq_iface()));

    return L4Re::chkcap(registry->register_obj(this, ep));
  }

  typedef Block_request<Ds_data> Request;

  /**
   * Implements the actual processing of data in the device.
   *
   * \param req  The request to be processed.
   * \return If false, no further requests will be scheduled.
   *
   * Synchronous and asynchronous processing of the data is supported.
   * For asynchronous mode, the function should set up the worker
   * and then return false. In synchronous mode, the function should
   * return true, once processing is complete. If there is an error
   * and processing is aborted, the status flag of `req` needs to be set
   * accordingly and the request immediately finished with finish_request()
   * if the client is to be answered.
   */
  virtual bool process_request(cxx::unique_ptr<Request> &&req) = 0;

protected:
  L4::Ipc_svr::Server_iface *server_iface() const override
  {
    return this->L4::Epiface::server_iface();
  }

  /// Drain the avail ring; stops early when process_request() asks for
  /// asynchronous completion by returning false.
  void kick()
  {
    for (;;)
      {
        auto req = this->get_request();
        if (!req)
          return;
        if (!this->process_request(cxx::move(req)))
          return;
      }
  }

private:
  L4::Cap<L4::Irq> device_notify_irq() const override
  {
    return L4::cap_cast<L4::Irq>(_irq_handler.obj_cap());
  }
};
} }

View File

@@ -0,0 +1,818 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2019-2024 Kernkonzept GmbH.
* Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
* Phillip Raffeck <phillip.raffeck@kernkonzept.com>
* Steffen Liebergeld <steffen.liebergeld@kernkonzept.com>
* Jan Klötzke <jan.kloetzke@kernkonzept.com>
*/
#pragma once
#include <l4/l4virtio/server/l4virtio>
#include <l4/re/error_helper>
namespace L4virtio { namespace Svr { namespace Console {
/// Virtio console specific feature bits.
///
/// Bit positions 0-2 correspond to the console feature bits of the virtio
/// specification (SIZE, MULTIPORT, EMERG_WRITE).
struct Features : Dev_config::Features
{
  Features() = default;
  explicit Features(l4_uint32_t raw) : Dev_config::Features(raw) {}

  /// Configuration `cols` and `rows` are valid.
  CXX_BITFIELD_MEMBER(0, 0, console_size, raw);
  /// Device has support for multiple ports.
  CXX_BITFIELD_MEMBER(1, 1, console_multiport, raw);
  /// Device has support for emergency write.
  CXX_BITFIELD_MEMBER(2, 2, emerg_write, raw);
};
/// Virtio console control message as exchanged on the control queues.
struct Control_message
{
  /// Possible control events.
  enum Events
  {
    /// Sent by driver at initialization.
    Device_ready = 0,
    /// Sent by device to create new ports.
    Device_add = 1,
    /// Sent by device to remove added ports.
    Device_remove = 2,
    /// Sent by driver as response to `Device_add`.
    Port_ready = 3,
    /// Sent by device to nominate port as console port.
    Console_port = 4,
    /// Sent by device to indicate a console size change.
    Resize = 5,
    /// Sent by device and driver to indicate whether a port is open.
    Port_open = 6,
    /// Sent by device to tag a port.
    Port_name = 7,
  };

  l4_uint32_t id;     ///< Port number.
  l4_uint16_t event;  ///< Control event, see `Events`.
  l4_uint16_t value;  ///< Extra information.

  // Default ctor leaves the fields uninitialised; used when the message
  // is subsequently filled via memcpy from a queue descriptor.
  Control_message() {}

  Control_message(l4_uint32_t i, l4_uint16_t e, l4_uint16_t v)
  : id(i), event(e), value(v)
  {}
};
/// Specialised `Virtqueue::Request` providing access to control message payload.
///
/// Filled by the Control_request overload of Virtio_con::load_desc().
struct Control_request
{
  /// Virtual address of the data block (in device space).
  Control_message *msg;
  /// Length of datablock in bytes.
  l4_uint32_t len;
  /// Pointer to driver memory region.
  Driver_mem_region *mem;
};
/**
 * Representation of a Virtio console port.
 *
 * Each port consists of a pair of queues for sending and receiving.
 *
 * A port may be added and removed at runtime when the multi-port feature
 * is enabled. The states are as follows:
 *
 * ```
 *   +----------+          port_remove()
 *   | DISABLED |<----------------------- [all]
 *   +----------+
 *        |
 *        | port_add()
 *        v
 *   +----------+  process_port_ready(0)
 *   |  ADDED   + --------------+
 *   +----------+               |          +--------+
 *        |       +------------>|          |        |
 *        |       process_port_ready(1)    |        |
 *        v               |                | FAILED |
 *   +----------+  process_port_ready(0)   |        |
 *   |          |---------------------------->|    |
 *   |  READY   |  process_port_ready(1)   |        |
 *   |          |<----------------------------|    |
 *   +----------+<----------+              +--------+
 *        |                 |                  ^
 *        | port_open(true) | port_open(false) |
 *        v                 |                  | process_port_ready(0)
 *   +----------+           |                  |
 *   |   OPEN   |-----------+                  |
 *   +----------+----------------------------------+
 * ```
 */
struct Port
{
  /**
   * Possible states of a virtio console port.
   */
  enum Port_status
  {
    /// Reset state, waiting for port to be added.
    Port_disabled = 0,
    /// Port has been added by device, waiting for ready message.
    Port_added,
    /// Port is ready but still closed.
    Port_ready,
    /// Port is in a working state.
    Port_open,
    /// Device failure, port unusable.
    Port_failed,
    /// Number of port states. Must be last.
    Port_num_states,
  };

  /// Size of control queues, also used as default size.
  enum { Control_queue_size = 0x10 };

  /// Driver transmit queue: the device reads incoming data here (see
  /// Device::port_read() / kick() in the device implementation).
  Virtqueue tx;
  /// Driver receive queue: the device writes outgoing data here.
  Virtqueue rx;
  Port_status status;           ///< State the port is in.
  Port_status reported_status;  ///< State the port was last reported.
  unsigned vq_max;              ///< Maximum queue sizes for this port.

  // NOTE(review): reported_status is not initialised here — relies on
  // reset() being called before the first state report; TODO confirm.
  Port() : status(Port_disabled), vq_max(Control_queue_size) {}

  Port(Port const &) = delete;
  Port &operator = (Port const &) = delete;
  virtual ~Port() = default;

  /// Check that the port is open.
  bool is_open() const
  { return status == Port_open; }

  /// Reset the port to the initial state and disable its virtqueues.
  virtual void reset()
  {
    status = Port_disabled;
    reported_status = Port_disabled;
  }

  /// Check that both virtqueues are set up correctly.
  bool queues_ready() const
  { return tx.ready() && rx.ready(); }

  /// Check that device implementation may write to receive queues.
  bool rx_ready() const
  { return is_open() && rx.ready(); }

  /// Check that device implementation may read from transmit queues.
  bool tx_ready() const
  { return is_open() && tx.ready(); }

  /// State transition from last report state to current state
  struct Transition {
    l4_int16_t event;  ///< Control_message::Events or <0 if no event is sent.
    l4_uint16_t value; ///< Extra information.
    Port_status next;  ///< Next Port_status state
  };

  /**
   * State transition table from last report state to current state.
   *
   * Not all transitions can be made directly. For example, if the last
   * reported state was `Port_disabled` and the current state is `Port_open`,
   * the device has to send two messages: `Control_message::Device_add` and
   * `Control_message::Port_open`. This is expressed by going through an
   * intermediate state (`Port_ready`) on the reporting side.
   *
   * For the purpose of the driver there are only three coarse states:
   *
   * 1. The port does not exist (Port_disabled).
   * 2. The port exists but is closed on the device side (Port_added,
   *    Port_ready, Port_failed).
   * 3. The port exists and is open on the device side (Port_open).
   *
   * The state transition table with Port_added, Port_ready and Port_failed as
   * current state are thus identical.
   */
  static constexpr Transition
  state_transitions[Port_num_states][Port_num_states] =
  {
    /* reported                current */
    /* Port_disabled */ /* Port_disabled */ {{ -1, 0, Port_disabled },
                        /* Port_added */     { Control_message::Device_add, 0,
                                               Port_added },
                        /* Port_ready */     { Control_message::Device_add, 0,
                                               Port_ready },
                        /* Port_open */      { Control_message::Device_add, 0,
                                               Port_ready },
                        /* Port_failed */    { Control_message::Device_add, 0,
                                               Port_failed }},
    /* Port_added */    /* Port_disabled */ {{ Control_message::Device_remove,
                                               0, Port_disabled },
                        /* Port_added */     { -1, 0, Port_added },
                        /* Port_ready */     { -1, 0, Port_ready },
                        /* Port_open */      { Control_message::Port_open, 1,
                                               Port_open },
                        /* Port_failed */    { -1, 0, Port_failed }},
    /* Port_ready */    /* Port_disabled */ {{ Control_message::Device_remove,
                                               0, Port_disabled },
                        /* Port_added */     { -1, 0, Port_added },
                        /* Port_ready */     { -1, 0, Port_ready },
                        /* Port_open */      { Control_message::Port_open, 1,
                                               Port_open },
                        /* Port_failed */    { -1, 0, Port_failed }},
    /* Port_open */     /* Port_disabled */ {{ Control_message::Port_open, 0,
                                               Port_ready },
                        /* Port_added */     { Control_message::Port_open, 0,
                                               Port_added },
                        /* Port_ready */     { Control_message::Port_open, 0,
                                               Port_ready },
                        /* Port_open */      { -1, 0, Port_open },
                        /* Port_failed */    { Control_message::Port_open, 0,
                                               Port_ready }},
    /* Port_failed */   /* Port_disabled */ {{ Control_message::Device_remove,
                                               0, Port_disabled },
                        /* Port_added */     { -1, 0, Port_added },
                        /* Port_ready */     { -1, 0, Port_ready },
                        /* Port_open */      { Control_message::Port_open, 1,
                                               Port_open },
                        /* Port_failed */    { -1, 0, Port_failed }},
  };
};
/**
 * Base class implementing a virtio console functionality.
 *
 * It is possible to activate the MULTIPORT feature, in which case incoming
 * control messages need to be dispatched by calling
 * `handle_control_message()`. The derived class must additionally override
 * `process_device_ready()`, `process_port_ready()` and `process_port_open()`
 * to implement the actual behaviour. The derived class has the following
 * responsibilities:
 * - inform the driver about usable ports once the device is ready as
 *   signaled in process_device_ready(), see the wrapper `port_add()`.
 * - inform the driver about unusable ports, see the wrapper `port_remove()`.
 * - react to open/close events, see the wrapper `port_open()`.
 *
 * This implementation provides no means to handle interrupts or notify guests,
 * therefore derived classes have to provide this functionality, see
 * `notify_queue()` and `handle_control_message()`. Similarly, all interaction
 * with data queues has to be implemented. Memory for port structures must be
 * managed by the implementor as well.
 *
 * Use this class as a base to implement your own specific console device.
 */
class Virtio_con : public L4virtio::Svr::Device
{
  // Fixed virtqueue indices of the control queues. Data queues for port 0
  // use indices 0/1; further ports follow after the control queues (see
  // queue_to_port()).
  enum Virtqueue_names
  {
    Ctrl_rx = 2, ///< Communication queue from device to driver.
    Ctrl_tx = 3, ///< Communication queue from driver to device.
  };

  /// Device-specific configuration space layout of the virtio console.
  struct Serial_config_space
  {
    l4_uint16_t cols;
    l4_uint16_t rows;
    l4_uint32_t max_nr_ports;
    l4_uint32_t emerg_wr;
  } __attribute__((packed));

public:
  /**
   * Create a new multiport console device.
   *
   * \param max_ports         Maximum number of ports the device should be
   *                          able to handle (ignored when `enable_multiport`
   *                          is false).
   * \param enable_multiport  Enable the control queue for dynamic handling
   *                          of ports.
   */
  explicit Virtio_con(unsigned max_ports, bool enable_multiport)
  : L4virtio::Svr::Device(&_dev_config),
    _num_ports(enable_multiport ? max_ports : 1),
    // Queue count: 2 data queues per port plus 2 control queues when
    // multiport is on; exactly the two data queues of port 0 otherwise.
    _dev_config(L4VIRTIO_VENDOR_KK, L4VIRTIO_ID_CONSOLE,
                enable_multiport ? max_ports * 2 + 2 : 2)
  {
    if (_num_ports < 1)
      L4Re::chksys(-L4_EINVAL, "At least one port is required.");

    Features hf(0);
    hf.console_multiport() = enable_multiport;
    _dev_config.host_features(0) = hf.raw;
    if (enable_multiport)
      _dev_config.priv_config()->max_nr_ports = _num_ports;
    _dev_config.reset_hdr();
  }

  /// Reset all queue configurations to their maximum sizes.
  void reset_queue_configs()
  {
    for (unsigned q = 0; q < _dev_config.num_queues(); ++q)
      reset_queue_config(q, max_queue_size(q));
  }

  /// Attach the queue with index `index` after driver reconfiguration.
  int reconfig_queue(unsigned index) override
  {
    if (index >= _dev_config.num_queues())
      return -L4_ERANGE;

    if (setup_queue(get_queue(index), index, max_queue_size(index)))
      return 0;

    return -L4_EINVAL;
  }

  /**
   * Return true if the multiport feature is enabled and control queues are
   * available.
   */
  bool multiport_enabled() const
  {
    return _negotiated_features.console_multiport()
           && _dev_config.num_queues() > Ctrl_rx;
  }

  /// True once the driver sent a successful DEVICE_READY message.
  bool ctrl_queue_ready() const
  { return _ctrl_port.is_open(); }

  /// Latch the features negotiated with the driver.
  bool check_features(void) override
  {
    _negotiated_features = Features(_dev_config.negotiated_features(0));
    return true;
  }

  bool check_queues() override
  {
    // NOTE
    // The VIRTIO specification states:
    // "The port 0 receive and transmit queues always exist"
    // The linux driver however does not setup port 0 if the multiport feature
    // is negotiated.
    // We just go along with the linux driver and do not expect port 0 to be up,
    // if the multiport feature is negotiated.
    if (multiport_enabled())
      // If MULTIPORT was negotiated, ctrl queues should be set up.
      return _ctrl_port.queues_ready();

    // If MULTIPORT was not negotiated, port 0 should be set up.
    // Without a control protocol there is no open handshake, so the single
    // port is considered open right away.
    port(0)->status = Port::Port_open;
    return port(0)->queues_ready();
  }

  /**
   * Send a DEVICE_ADD message and update the internal state.
   *
   * \param idx  Port that should be added.
   *
   * \retval L4_EOK     Message has been sent.
   * \retval -L4_EPERM  Invalid state transition.
   *
   * \pre `idx` must be smaller than the configured number of ports.
   * \pre Port must not already exist.
   */
  int port_add(unsigned idx)
  {
    Port *p = port(idx);
    if (p->status != Port::Port_disabled)
      return -L4_EPERM;

    p->status = Port::Port_added;
    port_report_status(idx);

    return L4_EOK;
  }

  /**
   * Send a DEVICE_REMOVE message and update the internal state.
   *
   * \param idx  Port that should be removed.
   *
   * \retval L4_EOK     Message has been sent.
   * \retval -L4_EPERM  Invalid state transition.
   *
   * \pre `idx` must be smaller than the configured number of ports.
   * \pre Port must already exist.
   */
  int port_remove(unsigned idx)
  {
    Port *p = port(idx);
    if (p->status == Port::Port_disabled)
      return -L4_EPERM;

    p->status = Port::Port_disabled;
    port_report_status(idx);

    return L4_EOK;
  }

  /**
   * Send a PORT_OPEN message and update the internal state.
   *
   * \param idx   Port that should be opened or closed.
   * \param open  Open or close port.
   *
   * \retval L4_EOK     Message has been sent.
   * \retval -L4_EPERM  Invalid state transition.
   *
   * \pre `idx` must be smaller than the configured number of ports.
   * \pre Port must be ready when opening or open when closing.
   */
  int port_open(unsigned idx, bool open)
  {
    Port *p = port(idx);
    if ((open && p->status != Port::Port_ready)
        || (!open && p->status != Port::Port_open))
      return -L4_EPERM;

    p->status = open ? Port::Port_open : Port::Port_ready;
    port_report_status(idx);

    return L4_EOK;
  }

  /**
   * Send a PORT_NAME message to announce the port name.
   *
   * \param idx   Port that should be opened or closed.
   * \param name  The port name
   *
   * \retval L4_EOK     Message has been sent.
   * \retval -L4_EPERM  Control message is not allowed in the current state.
   * \return Errors from send_control_message()
   *
   * \pre `idx` must be smaller than the configured number of ports.
   * \pre Port must already exist.
   */
  int port_name(unsigned idx, char const *name)
  {
    Port *p = port(idx);
    if (p->status == Port::Port_disabled)
      return -L4_EPERM;

    return send_control_message(idx, Control_message::Port_name, 0, name);
  }

  /**
   * Send control message to driver.
   *
   * \param idx    Port number.
   * \param event  Kind of control event.
   * \param value  Extra information for the event.
   * \param name   Name to be used for Port_name message
   *
   * \retval L4_EOK      Message has been sent.
   * \retval -L4_ENODEV  Control queue is not ready.
   * \retval -L4_EBUSY   Currently no descriptor available in the control queue.
   * \retval -L4_ENOMEM  Client-issued descriptor too small. Device will be set
   *                     to failed state.
   *
   * \pre `port` must be smaller than the configured number of ports.
   *
   * The convenience functions `port_add()`, `port_remove()` and `port_open()`
   * should cover the most use cases and are the preferred way of communication
   * with the driver. If you use this function directly, it is your
   * responsibility to guarantee no invalid control messages are sent to the
   * driver.
   */
  int send_control_message(l4_uint32_t idx, l4_uint16_t event,
                           l4_uint16_t value = 0, const char *name = 0)
  {
    if (!ctrl_queue_ready())
      return -L4_ENODEV;

    Virtqueue *q = &_ctrl_port.rx;
    if (!q->ready())
      return -L4_ENODEV;

    Virtqueue::Request r = q->next_avail();
    if (!r)
      return -L4_EBUSY;

    Request_processor rp;
    Control_request req;
    rp.start(this, r, &req);

    // NOTE(review): on this error path the fetched descriptor `r` is not
    // returned via q->finish() and the device is not put into a fail state,
    // contrary to the -L4_ENOMEM documentation above — TODO confirm.
    if (req.len < sizeof(Control_message))
      return -L4_ENOMEM;

    Control_message msg(idx, event, value);
    memcpy(req.msg, &msg, sizeof(msg));

    if (event == Control_message::Port_name && name)
      {
        // Append as much of the name as fits behind the message header.
        size_t name_len = cxx::min(req.len - sizeof(msg), strlen(name));
        memcpy(reinterpret_cast<char*>(req.msg) + sizeof(msg), name, name_len);
        q->finish(r, this, sizeof(msg) + name_len);
      }
    else
      q->finish(r, this, sizeof(msg));

    return L4_EOK;
  }

  /**
   * Handle control message received from the driver.
   *
   * \retval L4_EOK      Message has been handled.
   * \retval -L4_ENODEV  Control queue is not ready.
   * \retval -L4_EINVAL  Received an unexpected control event.
   *
   * This function performs the basic handling of control messages from the
   * driver. It does all necessary work with the control queues and performs
   * some sanity checks. All other work is deferred to the derived class, see
   * `process_device_ready()`, `process_port_ready()` and `process_port_open()`.
   */
  int handle_control_message()
  {
    // Report port state transitions if that failed in the past...
    if (_report_port_state)
      {
        _report_port_state = false;
        for (unsigned i = 0; i < _num_ports; ++i)
          if (!port_report_status(i))
            _report_port_state = true;
      }

    Virtqueue *q = &_ctrl_port.tx;
    if (!q->ready())
      return -L4_ENODEV;

    int ret = L4_EOK;
    Virtqueue::Request r;
    while ((r = q->next_avail()))
      {
        Request_processor rp;
        Control_request req;
        rp.start(this, r, &req);

        Control_message msg;
        if (req.len < sizeof(msg))
          {
            // Just ignore malformed input.
            q->finish(r, this);
            ret = -L4_EINVAL;
            continue;
          }

        // Copy out before finishing; the buffer belongs to the driver again
        // after q->finish().
        memcpy(&msg, req.msg, sizeof(msg));
        q->finish(r, this);

        if (_ctrl_port.status == Port::Port_disabled)
          {
            // When the control queue is disabled, only device ready is accepted.
            if (msg.event == Control_message::Device_ready)
              {
                if (msg.value)
                  _ctrl_port.status = Port::Port_open;
              }
            // NOTE(review): process_device_ready() is invoked for any event
            // arriving while disabled, not only Device_ready — TODO confirm
            // this is intended.
            process_device_ready(msg.value);
            continue;
          }

        if (!ctrl_queue_ready())
          continue;

        // Ignore invalid port ids
        // NOTE(review): `break` stops processing of this batch; remaining
        // buffers are handled on the next invocation — TODO confirm.
        if (msg.id >= max_ports())
          break;

        switch (msg.event)
          {
          case Control_message::Port_ready:
            process_port_ready(msg.id, msg.value);
            break;
          case Control_message::Port_open:
            process_port_open(msg.id, msg.value);
            break;
          default:
            ret = -L4_EINVAL;
            break;
          }
      }

    return ret;
  }

  /// \internal
  void load_desc(L4virtio::Virtqueue::Desc const &desc,
                 Request_processor const *proc,
                 L4virtio::Virtqueue::Desc const **table)
  {
    this->_mem_info.load_desc(desc, proc, table);
  }

  /// \internal
  /// Resolve a descriptor into a device-local Control_request view.
  void load_desc(L4virtio::Virtqueue::Desc const &desc,
                 Request_processor const *proc,
                 Control_request *data)
  {
    auto *region = this->_mem_info.find(desc.addr.get(), desc.len);
    if (L4_UNLIKELY(!region))
      throw Bad_descriptor(proc, Bad_descriptor::Bad_address);

    data->msg = reinterpret_cast<Control_message *>(region->local(desc.addr));
    data->len = desc.len;
    data->mem = region;
  }

  /// Reset all ports, queues, negotiated features and the backend device.
  void reset() override
  {
    for (unsigned p = 0; p < _num_ports; ++p)
      port(p)->reset();

    _ctrl_port.reset();
    reset_queue_configs();
    _dev_config.reset_hdr();
    _negotiated_features = Features(0);
    _report_port_state = false;
    reset_device();
  }

  /**
   * Reset the state of the actual console device.
   *
   * This callback is called at the end of `reset()`, allowing the derived class
   * to reset internal state.
   */
  virtual void reset_device() {}

  /**
   * Notify queue of available data.
   *
   * \param queue  Virtqueue to notify.
   *
   * This callback is called whenever data is sent to `queue`. It is the
   * responsibility of the derived class to perform all necessary notification
   * actions, e.g. triggering guest interrupts.
   */
  virtual void notify_queue(Virtqueue *queue) = 0;

  /**
   * Return the specified port.
   *
   * \param port  Port number.
   *
   * \pre Port number must be lower than the configured maximum number of ports.
   */
  virtual Port *port(unsigned port) = 0;
  virtual Port const *port(unsigned port) const = 0;

  /**
   * Callback called on DEVICE_READY event.
   *
   * \param value  The value field of the control message, indicating if the
   *               initialization was successful.
   *
   * Needs to be overridden by the derived class if the MULTIPORT feature is
   * enabled. Control messages may be sent only after the driver has
   * successfully initialized the device.
   */
  virtual void process_device_ready(l4_uint16_t value) = 0;

  /**
   * Callback called on PORT_READY event.
   *
   * \param id     The id field of the control message, i.e. the port number.
   * \param value  The value field of the control message, indicating if the
   *               initialization was successful.
   *
   * May be overridden by the derived class if the MULTIPORT feature is
   * enabled. This default implementation just sets the status of the port
   * according to the driver message.
   */
  virtual void process_port_ready(l4_uint32_t id, l4_uint16_t value)
  {
    Port *p = port(id);
    switch (p->status)
      {
      case Port::Port_added:
      case Port::Port_ready:
        p->status = value ? Port::Port_ready : Port::Port_failed;
        break;
      case Port::Port_open:
        if (!value)
          p->status = Port::Port_failed;
        break;
      default:
        // invalid state for PORT_READY message
        break;
      }
  }

  /**
   * Callback called on PORT_OPEN event.
   *
   * \param id     The id field of the control message, i.e. the port number.
   * \param value  The value field of the control message, indicating if the
   *               port was opened or closed.
   *
   * Signal that an application has opened the port. Can to be overridden by
   * the derived class if the MULTIPORT feature is enabled.
   */
  virtual void process_port_open(l4_uint32_t id, l4_uint16_t value) = 0;

  /// Number of ports this device was configured with.
  unsigned max_ports() const
  { return _num_ports; }

private:
  bool is_control_queue(unsigned q) const
  { return q == Ctrl_rx || q == Ctrl_tx; }

  // Map a queue index to its port: queues 0/1 belong to port 0, queues
  // 4/5 to port 1, and so on (2/3 are the control queues).
  unsigned queue_to_port(unsigned q) const
  { return (q == 0 || q == 1) ? 0 : (q / 2) - 1; }

  /**
   * Returns the maximum queue size for the given index.
   *
   * \param q  Index of queue to query.
   *
   * This function must only be called in contexts, where q is known to be
   * within range.
   */
  unsigned max_queue_size(unsigned q) const
  {
    if (is_control_queue(q))
      return _ctrl_port.vq_max;

    return port(queue_to_port(q))->vq_max;
  }

  /**
   * Returns the virtqueue associated with the given index.
   *
   * \param q  Number of queue to return.
   *
   * This function must only be called in contexts, where q is known to be
   * within range.
   */
  Virtqueue *get_queue(unsigned q)
  {
    Port *p;

    if (is_control_queue(q))
      p = &_ctrl_port;
    else
      p = port(queue_to_port(q));

    // Odd indices are driver-to-device (tx), even ones device-to-driver (rx).
    if (q & 1)
      return &p->tx;
    else
      return &p->rx;
  }

  /**
   * Report the current state of the port to the driver.
   *
   * On each state transition, the state might need to be reported to the
   * driver. Because the control queue might run out of buffers, the reported
   * state might deviate a longer time from the actual device port state.
   *
   * \retval false  The state transition could not be fully reported.
   * \retval true   Reported state matches current port state.
   */
  bool port_report_status(unsigned idx)
  {
    Port *p = port(idx);
    // Walk the transition table until the reported state has caught up;
    // intermediate states may require sending multiple control messages.
    while (p->status != p->reported_status)
      {
        auto const &trans
          = Port::state_transitions[p->reported_status][p->status];
        if (trans.event >= 0
            && send_control_message(idx, trans.event, trans.value) < 0)
          {
            // Out of control-queue buffers; retry on the next
            // handle_control_message() invocation.
            _report_port_state = true;
            return false;
          }

        p->reported_status = trans.next;
      }

    return true;
  }

  unsigned _num_ports;
  bool _report_port_state = false;

protected:
  Dev_config_t<Serial_config_space> _dev_config;
  Port _ctrl_port;
  Features _negotiated_features{0};
};
}}} // name space

View File

@@ -0,0 +1,466 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2019-2024 Kernkonzept GmbH.
* Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
* Phillip Raffeck <phillip.raffeck@kernkonzept.com>
* Steffen Liebergeld <steffen.liebergeld@kernkonzept.com>
* Jan Klötzke <jan.kloetzke@kernkonzept.com>
*/
#pragma once
#include <l4/cxx/bitmap>
#include <l4/cxx/static_vector>
#include <l4/l4virtio/server/l4virtio>
#include <l4/l4virtio/server/virtio-console>
#include <l4/re/error_helper>
namespace L4virtio { namespace Svr { namespace Console {
/**
 * A console port with associated read/write state.
 *
 * Tracks the notification of the device implementation and holds the state
 * when receiving data from the driver.
 */
struct Device_port : public Port
{
  /// View onto a single descriptor's payload in driver memory.
  struct Buffer : Data_buffer
  {
    Buffer() = default;
    Buffer(Driver_mem_region const *r,
           Virtqueue::Desc const &d,
           Request_processor const *)
    {
      pos = static_cast<char *>(r->local(d.addr));
      left = d.len;
    }
  };

  Request_processor rp;       ///< Request processor associated with current request.
  Virtqueue::Request request; ///< Current virtio tx queue request.
  Buffer src;                 ///< Source data block to process.

  // Edge-trigger flags: when true, the next kick() may fire the
  // corresponding rx_data_available()/tx_space_available() callback.
  bool poll_in_req = true;
  bool poll_out_req = true;

  void reset() override
  {
    Port::reset();
    // Drop any partially processed request and re-arm both callbacks.
    request = Virtqueue::Request();
    poll_in_req = true;
    poll_out_req = true;
  }
};
/**
* Base class implementing a virtio console device with L4Re-based notification
* handling.
*
* This console device is derived from Virtio_con and already includes
* functionality to handle interrupts and notify drivers. If an interrupt is
* received, all the necessary interaction with the virtqueues is performed and
* only the actual data processing has to be done by the derived class. By
* default all available ports are added and an "open"-request of a port by the
* driver is automatically acknowledged. The derived class can optionally
* change this behaviour by overriding `process_device_ready()`,
* `process_port_ready()` and `process_port_open()`.
*
* This class provides a stream-based interface to access the port data with
* edge-triggered notification callbacks. If a port receives data from the
* driver the derived class is notified with the `rx_data_available()`
* callback. The actual data can be retrieved by `port_read()`. If there was
* not enough data to be read, the call will return the available partial data.
* Only then will the `rx_data_available()` callback be triggered again.
*
* Data on a port may be transmitted by `port_write()`. If there were not
* enough buffers available, only a part of the data will be transmitted. Once
* there are new buffers available, the `tx_space_available()` callback will be
* invoked. This callback will be called again only after a previous
* `port_write()` was not able to send all requested data.
*
* Use this class as a base to provide your own high-level console device. You
* must derive from this class as well as L4::Epiface_t<..., L4virtio::Device>.
* For a working device the irq_iface() must be registered too. A typical
* implementation might look like the following:
*
* \code
* class My_console
* : public L4virtio::Svr::Console::Device,
* public L4::Epiface_t<My_console, L4virtio::Device>
* {
* public:
* My_console(L4Re::Util::Object_registry *r)
* : L4virtio::Svr::Console::Device(0x100)
* {
* init_mem_info(4);
* L4Re::chkcap(r->register_irq_obj(irq_iface()), "virtio notification IRQ");
* }
*
* void rx_data_available(unsigned port) override
* {
* // call port_read() to fetch available data
* }
*
* void tx_space_available(unsigned port) override
* {
* // can call port_write() to send (pending) data
* }
* };
*
* My_console console(registry);
* registry->register_obj(&console, ...);
* \endcode
*
* The maximum number of memory regions (init_mem_info()) should correlate
* with the number of supported ports.
*/
class Device
: public Virtio_con
{
  /// IRQ endpoint that forwards driver notifications to Device::kick().
  class Irq_object : public L4::Irqep_t<Irq_object>
  {
  public:
    Irq_object(Device *parent) : _parent(parent) {}

    void handle_irq() { _parent->kick(); }

  private:
    Device *_parent;
  };
protected:
  /// IRQ endpoint receiving driver notifications; must be registered with
  /// the object registry (see the class documentation example).
  L4::Epiface *irq_iface()
  { return &_irq_handler; }
public:
  /**
   * Create a new console device.
   *
   * \param vq_max  Maximum number of buffers in data queues.
   *
   * Create a console device with no multiport support, i.e. control queues are
   * disabled.
   */
  explicit Device(unsigned vq_max)
  : Virtio_con(1, false),
    _irq_handler(this),
    _ports(cxx::make_unique<Device_port[]>(1))
  {
    _ports[0].vq_max = vq_max;
    reset_queue_configs();
  }
  /**
   * Create a new console device.
   *
   * \param vq_max  Maximum number of buffers in data queues.
   * \param ports   Number of ports (maximum 32).
   *
   * Create a console device with multiport support, i.e. control queues are
   * enabled. All ports share the same queue depth `vq_max`.
   */
  explicit Device(unsigned vq_max, unsigned ports)
  : Virtio_con(ports, true),
    _irq_handler(this),
    _ports(cxx::make_unique<Device_port[]>(ports))
  {
    for (unsigned i = 0; i < ports; ++i)
      _ports[i].vq_max = vq_max;
    reset_queue_configs();
  }
  /**
   * Create a new console Device.
   *
   * \param vq_max_nums  Maximum number of buffers in data queues, given as a
   *                     cxx::static_vector with one entry per port.
   *
   * Create a console device with multiport support, i.e. control queues are
   * enabled. Each port gets its individual queue depth.
   */
  explicit Device(cxx::static_vector<unsigned> const &vq_max_nums)
  : Virtio_con(vq_max_nums.size(), true),
    _irq_handler(this),
    _ports(cxx::make_unique<Device_port[]>(max_ports()))
  {
    for (unsigned i = 0; i < vq_max_nums.size(); ++i)
      _ports[i].vq_max = vq_max_nums[i];
    reset_queue_configs();
  }
  /// Take ownership of the driver-provided notification IRQ and re-arm
  /// the receive slot for future capability transfers.
  void register_single_driver_irq() override
  {
    _kick_driver_irq = L4Re::Util::Unique_cap<L4::Irq>(
      L4Re::chkcap(server_iface()->rcv_cap<L4::Irq>(0)));
    L4Re::chksys(server_iface()->realloc_rcv_cap(0));
  }
  /// IRQ capability the driver uses to notify this device.
  L4::Cap<L4::Irq> device_notify_irq() const override
  { return _irq_handler.obj_cap(); }

  /// Notify the driver about new data in `queue`, honouring the driver's
  /// notification-suppression flag.
  void notify_queue(Virtqueue *queue) override
  {
    if (queue->no_notify_guest())
      return;

    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_VRING);
    _kick_driver_irq->trigger();
  }
  /**
   * Callback to notify that new data is available to be read from \a port.
   *
   * Edge-triggered: re-armed only after a port_read() drained all data.
   */
  virtual void rx_data_available(unsigned port) = 0;

  /**
   * Callback to notify that data can be written to \a port.
   *
   * Edge-triggered: re-armed only after a port_write() could not send
   * everything.
   */
  virtual void tx_space_available(unsigned port) = 0;

  /**
   * Return true, if the queues should not be processed further.
   */
  virtual bool queues_stopped()
  { return false; }
  /// Signal a configuration-space change to the driver.
  void trigger_driver_config_irq() override
  {
    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_CONFIG);
    _kick_driver_irq->trigger();
  }
/**
 * Process pending driver activity on all queues.
 *
 * Handles a pending control-queue message and then checks every port for
 * outstanding poll requests: a port that was read empty gets
 * rx_data_available() once the driver queued new data, a port that ran out
 * of receive buffers gets tx_space_available() once new buffers arrived.
 * Does nothing while queues_stopped() returns true.
 */
void kick()
{
  if (queues_stopped())
    return;

  // We're not interested in logging any errors, just ignore return value.
  handle_control_message();

  for (unsigned i = 0; i < max_ports(); ++i)
    {
      auto &p = _ports[i];
      // Port was read empty before and the driver queued new data.
      if (p.poll_in_req && p.tx_ready() && p.tx.desc_avail())
        {
          p.poll_in_req = false;
          rx_data_available(i);
        }
      // Port was out of receive buffers and new ones are available.
      if (p.poll_out_req && p.rx_ready() && p.rx.desc_avail())
        {
          p.poll_out_req = false;
          tx_space_available(i);
        }
    }
}
/**
 * Read data from port.
 *
 * Will read up to \a len bytes from \a port into \a buf. Returns the number
 * of bytes read, which may be less if not enough data was available. If all
 * data was read, the rx_data_available() callback will be invoked the next
 * time the driver queues new data for the port. The callback won't be called
 * again until all data was consumed again.
 *
 * \param buf  The destination buffer
 * \param len  Size of the buffer
 * \param port Port index to read data from
 * \return Number of bytes read
 */
unsigned port_read(char *buf, unsigned len, unsigned port = 0)
{
  unsigned total = 0;
  Device_port &p = _ports[port];
  // Driver-to-device data arrives on the port's transmit queue.
  Virtqueue *q = &p.tx;

  Data_buffer dst;
  dst.pos = buf;
  dst.left = len;

  while (dst.left)
    {
      try
        {
          // Make sure we have a valid request where we can read data from.
          // A partially consumed request is kept in p.request/p.rp/p.src
          // across calls.
          if (!p.request.valid())
            {
              p.request = p.tx_ready() ? q->next_avail()
                                       : Virtqueue::Request();
              if (!p.request.valid())
                break;

              p.rp.start(mem_info(), p.request, &p.src);
            }

          total += p.src.copy_to(&dst);

          // We might have eaten up the current descriptor. Move to the next
          // if this is the case. At the end of the descriptor chain we have
          // to retire the current request altogether.
          if (!p.src.left)
            {
              if (!p.rp.next(mem_info(), &p.src))
                {
                  q->finish(p.request, this);
                  p.request = Virtqueue::Request();
                }
            }
        }
      catch (Bad_descriptor const &)
        {
          // Retire the malformed request and put the device into the
          // error state.
          q->finish(p.request, this);
          p.request = Virtqueue::Request();
          device_error();
          break;
        }
    }

  if (total < len)
    // Re-arm the rx_data_available() callback; delivered from kick().
    p.poll_in_req = true;

  return total;
}
/**
 * Write data to port.
 *
 * Will write up to \a len bytes to \a port from \a buf. Returns the number
 * of bytes written, which may be less if not enough virtio buffers were
 * available. If not all data could be written, the tx_space_available()
 * callback will be invoked the next time the driver queues new receive
 * buffers for the port. The callback won't be called again until all receive
 * buffers were filled again.
 *
 * \param buf  The source buffer
 * \param len  Size of the buffer
 * \param port Port index to write data to
 * \return Number of bytes written
 */
unsigned port_write(char const *buf, unsigned len, unsigned port = 0)
{
  unsigned total = 0;
  Device_port &p = _ports[port];
  // Device-to-driver data is placed into the port's receive queue.
  Virtqueue *q = &p.rx;

  Data_buffer src;
  src.pos = const_cast<char*>(buf);
  src.left = len;
  Request_processor rp;

  while (src.left)
    {
      // One driver receive request per iteration; it is filled across its
      // whole descriptor chain if necessary.
      auto r = p.rx_ready() ? q->next_avail() : Virtqueue::Request();
      if (!r.valid())
        break;

      l4_uint32_t chunk = 0;
      try
        {
          Device_port::Buffer dst;
          rp.start(mem_info(), r, &dst);
          for (;;)
            {
              chunk += src.copy_to(&dst);
              if (!src.left)
                break;
              if (!rp.next(mem_info(), &dst))
                break;
            }
        }
      catch (Bad_descriptor const &)
        {
          // Flag the malformed descriptor chain; the request is still
          // retired below with the bytes written so far.
          device_error();
        }

      q->finish(r, this, chunk);
      total += chunk;
    }

  if (total < len)
    // Re-arm the tx_space_available() callback; delivered from kick().
    p.poll_out_req = true;

  return total;
}
/**
 * Callback called on DEVICE_READY event.
 *
 * \param value The value field of the control message, indicating if the
 *              initialization was successful.
 *
 * By default, all ports are announced to the driver when it signals
 * successful initialization. Override this function to perform custom
 * actions for a DEVICE_READY event; it is then your responsibility to
 * inform the driver about usable ports, see `port_add()`.
 */
void process_device_ready(l4_uint16_t value) override
{
  // A zero value means the driver failed to initialize; announce nothing.
  if (value)
    {
      unsigned const num = max_ports();
      for (unsigned idx = 0; idx < num; ++idx)
        port_add(idx);
    }
}
/**
 * Callback called on PORT_READY event.
 *
 * \param id    The id field of the control message, i.e. the port number.
 * \param value The value field of the control message, indicating if the
 *              initialization was successful.
 *
 * By default, this function opens the port if the driver is ready.
 * Otherwise, the port is removed if the driver failed to set it up
 * correctly. Override this function to perform custom actions for a
 * PORT_READY event, _after_ the generic management of the base class. It is
 * then your responsibility to inform the driver about connected or unusable
 * ports. See `port_open()` and `port_remove()`.
 */
void process_port_ready(l4_uint32_t id, l4_uint16_t value) override
{
  // Let the base class update the port state first.
  Virtio_con::process_port_ready(id, value);

  Port *p = port(id);
  if (p->status == Port::Port_failed)
    // Driver could not set up the port; withdraw it.
    port_remove(id);
  else if (p->status == Port::Port_ready)
    // Announce the port as connected.
    port_open(id, true);
}
/**
 * Callback called on PORT_OPEN event.
 *
 * \param id    The id field of the control message, i.e. the port number.
 * \param value The value field of the control message, indicating if the
 *              port was opened or closed.
 *
 * The default implementation does nothing. Override it to implement some
 * custom logic to respond to open/close events of the driver.
 */
virtual void process_port_open(l4_uint32_t id, l4_uint16_t value)
{
  // Parameters are deliberately unused in the default implementation.
  static_cast<void>(id);
  static_cast<void>(value);
}
protected:
/// Access the state of port \a idx (no range check performed here).
Port* port(unsigned idx) override
{
  return &_ports[idx];
}

/// Access the state of port \a idx (const flavor, no range check).
Port const *port(unsigned idx) const override
{
  return &_ports[idx];
}
private:
// Endpoint for driver notifications; its capability is handed out via
// device_notify_irq().
Irq_object _irq_handler;
// Per-port state (queues, poll flags, partially consumed read request).
cxx::unique_ptr<Device_port[]> _ports;
// IRQ used to kick the driver about used buffers and config changes.
L4Re::Util::Unique_cap<L4::Irq> _kick_driver_irq;
};
}}} // name space

View File

@@ -0,0 +1,541 @@
// vi:set ft=cpp: -*- Mode: C++ -*-
/*
* Copyright (C) 2025 Kernkonzept GmbH.
* Author(s): Christian Pötzsch <christian.poetzsch@kernkonzept.com>
*
* License: see LICENSE.spdx (in this directory or the directories above)
*/
#pragma once
#include <l4/re/error_helper>
#include <l4/sys/cxx/ipc_epiface>
#include <l4/l4virtio/server/virtio>
#include <l4/l4virtio/server/l4virtio>
#include <l4/l4virtio/l4virtio>
#include <l4/re/error_helper>
#include <l4/re/util/object_registry>
#include <l4/re/util/br_manager>
#include <l4/sys/cxx/ipc_epiface>
#include <l4/cxx/pair>
#include <vector>
#include <memory>
namespace L4virtio {
namespace Svr {
/* GPIO message status types (Gpio_response::status). */
enum : l4_uint8_t
{
  Gpio_status_ok = 0x0,
  Gpio_status_err = 0x1
};

/* GPIO message types (Gpio_request::type). */
enum : l4_uint8_t
{
  Gpio_msg_get_line_names = 0x1,
  Gpio_msg_get_direction = 0x2,
  Gpio_msg_set_direction = 0x3,
  Gpio_msg_get_value = 0x4,
  Gpio_msg_set_value = 0x5,
  Gpio_msg_set_irq_type = 0x6
};

/* GPIO value types (line level). */
enum : l4_uint8_t
{
  Gpio_low = 0x0,
  Gpio_high = 0x1
};

/* GPIO direction types. */
enum : l4_uint8_t
{
  Gpio_direction_none = 0x0,
  Gpio_direction_out = 0x1,
  Gpio_direction_in = 0x2
};

/* GPIO interrupt types. 'edge_both' is the combination of the two edge
 * bits; the level modes are separate bits. */
enum : l4_uint8_t
{
  Gpio_irq_type_none = 0x0,
  Gpio_irq_type_edge_rising = 0x1,
  Gpio_irq_type_edge_falling = 0x2,
  Gpio_irq_type_edge_both = 0x3,
  Gpio_irq_type_level_high = 0x4,
  Gpio_irq_type_level_low = 0x8
};

/* GPIO interrupt status types (Gpio_irq_response::status). */
enum : l4_uint8_t
{
  Gpio_irq_status_invalid = 0x0,
  Gpio_irq_status_valid = 0x1
};
/// Request header written by the driver on the request queue.
struct Gpio_request
{
  l4_uint16_t type;   ///< One of the Gpio_msg_* values.
  l4_uint16_t gpio;   ///< Index of the addressed gpio line.
  l4_uint32_t value;  ///< Request argument (direction, level, irq type).
};
static_assert(sizeof(Gpio_request) == 8,
              "Gpio_request contains padding bytes.");

/// Response written by the device for a request-queue message.
struct Gpio_response
{
  l4_uint8_t status;  ///< Gpio_status_ok or Gpio_status_err.
  l4_uint8_t value;   ///< Result value for the get-type requests.
};
static_assert(sizeof(Gpio_response) == 2,
              "Gpio_response contains padding bytes.");

/// Request written by the driver on the event queue to arm a gpio irq.
struct Gpio_irq_request
{
  l4_uint16_t gpio;   ///< Index of the gpio line to arm.
};
static_assert(sizeof(Gpio_irq_request) == 2,
              "Gpio_irq_request contains padding bytes.");

/// Response written by the device when an armed irq fires or is canceled.
struct Gpio_irq_response
{
  l4_uint8_t status;  ///< Gpio_irq_status_valid or Gpio_irq_status_invalid.
};
static_assert(sizeof(Gpio_irq_response) == 1,
              "Gpio_irq_response contains padding bytes.");

/// Pointer pair into guest memory describing one parsed request message.
struct Gpio_request_msg
{
  struct Gpio_request *in_hdr = nullptr;    // driver-written part
  struct Gpio_response *out_hdr = nullptr;  // device-written part
};

/// Pointer pair into guest memory describing one parsed event message.
struct Gpio_irq_request_msg
{
  struct Gpio_irq_request *in_hdr = nullptr;    // driver-written part
  struct Gpio_irq_response *out_hdr = nullptr;  // device-written part
};
/**
 * A server implementation of the virtio-gpio protocol.
 *
 * \tparam Request_handler The type that is used to handle incoming requests.
 *                         Needs to have:
 *                         - bool get_direction(l4_uint16_t gpio, l4_uint8_t *dir)
 *                         - bool set_direction(l4_uint16_t gpio, l4_uint8_t dir)
 *                         - bool get_value(l4_uint16_t gpio, l4_uint8_t *val)
 *                         - bool set_value(l4_uint16_t gpio, l4_uint8_t val)
 *                         - bool set_irq_type(l4_uint16_t gpio, l4_uint8_t mode)
 *                         - bool enable_irq(l4_uint16_t gpio,
 *                             std::shared_ptr<Virtio_gpio::Irq_handler> const &hdl)
 *                         functions.
 * \tparam Epiface         The Epiface to derive from. Defaults to
 *                         `L4virtio::Device`.
 */
template <typename Request_handler,
          typename Epiface = L4virtio::Device>
class Virtio_gpio : public L4virtio::Svr::Device,
                    public L4::Epiface_t<Virtio_gpio<Request_handler,
                                                     Epiface>,
                                         Epiface>
{
private:
  enum
  {
    queue_size = 128,
  };

public:
  using Gpio_request_handler = Request_handler;

  /**
   * Handler for a gpio pin irq.
   *
   * This notifies the virtio client that a gpio pin irq happened or the
   * operation was canceled.
   *
   * This needs to be called by any server implementation of the Virtio_gpio
   * class, after an irq was enabled with the enable method of the
   * Gpio_request_handler.
   */
  struct Irq_handler
  {
    Irq_handler(Virtio_gpio *gpio, L4virtio::Svr::Virtqueue *q,
                L4virtio::Svr::Virtqueue::Head_desc const &head,
                l4_uint8_t *status)
    : _gpio(gpio), _q(q), _head(head), _status(status)
    {}

    /// Complete the pending event request: the irq has triggered.
    void handle_irq()
    {
      *_status = Gpio_irq_status_valid;
      _q->finish(_head, _gpio, sizeof(Gpio_irq_response));
    }

    /// Complete the pending event request without a triggered irq.
    void cancel()
    {
      *_status = Gpio_irq_status_invalid;
      _q->finish(_head, _gpio, sizeof(Gpio_irq_response));
    }

  private:
    Virtio_gpio *_gpio;
    L4virtio::Svr::Virtqueue *_q;
    L4virtio::Svr::Virtqueue::Head_desc _head;
    l4_uint8_t *_status;
  };

  /**
   * Handler for the host irq.
   *
   * An `L4::Irqep_t` to handle irqs sent to the server.
   */
  struct Host_irq : L4::Irqep_t<Host_irq>
  {
    explicit Host_irq(Virtio_gpio *gpio)
    : L4::Irqep_t<Host_irq>(), _gpio(gpio) {}

    void handle_irq()
    { _gpio->handle_queue(); }

  private:
    Virtio_gpio *_gpio;
  };

  /**
   * Generic handler for the Virtio requests.
   */
  struct Request_processor : L4virtio::Svr::Request_processor
  {
    Request_processor(L4virtio::Svr::Virtqueue *q, Gpio_request_handler *hndlr,
                      Virtio_gpio *gpio)
    : _q(q), _req_handler(hndlr), _gpio(gpio), _head(), _req()
    {}

  protected:
    /// Fetch the next available request from the queue, if any.
    bool init_queue()
    {
      auto r = _q->next_avail();
      if (L4_UNLIKELY(!r))
        return false;

      _head = start(_gpio->mem_info(), r, &_req);
      return true;
    }

    /**
     * The driver prepares the GPIO request in two data parts:
     * 1st: in_hdr
     * 2nd: out_hdr
     *
     * This parses the two Data_buffers and creates the Gpio_* structure.
     * If the second (device-writable) part is missing, `out_hdr` stays
     * null; callers must check both pointers.
     */
    template <typename T>
    T get_request()
    {
      T req;
      req.in_hdr = reinterpret_cast<decltype(T::in_hdr)>(_req.pos);

      // Need the next output buffer.
      if (!next(_gpio->mem_info(), &_req) || !current_flags().write())
        return req;

      req.out_hdr = reinterpret_cast<decltype(T::out_hdr)>(_req.pos);
      return req;
    }

    struct Data_buffer : public L4virtio::Svr::Data_buffer
    {
      Data_buffer()
      {
        pos = nullptr;
        left = 0;
      }

      // This constructor is called from within start, so make it available.
      Data_buffer(L4virtio::Svr::Driver_mem_region const *r,
                  L4virtio::Svr::Virtqueue::Desc const &d,
                  L4virtio::Svr::Request_processor const *)
      {
        pos = static_cast<char *>(r->local(d.addr));
        left = d.len;
      }
    };

    L4virtio::Svr::Virtqueue *_q;
    Gpio_request_handler *_req_handler;
    Virtio_gpio *_gpio;
    L4virtio::Svr::Virtqueue::Head_desc _head;
    Data_buffer _req;
  };

  // Handler for the gpio request queue
  struct Req_processor : Request_processor
  {
    using Request_processor::Request_processor;

    void handle_request()
    {
      if (!this->_head)
        if (!this->init_queue())
          return;

      using Consumed_entry =
        cxx::Pair<L4virtio::Svr::Virtqueue::Head_desc, l4_uint32_t>;
      std::vector<Consumed_entry> consumed;

      for (;;)
        {
          Gpio_request_msg req = this->template get_request<Gpio_request_msg>();
          if (!req.in_hdr || !req.out_hdr)
            {
              this->_gpio->device_error();
              break;
            }

          // default response is error
          req.out_hdr->status = Gpio_status_err;
          switch (req.in_hdr->type)
            {
            case Gpio_msg_get_line_names:
              // we don't support this
              break;
            case Gpio_msg_get_direction:
              {
                if (this->_req_handler->get_direction(req.in_hdr->gpio,
                                                      &req.out_hdr->value))
                  req.out_hdr->status = Gpio_status_ok;
                break;
              }
            case Gpio_msg_set_direction:
              {
                if (req.in_hdr->value == Gpio_direction_none ||
                    req.in_hdr->value == Gpio_direction_out ||
                    req.in_hdr->value == Gpio_direction_in)
                  {
                    if (this->_req_handler->set_direction(req.in_hdr->gpio,
                                                          req.in_hdr->value))
                      req.out_hdr->status = Gpio_status_ok;
                  }
                break;
              }
            case Gpio_msg_get_value:
              {
                if (this->_req_handler->get_value(req.in_hdr->gpio,
                                                  &req.out_hdr->value))
                  req.out_hdr->status = Gpio_status_ok;
                break;
              }
            case Gpio_msg_set_value:
              {
                if (req.in_hdr->value == Gpio_low ||
                    req.in_hdr->value == Gpio_high)
                  {
                    if (this->_req_handler->set_value(req.in_hdr->gpio,
                                                      req.in_hdr->value))
                      req.out_hdr->status = Gpio_status_ok;
                  }
                break;
              }
            case Gpio_msg_set_irq_type:
              {
                if (req.in_hdr->value == Gpio_irq_type_none ||
                    req.in_hdr->value == Gpio_irq_type_edge_rising ||
                    req.in_hdr->value == Gpio_irq_type_edge_falling ||
                    req.in_hdr->value == Gpio_irq_type_edge_both ||
                    req.in_hdr->value == Gpio_irq_type_level_high ||
                    req.in_hdr->value == Gpio_irq_type_level_low)
                  {
                    if (this->_req_handler->set_irq_type(req.in_hdr->gpio,
                                                         req.in_hdr->value))
                      req.out_hdr->status = Gpio_status_ok;
                  }
                break;
              }
            }

          // Save the descriptors which are done
          consumed.emplace_back(this->_head, sizeof(Gpio_response));

          if (!this->init_queue())
            break;
        }

      // Put all finished descriptors back into the used list and notify the
      // driver.
      this->_q->finish(consumed.begin(), consumed.end(), this->_gpio);
      this->_head = Virtqueue::Head_desc();
    }
  };

  // Handler for the gpio event queue
  struct Irq_req_processor : Request_processor
  {
    using Request_processor::Request_processor;

    void handle_request()
    {
      if (!this->_head)
        if (!this->init_queue())
          return;

      for (;;)
        {
          // There is only one type of message in the event queue. This
          // basically arms (unmask) the irq.
          Gpio_irq_request_msg req =
            this->template get_request<Gpio_irq_request_msg>();
          if (!req.in_hdr || !req.out_hdr)
            {
              this->_gpio->device_error();
              break;
            }

          // Save the virtio descriptor for this event in an extra Irq_handler
          // object. The descriptor will be returned to the client when the irq
          // is triggered or canceled.
          this->_req_handler->enable_irq(
            req.in_hdr->gpio,
            std::make_shared<Irq_handler>(this->_gpio, this->_q, this->_head,
                                          &req.out_hdr->status));

          if (!this->init_queue())
            break;
        }

      this->_head = Virtqueue::Head_desc();
    }
  };

  struct Features : public L4virtio::Svr::Dev_config::Features
  {
    Features() = default;
    Features(l4_uint32_t raw) : L4virtio::Svr::Dev_config::Features(raw) {}

    /// Device supports gpio interrupts (the event queue).
    CXX_BITFIELD_MEMBER(0, 0, gpio_f_irq, raw);
  };

  /// Device-specific part of the config space.
  struct Gpio_config_space
  {
    l4_uint16_t ngpio;
    l4_uint8_t padding[2];
    l4_uint32_t gpio_names_size;
  };

  /**
   * Create a new gpio device.
   *
   * \param hndlr    Request handler implementing the gpio backend.
   * \param registry Registry the host irq endpoint is registered with.
   * \param ngpio    Number of gpio lines announced in the config space.
   */
  Virtio_gpio(Gpio_request_handler *hndlr,
              L4Re::Util::Object_registry *registry,
              l4_uint16_t ngpio)
  : L4virtio::Svr::Device(&_dev_config),
    _registry(registry),
    _dev_config(L4VIRTIO_VENDOR_KK, L4VIRTIO_ID_GPIO, 2),
    _host_irq(this),
    _req_processor(&_q[0], hndlr, this),
    _irq_req_processor(&_q[1], hndlr, this)
  {
    init_mem_info(2);

    // Queue 0 carries requests, queue 1 carries irq events.
    for (size_t i = 0; i < 2; i++)
      {
        reset_queue_config(i, queue_size);
        setup_queue(&_q[i], i, queue_size);
      }

    registry->register_irq_obj(&_host_irq);

    Features hf(0);
    hf.ring_indirect_desc() = true;
    hf.gpio_f_irq() = true;
    _dev_config.host_features(0) = hf.raw;
    _dev_config.set_host_feature(L4VIRTIO_FEATURE_VERSION_1);

    // fill gpio config space
    _dev_config.priv_config()->ngpio = ngpio;
    _dev_config.priv_config()->gpio_names_size = 0; // not supported

    _dev_config.reset_hdr();
  }

  ~Virtio_gpio()
  { _registry->unregister_obj(&_host_irq); }

  /// Notify the driver about used buffers unless notifications are suppressed.
  void notify_queue(L4virtio::Svr::Virtqueue *queue) override
  {
    if (queue->no_notify_guest())
      return;

    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_VRING);
    L4Re::chkipc(_notify_guest_irq->trigger(), "trigger guest irq");
  }

  /// Process pending messages on both the request and the event queue.
  void handle_queue()
  {
    _req_processor.handle_request();
    _irq_req_processor.handle_request();
  }

  void reset() override
  {}

  bool check_queues() override
  { return true; }

  int reconfig_queue(unsigned idx) override
  {
    if (idx >= sizeof(_q) / sizeof(_q[0]))
      return -L4_ERANGE;

    // NOTE(review): setup_queue()'s return value is passed through here,
    // while the sibling device servers return L4_EOK explicitly — confirm
    // the intended error convention.
    return setup_queue(_q + idx, idx, queue_size);
  }

  /// Signal a device-configuration change to the driver.
  void trigger_driver_config_irq() override
  {
    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_CONFIG);
    _notify_guest_irq->trigger();
  }

  L4::Ipc_svr::Server_iface *server_iface() const override
  { return L4::Epiface::server_iface(); }

  long op_set_status(L4virtio::Device::Rights r, unsigned status)
  { return L4virtio::Svr::Device::op_set_status(r, status); }

  long op_config_queue(L4virtio::Device::Rights r, unsigned queue)
  { return L4virtio::Svr::Device::op_config_queue(r, queue); }

  long op_device_config(L4virtio::Device::Rights r,
                        L4::Ipc::Cap<L4Re::Dataspace> &config_ds,
                        l4_addr_t &ds_offset)
  { return L4virtio::Svr::Device::op_device_config(r, config_ds, ds_offset); }

  L4::Cap<L4::Irq> device_notify_irq() const override
  { return L4::cap_cast<L4::Irq>(_host_irq.obj_cap()); }

  /// Receive the IRQ capability used to notify the driver.
  void register_single_driver_irq() override
  {
    _notify_guest_irq = L4Re::chkcap
      (server_iface()->template rcv_cap<L4::Irq>(0));
    L4Re::chksys(server_iface()->realloc_rcv_cap(0));
  }

private:
  L4Re::Util::Object_registry *_registry;
  L4virtio::Svr::Dev_config_t<Gpio_config_space> _dev_config;
  Host_irq _host_irq;
  L4::Cap<L4::Irq> _notify_guest_irq;
  L4virtio::Svr::Virtqueue _q[2];
  Req_processor _req_processor;
  Irq_req_processor _irq_req_processor;
};
} // namespace Svr
} // namespace L4virtio

View File

@@ -0,0 +1,382 @@
// vi:set ft=cpp: -*- Mode: C++ -*-
/*
* Copyright (C) 2024 Kernkonzept GmbH.
* Author(s): Martin Kuettler <martin.kuettler@kernkonzept.com>
*
* License: see LICENSE.spdx (in this directory or the directories above)
*/
#pragma once
#include <l4/re/error_helper>
#include <l4/sys/cxx/ipc_epiface>
#include <l4/l4virtio/server/virtio>
#include <l4/l4virtio/server/l4virtio>
#include <l4/l4virtio/l4virtio>
#include <l4/re/error_helper>
#include <l4/re/util/object_registry>
#include <l4/re/util/br_manager>
#include <l4/sys/cxx/ipc_epiface>
#include <cstring>
#include <vector>
#include <l4/cxx/pair>
namespace L4virtio {
namespace Svr {
/// Status codes returned in I2c_in_hdr::status.
enum : l4_uint8_t
{
  I2c_msg_ok = 0,
  I2c_msg_err = 1,
};

/// Flags of an i2c transfer request.
struct I2c_request_flags
{
  l4_uint32_t raw;
  /// Fail the following request if this one fails.
  CXX_BITFIELD_MEMBER(0, 0, fail_next, raw);
  /// Request is a read (device writes into the buffer).
  CXX_BITFIELD_MEMBER(1, 1, m_rd, raw);
};
static_assert(sizeof(I2c_request_flags) == 4,
              "I2c_request_flags contains padding bytes.");

/// Driver-written request header.
struct I2c_out_hdr
{
  l4_uint16_t addr;  ///< Client address; 7-bit address carried in bits 7:1.
  l4_uint16_t padding;
  I2c_request_flags flags;
};
static_assert(sizeof(I2c_out_hdr) == 8, "I2c_out_hdr contains padding bytes.");

/// Device-written response header.
struct I2c_in_hdr
{
  l4_uint8_t status;  ///< I2c_msg_ok or I2c_msg_err.
};
static_assert(sizeof(I2c_in_hdr) == 1, "I2c_in_hdr contains padding bytes.");

/// Parsed representation of one i2c request (built by Request_processor).
struct I2c_req
{
  struct I2c_out_hdr out_hdr;  // copy of the driver-written header
  unsigned buf_len;            // length of the optional data buffer
  l4_uint8_t* buf;             // optional data buffer in guest memory
  struct I2c_in_hdr *in_hdr;   // device-written status location
  unsigned write_size;         // bytes reported as written by the device

  void set_status(l4_uint8_t status)
  {
    in_hdr->status = status;
  }
};
/**
 * A server implementation of the virtio-i2c protocol.
 *
 * \tparam Request_handler The type that is used to handle incoming requests.
 *                         Needs to have
 *                         `bool handle_read(l4_uint16_t addr, l4_uint8_t *buf, unsigned len)`
 *                         and
 *                         `bool handle_write(l4_uint16_t addr, l4_uint8_t const *buf, unsigned len)`
 *                         functions returning true on success.
 * \tparam Epiface The Epiface to derive from. Defaults to
 *                 `L4virtio::Device`.
 */
template <typename Request_handler,
          typename Epiface = L4virtio::Device>
class Virtio_i2c : public L4virtio::Svr::Device,
                   public L4::Epiface_t<Virtio_i2c<Request_handler,
                                                   Epiface>,
                                        Epiface>
{
private:
  enum
  {
    Num_request_queues = 1,
    queue_size = 128,
  };

public:
  using I2c_request_handler = Request_handler;

  /**
   * Handler for the host irq.
   *
   * An `L4::Irqep_t` to handle irqs sent to the server.
   */
  class Host_irq : public L4::Irqep_t<Host_irq>
  {
  public:
    explicit Host_irq(Virtio_i2c *i2c) : L4::Irqep_t<Host_irq>(), _i2c(i2c) {}

    void handle_irq()
    {
      _i2c->handle_queue();
    }

  private:
    Virtio_i2c *_i2c;
  };

  /**
   * Handler for the Virtio requests.
   */
  class Request_processor : public L4virtio::Svr::Request_processor
  {
  public:
    struct Data_buffer : public L4virtio::Svr::Data_buffer
    {
      Data_buffer()
      {
        pos = nullptr;
        left = 0;
      }

      // This constructor is called from within start, so make it available.
      Data_buffer(L4virtio::Svr::Driver_mem_region const *r,
                  L4virtio::Svr::Virtqueue::Desc const &d,
                  L4virtio::Svr::Request_processor const *)
      {
        pos = static_cast<char *>(r->local(d.addr));
        left = d.len;
      }
    };

    Request_processor(L4virtio::Svr::Virtqueue *q, I2c_request_handler *hndlr,
                      Virtio_i2c *i2c)
    : _q(q), _req_handler(hndlr), _i2c(i2c), _head(), _req(),
      _fail_next(false)
    {}

    /// Fetch the next available request from the queue, if any.
    bool init_queue()
    {
      auto r = _q->next_avail();
      if (L4_UNLIKELY(!r))
        return false;

      _head = start(_i2c->mem_info(), r, &_req);
      return true;
    }

    /**
     * Linux prepares the I2C request in three data parts:
     * 1st: out_hdr
     * 2nd: buffer (optional)
     * 3rd: in_hdr
     *
     * This parses the three Data_buffers and recreates the
     * virtio_i2c_req structure.
     *
     * For a malformed request consisting of a single descriptor only, the
     * returned structure has a null `in_hdr`; callers must check it before
     * writing the status.
     */
    I2c_req get_request()
    {
      I2c_req request;
      memcpy(&request.out_hdr, _req.pos, sizeof(I2c_out_hdr));

      // number of bytes to be written in the answer.
      request.write_size = sizeof(I2c_in_hdr);
      request.buf_len = 0;
      // Stay null for requests that lack the respective part.
      request.buf = nullptr;
      request.in_hdr = nullptr;

      Data_buffer req;
      // 2nd part: either the optional buffer or the in_hdr
      if (next(_i2c->mem_info(), &req))
        {
          request.buf_len += req.left;
          request.buf = reinterpret_cast<l4_uint8_t *>(req.pos);
        }

      // 3rd part: in_hdr
      if (next(_i2c->mem_info(), &req))
        {
          // 2nd part was indeed a buffer
          if (request.out_hdr.flags.m_rd())
            request.write_size += request.buf_len;

          // actual 3rd part
          request.in_hdr = reinterpret_cast<I2c_in_hdr *>(req.pos);
        }
      else
        {
          // no 3rd part, 2nd part is in_hdr
          request.in_hdr = reinterpret_cast<I2c_in_hdr *>(request.buf);
          request.buf = nullptr;
          request.buf_len = 0;
        }

      return request;
    }

    void handle_request()
    {
      if (!_head)
        if (!init_queue())
          return;

      using Consumed_entry =
        cxx::Pair<L4virtio::Svr::Virtqueue::Head_desc, l4_uint32_t>;
      std::vector<Consumed_entry> consumed;

      for (;;)
        {
          auto r = get_request();
          // Reject requests without a place for the status byte (malformed
          // single-descriptor chain); previously this dereferenced an
          // uninitialized pointer.
          if (!r.in_hdr)
            {
              _i2c->device_error();
              break;
            }

          if (_fail_next)
            {
              r.set_status(I2c_msg_err);
              _fail_next = r.out_hdr.flags.fail_next();
            }
          else
            {
              bool ok;
              // addr carries the 7-bit client address in bits 7:1.
              l4_uint16_t i2c_addr = r.out_hdr.addr >> 1;
              if (r.out_hdr.flags.m_rd())
                ok = _req_handler->handle_read(i2c_addr, r.buf, r.buf_len);
              else
                ok = _req_handler->handle_write(i2c_addr, r.buf, r.buf_len);

              if (ok)
                {
                  r.set_status(I2c_msg_ok);
                  _fail_next = false;
                }
              else
                {
                  r.set_status(I2c_msg_err);
                  _fail_next = r.out_hdr.flags.fail_next();
                }
            }
          consumed.emplace_back(_head, r.write_size);

          if (!init_queue())
            break;
        }

      // Retire all processed descriptors and notify the driver.
      _q->finish(consumed.begin(), consumed.end(), _i2c);
      _head = Virtqueue::Head_desc();
    }

  private:
    L4virtio::Svr::Virtqueue *_q;
    I2c_request_handler *_req_handler;
    Virtio_i2c *_i2c;
    L4virtio::Svr::Virtqueue::Head_desc _head;
    Data_buffer _req;
    // Set when a failed request had fail_next set; forces the following
    // request to fail as well.
    bool _fail_next;
  };

  struct Features : public L4virtio::Svr::Dev_config::Features
  {
    Features() = default;
    Features(l4_uint32_t raw) : L4virtio::Svr::Dev_config::Features(raw) {}

    // This feature is mandatory. The driver is requested to abort
    // communication if this is not offered.
    CXX_BITFIELD_MEMBER(0, 0, zero_length_request, raw);
  };

  /**
   * Create a new i2c device.
   *
   * \param hndlr    Request handler implementing the i2c backend.
   * \param registry Registry the host irq endpoint is registered with.
   */
  Virtio_i2c(I2c_request_handler *hndlr, L4Re::Util::Object_registry *registry)
  : L4virtio::Svr::Device(&_dev_config),
    _dev_config(L4VIRTIO_VENDOR_KK, L4VIRTIO_ID_I2C, Num_request_queues),
    _req_handler(hndlr),
    _host_irq(this),
    _request_processor(&_q, hndlr, this)
  {
    init_mem_info(2);
    reset_queue_config(0, queue_size);
    setup_queue(&_q, 0, queue_size);
    registry->register_irq_obj(&_host_irq);

    Features hf(0);
    hf.ring_indirect_desc() = true;
    hf.zero_length_request() = true;
    _dev_config.host_features(0) = hf.raw;
    _dev_config.set_host_feature(L4VIRTIO_FEATURE_VERSION_1);
    _dev_config.reset_hdr();
  }

  /// Notify the driver about used buffers unless notifications are suppressed.
  void notify_queue(L4virtio::Svr::Virtqueue *) override
  {
    if (_q.no_notify_guest())
      return;

    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_VRING);
    L4Re::chkipc(_notify_guest_irq->trigger(), "trigger guest irq");
  }

  /// Process pending messages on the request queue.
  void handle_queue()
  {
    _request_processor.handle_request();
  }

  void reset() override
  {
  }

  bool check_queues() override
  {
    return true;
  }

  int reconfig_queue(unsigned idx) override
  {
    if (idx != 0)
      return -L4_ERANGE;

    setup_queue(&_q, 0, queue_size);
    return L4_EOK;
  }

  /// Signal a device-configuration change to the driver.
  void trigger_driver_config_irq() override
  {
    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_CONFIG);
    _notify_guest_irq->trigger();
  }

  L4::Ipc_svr::Server_iface *server_iface() const override
  {
    return L4::Epiface::server_iface();
  }

  long op_set_status(L4virtio::Device::Rights r, unsigned status)
  {
    return L4virtio::Svr::Device::op_set_status(r, status);
  }

  long op_config_queue(L4virtio::Device::Rights r, unsigned queue)
  {
    return L4virtio::Svr::Device::op_config_queue(r, queue);
  }

  long op_device_config(L4virtio::Device::Rights r,
                        L4::Ipc::Cap<L4Re::Dataspace> &config_ds,
                        l4_addr_t &ds_offset)
  {
    return L4virtio::Svr::Device::op_device_config(r, config_ds, ds_offset);
  }

  L4::Cap<L4::Irq> device_notify_irq() const override
  {
    return L4::cap_cast<L4::Irq>(_host_irq.obj_cap());
  }

  /// Receive the IRQ capability used to notify the driver.
  void register_single_driver_irq() override
  {
    _notify_guest_irq = L4Re::chkcap
      (server_iface()->template rcv_cap<L4::Irq>(0));
    L4Re::chksys(server_iface()->realloc_rcv_cap(0));
  }

private:
  L4virtio::Svr::Dev_config_t<L4virtio::Svr::No_custom_data> _dev_config;
  I2c_request_handler *_req_handler;
  L4virtio::Svr::Virtqueue _q;
  Host_irq _host_irq;
  L4::Cap<L4::Irq> _notify_guest_irq;
  Request_processor _request_processor;
};
} // namespace Svr
} // namespace L4virtio

View File

@@ -0,0 +1,229 @@
// vi:ft=cpp: -*- Mode: C++ -*-
/*
* Copyright (C) 2024 Kernkonzept GmbH.
* Author(s): Martin Kuettler <martin.kuettler@kernkonzept.com>
*
* License: see LICENSE.spdx (in this directory or the directories above)
*/
#pragma once
#include <l4/re/error_helper>
#include <l4/sys/cxx/ipc_epiface>
#include <l4/l4virtio/server/virtio>
#include <l4/l4virtio/server/l4virtio>
#include <l4/l4virtio/l4virtio>
namespace L4virtio {
namespace Svr {
/**
 * A server implementation of the virtio-rng protocol.
 *
 * \tparam Rnd_state The type that implements the random data generation.
 *                   `Rnd_state::get_random(int len, unsigned char *buf)`
 *                   is called to get len random bytes written into buf.
 *                   TODO: virtio-rng supports providing less random bytes
 *                   than requested. This API currently does not support
 *                   that, as I do not have a test case.
 * \tparam Epiface The Epiface to derive from. Defaults to `L4virtio::Device`.
 */
template <typename Rnd_state, typename Epiface = L4virtio::Device>
class Virtio_rng : public L4virtio::Svr::Device,
                   // Pass the fully specialized type to Epiface_t: using
                   // plain Virtio_rng<Rnd_state> here would name a different
                   // type whenever a custom Epiface is selected and break
                   // the CRTP dispatch.
                   public L4::Epiface_t<Virtio_rng<Rnd_state, Epiface>,
                                        Epiface>
{
private:
  enum
  {
    Num_request_queues = 1,
    queue_size = 128,
  };

public:
  using Random_state = Rnd_state;

  /**
   * Handler for the host irq.
   *
   * An `L4::Irqep_t` to handle irqs sent to the server.
   */
  class Host_irq : public L4::Irqep_t<Host_irq>
  {
  public:
    explicit Host_irq(Virtio_rng *rng) : L4::Irqep_t<Host_irq>(), _rng(rng) {}

    void handle_irq()
    {
      _rng->handle_queue();
    }

  private:
    Virtio_rng *_rng;
  };

  /**
   * Handler for the Virtio requests.
   */
  class Request_processor : public L4virtio::Svr::Request_processor
  {
  public:
    struct Data_buffer : public L4virtio::Svr::Data_buffer
    {
      // Zero-initialize like the other virtio device servers do; the
      // defaulted constructor left pos/left indeterminate.
      Data_buffer()
      {
        pos = nullptr;
        left = 0;
      }

      // This constructor is called from within start, so make it available.
      Data_buffer(L4virtio::Svr::Driver_mem_region const *r,
                  L4virtio::Svr::Virtqueue::Desc const &d,
                  L4virtio::Svr::Request_processor const *)
      {
        pos = static_cast<char *>(r->local(d.addr));
        left = d.len;
      }
    };

    Request_processor(L4virtio::Svr::Virtqueue *q, Random_state *rnd,
                      Virtio_rng *rng)
    : _q(q), _rnd(rnd), _rng(rng), _head() {}

    /// Fetch the next available request from the queue, if any.
    bool init_queue()
    {
      auto r = _q->next_avail();
      if (L4_UNLIKELY(!r))
        return false;

      _head = start(_rng->mem_info(), r, &_req);
      return true;
    }

    /// Fill every pending request buffer with random data and retire it.
    void handle_request()
    {
      if (!_head)
        if (!init_queue())
          return;

      for (;;)
        {
          auto const pos = reinterpret_cast<unsigned char *>(_req.pos);
          _rnd->get_random(_req.left, pos);
          // Report the whole buffer as filled.
          _q->finish(_head, _rng, _req.left);

          if (!init_queue())
            break;
        }
    }

  private:
    L4virtio::Svr::Virtqueue *_q;
    Random_state *_rnd;
    Virtio_rng *_rng;
    L4virtio::Svr::Virtqueue::Head_desc _head;
    Data_buffer _req;
  };

  /**
   * Create a new rng device.
   *
   * \param rnd      Backend providing the random data.
   * \param registry Registry the host irq endpoint is registered with.
   */
  Virtio_rng(Random_state *rnd, L4::Registry_iface *registry)
  : L4virtio::Svr::Device(&_dev_config),
    _dev_config(L4VIRTIO_VENDOR_KK, L4VIRTIO_ID_RNG, Num_request_queues),
    _rnd(rnd),
    _host_irq(this),
    _request_processor(&_q, rnd, this)
  {
    init_mem_info(2);
    reset_queue_config(0, queue_size);
    setup_queue(&_q, 0, queue_size);
    // NOTE(review): _host_irq is never unregistered; the device is expected
    // to live as long as the registry.
    registry->register_irq_obj(&_host_irq);

    L4virtio::Svr::Dev_config::Features hf;
    hf.ring_indirect_desc() = true;
    _dev_config.host_features(0) = hf.raw;
    _dev_config.set_host_feature(L4VIRTIO_FEATURE_VERSION_1);
    _dev_config.reset_hdr();
  }

  /// Notify the driver about used buffers unless notifications are suppressed.
  void notify_queue(L4virtio::Svr::Virtqueue *) override
  {
    if (_q.no_notify_guest())
      return;

    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_VRING);
    L4Re::chkipc(_notify_guest_irq->trigger(), "trigger guest irq");
  }

  /// Process pending messages on the request queue.
  void handle_queue()
  {
    _request_processor.handle_request();
  }

  void reset() override
  {
  }

  bool check_queues() override
  {
    return true;
  }

  int reconfig_queue(unsigned idx) override
  {
    if (idx != 0)
      return -L4_ERANGE;

    setup_queue(&_q, 0, queue_size);
    return L4_EOK;
  }

  /// Signal a device-configuration change to the driver.
  void trigger_driver_config_irq() override
  {
    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_CONFIG);
    _notify_guest_irq->trigger();
  }

  L4::Ipc_svr::Server_iface *server_iface() const override
  {
    return L4::Epiface::server_iface();
  }

  long op_set_status(L4virtio::Device::Rights r, unsigned status)
  {
    return L4virtio::Svr::Device::op_set_status(r, status);
  }

  long op_config_queue(L4virtio::Device::Rights r, unsigned queue)
  {
    return L4virtio::Svr::Device::op_config_queue(r, queue);
  }

  long op_device_config(L4virtio::Device::Rights r,
                        L4::Ipc::Cap<L4Re::Dataspace> &config_ds,
                        l4_addr_t &ds_offset)
  {
    return L4virtio::Svr::Device::op_device_config(r, config_ds, ds_offset);
  }

  L4::Cap<L4::Irq> device_notify_irq() const override
  {
    return L4::cap_cast<L4::Irq>(_host_irq.obj_cap());
  }

  /// Receive the IRQ capability used to notify the driver.
  void register_single_driver_irq() override
  {
    _notify_guest_irq = L4Re::chkcap
      (server_iface()->template rcv_cap<L4::Irq>(0));
    L4Re::chksys(server_iface()->realloc_rcv_cap(0));
  }

private:
  L4virtio::Svr::Dev_config_t<L4virtio::Svr::No_custom_data> _dev_config;
  Random_state *_rnd;
  L4virtio::Svr::Virtqueue _q;
  Host_irq _host_irq;
  L4::Cap<L4::Irq> _notify_guest_irq;
  Request_processor _request_processor;
};
} // namespace Svr
} // namespace L4virtio

View File

@@ -0,0 +1,892 @@
// vi:ft=cpp
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2024 Kernkonzept GmbH.
* Author(s): Christian Pötzsch <christian.poetzsch@kernkonzept.com>
*
* License: see LICENSE.spdx (in this directory or the directories above)
*/
#pragma once
#include <l4/cxx/bitmap>
#include <l4/l4virtio/l4virtio>
#include <l4/l4virtio/server/l4virtio>
#include <l4/l4virtio/server/virtio>
#include <l4/re/util/object_registry>
#include <map>
#include <vector>
namespace L4virtio { namespace Svr { namespace Scmi {
/// SCMI version (encoded as major << 16 | minor; 0x20000 = v2.0)
enum
{
  Version = 0x20000
};

/// SCMI error codes (returned as the first word of a response payload)
enum
{
  Success = 0,
  Not_supported = -1,
  Invalid_parameters = -2,
  Denied = -3,
  Not_found = -4,
  Out_of_range = -5,
  Busy = -6,
  Comms_error = -7,
  Generic_error = -8,
  Hardware_error = -9,
  Protocol_error = -10
};

/// SCMI header (32-bit message header; field layout per the SCMI spec)
struct Scmi_hdr_t
{
  l4_uint32_t hdr_raw = 0;
  CXX_BITFIELD_MEMBER(18, 27, token, hdr_raw);
  CXX_BITFIELD_MEMBER(10, 17, protocol_id, hdr_raw);
  CXX_BITFIELD_MEMBER( 8, 9, message_type, hdr_raw);
  CXX_BITFIELD_MEMBER( 0, 7, message_id, hdr_raw);
};

/// SCMI protocol ids
enum
{
  Base_protocol = 0x10,
  Power_domain_management_protocol = 0x11,
  System_power_management_protocol = 0x12,
  Performance_domain_management_protocol = 0x13,
  Clock_management_protocol = 0x14,
  Sensor_management_protocol = 0x15,
  Reset_domain_management_protocol = 0x16,
  Voltage_domain_management_protocol = 0x17
};

/// SCMI common protocol message ids (shared by every protocol)
enum
{
  Protocol_version = 0x0,
  Protocol_attributes = 0x1,
  Protocol_message_attributes = 0x2,
};

/// SCMI base protocol message ids
enum
{
  Base_discover_vendor = 0x3,
  Base_discover_sub_vendor = 0x4,
  Base_discover_implementation_version = 0x5,
  Base_discover_list_protocols = 0x6,
  Base_discover_agent = 0x7,
  Base_notify_errors = 0x8,
  Base_set_device_permissions = 0x9,
  Base_set_protocol_permissions = 0xa,
  Base_reset_agent_configuration = 0xb
};

/// SCMI base protocol attributes (Protocol_attributes response payload)
struct Base_attr_t
{
  l4_uint32_t attr_raw = 0;
  CXX_BITFIELD_MEMBER(8, 15, nagents, attr_raw);
  CXX_BITFIELD_MEMBER(0, 7, nprots, attr_raw);
};

/// SCMI performance domain management protocol message ids
enum
{
  Performance_domain_attributes = 0x3,
  Performance_describe_levels = 0x4,
  Performance_limits_set = 0x5,
  Performance_limits_get = 0x6,
  Performance_level_set = 0x7,
  Performance_level_get = 0x8,
  Performance_notify_limits = 0x9,
  Performance_notify_level = 0xa,
  Performance_describe_fastchannel = 0xb,
};

/// SCMI performance protocol attributes (Protocol_attributes response)
struct Performance_attr_t
{
  l4_uint32_t attr_raw = 0;
  CXX_BITFIELD_MEMBER(16, 16, power, attr_raw);
  CXX_BITFIELD_MEMBER( 0, 15, domains, attr_raw);
  l4_uint32_t stat_addr_low = 0;
  l4_uint32_t stat_addr_high = 0;
  l4_uint32_t stat_len = 0;
};

/// SCMI performance domain protocol attributes
struct Performance_domain_attr_t
{
  l4_uint32_t attr_raw = 0;
  CXX_BITFIELD_MEMBER(31, 31, set_limits, attr_raw);
  CXX_BITFIELD_MEMBER(30, 30, set_perf_level, attr_raw);
  CXX_BITFIELD_MEMBER(29, 29, perf_limits_change_notify, attr_raw);
  CXX_BITFIELD_MEMBER(28, 28, perf_level_change_notify, attr_raw);
  CXX_BITFIELD_MEMBER(27, 27, fast_channel, attr_raw);
  l4_uint32_t rate_limit_raw = 0;
  CXX_BITFIELD_MEMBER( 0, 19, rate_limit, rate_limit_raw);
  l4_uint32_t sustained_freq = 0;
  l4_uint32_t sustained_perf_level = 0;
  char name[16] = { 0 };
};

/// SCMI performance describe levels numbers
struct Performance_describe_levels_n_t
{
  l4_uint32_t num_levels_raw = 0;
  CXX_BITFIELD_MEMBER(16, 31, nremain_perf_levels, num_levels_raw);
  CXX_BITFIELD_MEMBER( 0, 11, nperf_levels, num_levels_raw);
};

/// SCMI performance describe level
struct Performance_describe_level_t
{
  l4_uint32_t perf_level = 0;
  l4_uint32_t power_cost = 0;
  l4_uint16_t trans_latency = 0;
  l4_uint16_t res0 = 0;  // reserved, must be zero
};
/**
 * Worker that pulls SCMI requests from a single virtqueue and dispatches
 * them to protocol handlers.
 *
 * \tparam OBSERV  Device type; must provide mem_info() (used by
 *                 Request_processor) and proto(id) for protocol lookup, and
 *                 is passed to Virtqueue::finish() for driver notification.
 */
template<typename OBSERV>
struct Queue_worker : Request_processor
{
  Queue_worker(OBSERV *o, Virtqueue *queue)
  : o(o), q(queue)
  {}

  /**
   * Fetch the next available descriptor chain from the queue.
   *
   * \retval true   A new request was started; `head` and `req` are valid.
   * \retval false  No request pending.
   */
  bool init_queue()
  {
    auto r = q->next_avail();
    if (L4_UNLIKELY(!r))
      return false;
    head = start(o->mem_info(), r, &req);
    return true;
  }

  /// Advance to the next descriptor of the current chain.
  /// \return false when the chain is exhausted.
  bool next()
  { return Request_processor::next(o->mem_info(), &req); }

  /// Complete the current request; `total` is the number of bytes written.
  void finish(l4_uint32_t total)
  { q->finish(head, o, total); }

  /**
   * Copy `s` bytes from the descriptor chain into `data`.
   *
   * \return Number of bytes copied (== `s`), or -1 if the chain ended
   *         before `s` bytes were available.
   */
  template<typename T>
  l4_ssize_t read(Data_buffer *buf, T *data, l4_size_t s = sizeof(T))
  {
    buf->pos = reinterpret_cast<char *>(data);
    buf->left = s;
    l4_size_t chunk = 0;
    for (;;)
      {
        chunk += req.copy_to(buf);
        // Advance to the next descriptor when the current one is consumed.
        // Bail out if the chain is exhausted; otherwise a chain shorter
        // than `s` bytes would make this loop spin forever.
        if (req.done() && !next())
          break;
        if (!buf->left)
          break;
      }
    if (chunk != s)
      return -1;
    return chunk;
  }

  /**
   * Copy `s` bytes from `data` into the descriptor chain.
   *
   * \return Number of bytes copied (== `s`), or -1 if the chain had no
   *         room for `s` bytes.
   */
  template<typename T>
  l4_ssize_t write(Data_buffer *buf, T *data, l4_size_t s = sizeof(T))
  {
    buf->pos = reinterpret_cast<char *>(data);
    buf->left = s;
    l4_size_t chunk = 0;
    for (;;)
      {
        chunk += buf->copy_to(&req);
        // Same guard as in read(): stop when the chain runs out of
        // descriptors instead of looping forever.
        if (req.done() && !next())
          break;
        if (!buf->left)
          break;
      }
    if (chunk != s)
      return -1;
    return chunk;
  }

  /**
   * Process all pending requests on the queue.
   *
   * \return 0 when all requests were handled (or none were pending), a
   *         negative value on a transport error (short/overlong chains or
   *         a bad descriptor reported by the driver).
   */
  l4_ssize_t handle_request()
  {
    try
      {
        if (!head && L4_UNLIKELY(!init_queue()))
          return 0;
        for (;;)
          {
            l4_ssize_t total = 0;
            l4_ssize_t res = 0;
            Scmi_hdr_t hdr;
            Data_buffer buf = Data_buffer(&hdr);
            if ((res = read(&buf, &hdr)) < 0)
              return res;
            // Search/execute handler for given protocol
            auto proto = o->proto(hdr.protocol_id());
            if (proto)
              {
                if ((res = proto->handle_request(hdr, buf, this)) < 0)
                  return res;
                total += res;
              }
            else
              {
                // Unknown protocol: echo the header and report
                // Not_supported as status.
                if ((res = write(&buf, &hdr)) < 0)
                  return res;
                total += res;
                l4_int32_t status = Not_supported;
                if ((res = write(&buf, &status)) < 0)
                  return res;
                total += res;
              }
            finish(total);
            head = Virtqueue::Head_desc();
            if (L4_UNLIKELY(!init_queue()))
              return 0;
          }
      }
    catch (L4virtio::Svr::Bad_descriptor const &e)
      {
        return e.error;
      }
    return 0;
  }

private:
  // View of a single descriptor as a flat byte buffer.
  struct Buffer : Data_buffer
  {
    Buffer() = default;
    Buffer(L4virtio::Svr::Driver_mem_region const *r,
           Virtqueue::Desc const &d, Request_processor const *)
    {
      pos = static_cast<char *>(r->local(d.addr));
      left = d.len;
    }
  };
  /// Current head
  Virtqueue::Head_desc head;
  Buffer req;
  /// Pointer to the device the end point belongs to
  OBSERV *o;
  Virtqueue *q;
};
/**
 * Base class for all protocols.
 *
 * Defines an interface for processing the virtio buffers for the implemented
 * protocol.
 */
template<typename OBSERV>
struct Proto
{
  // Polymorphic base: allow safe deletion through a Proto pointer.
  virtual ~Proto() = default;

  /**
   * Handle one SCMI message addressed to this protocol.
   *
   * \param hdr  Parsed SCMI message header.
   * \param buf  Data buffer state shared with the queue worker.
   * \param qw   Queue worker used for reading parameters / writing replies.
   *
   * \return Number of reply bytes written, or a negative value on error.
   */
  virtual l4_ssize_t handle_request(Scmi_hdr_t &hdr,
                                    Data_buffer &buf,
                                    Queue_worker<OBSERV> *qw) = 0;
};
/**
* A server implementation of the virtio-scmi protocol.
*
* Use this class as a base to implement your own specific SCMI device.
*
* SCMI defines multiple protocols which can be optionally handled. This server
* implementation is flexible enough to handle any combination of them. The
* user of this server has to deviate from the provided Proto classes (for the
* protocols he want to handle) and needs to implement the required callbacks.
*
* Right now, support for the base and the performance protocol is provided.
*
* The base protocol is mandatory.
*
* If you want to use this from a Uvmm Linux guest, the device tree needs to
* look something like this:
*
* \code
* firmware {
* scmi {
* compatible = "arm,scmi-virtio";
*
* #address-cells = <1>;
* #size-cells = <0>;
*
* // ... supported protocols ...
* };
* };
* \endcode
*/
class Scmi_dev : public L4virtio::Svr::Device
{
  // Convenience wrapper around the device feature bit set.
  struct Features : L4virtio::Svr::Dev_config::Features
  {
    Features() = default;
    Features(l4_uint32_t raw) : L4virtio::Svr::Dev_config::Features(raw) {}
  };
  // Receive endpoint for driver-to-device queue notifications.
  struct Host_irq : public L4::Irqep_t<Host_irq>
  {
    Scmi_dev *c;
    explicit Host_irq(Scmi_dev *c) : c(c) {}
    void handle_irq() { c->kick(); }
  };
  enum
  {
    Queue_size = 0x10 // Number of descriptors per virtqueue
  };
public:
  /**
   * Create the SCMI device and register its notification IRQ.
   *
   * \param registry  Registry the host notification IRQ is registered with.
   */
  Scmi_dev(L4Re::Util::Object_registry *registry)
  : L4virtio::Svr::Device(&_dev_config),
    _dev_config(L4VIRTIO_VENDOR_KK, L4VIRTIO_ID_SCMI, 1),
    _host_irq(this),
    _request_worker(this, &_q[0])
  {
    init_mem_info(2);
    L4Re::chkcap(registry->register_irq_obj(&_host_irq),
                 "Register irq object");
    Features hf(0);
    hf.ring_indirect_desc() = true;
    _dev_config.host_features(0) = hf.raw;
    _dev_config.set_host_feature(L4VIRTIO_FEATURE_VERSION_1);
    _dev_config.reset_hdr();
    reset();
  }
  /// Add an actual protocol implementation with the given id to the server.
  void add_proto(l4_uint32_t id, Proto<Scmi_dev> *proto)
  { _protos.insert({id, proto}); }
  /// Look up the handler for protocol `id`; nullptr if none registered.
  Proto<Scmi_dev> *proto(l4_uint32_t id) const
  {
    // Single lookup instead of find() followed by at().
    auto it = _protos.find(id);
    return it != _protos.end() ? it->second : nullptr;
  }
  /// Notify the driver that buffers on `queue` were used.
  void notify_queue(L4virtio::Virtqueue *queue)
  {
    if (queue->no_notify_guest())
      return;
    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_VRING);
    _kick_guest_irq->trigger();
  }
private:
  L4::Cap<L4::Irq> device_notify_irq() const override
  { return L4::cap_cast<L4::Irq>(_host_irq.obj_cap()); }
  void register_single_driver_irq() override
  {
    _kick_guest_irq = L4Re::Util::Unique_cap<L4::Irq>(
      L4Re::chkcap(server_iface()->template rcv_cap<L4::Irq>(0)));
    L4Re::chksys(server_iface()->realloc_rcv_cap(0));
  }
  // Driver signaled the queue: process pending requests.
  void kick()
  {
    if (_request_worker.handle_request() < 0)
      device_error();
  }
  void reset() override
  {
    for (Virtqueue &q : _q)
      q.disable();
    for (l4_uint32_t i = 0; i < _dev_config.num_queues(); i++)
      reset_queue_config(i, Queue_size);
  }
  bool check_queues() override
  {
    return true;
  }
  int reconfig_queue(unsigned idx) override
  {
    if (idx >= sizeof(_q) / sizeof(_q[0]))
      return -L4_ERANGE;
    return setup_queue(_q + idx, idx, Queue_size);
  }
  void trigger_driver_config_irq() override
  {
    _dev_config.add_irq_status(L4VIRTIO_IRQ_STATUS_CONFIG);
    _kick_guest_irq->trigger();
  }
  L4virtio::Svr::Dev_config_t<L4virtio::Svr::No_custom_data> _dev_config;
  Host_irq _host_irq;
  L4Re::Util::Unique_cap<L4::Irq> _kick_guest_irq;
  Virtqueue _q[1];
  Queue_worker<Scmi_dev> _request_worker;
  std::map<l4_uint32_t, Proto<Scmi_dev> *> _protos;
};
/**
 * Base class for the SCMI base protocol.
 *
 * Use this class as a base to implement the base protocol.
 */
class Base_proto : public Proto<Scmi_dev>
{
  /// Return the base protocol attributes, like the number of supported
  /// protocols.
  virtual l4_int32_t fill_attr(Base_attr_t *attr) const = 0;
  /// Return a list of supported protocols.
  virtual std::vector<l4_uint32_t> prots() const = 0;
  /**
   * Dispatch a single base-protocol message and write the reply.
   *
   * \return Number of reply bytes written, or a negative value on a
   *         transport error.
   */
  l4_ssize_t handle_request(Scmi_hdr_t &hdr, Data_buffer &buf,
                            Queue_worker<Scmi_dev> *qw) override
  {
    l4_ssize_t total = 0;
    l4_ssize_t res = 0;
    switch (hdr.message_id())
      {
      case Protocol_version:
        {
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          struct
          {
            l4_int32_t status = Success;
            l4_uint32_t version = Version;
          } version;
          if ((res = qw->write(&buf, &version)) < 0)
            return res;
          total += res;
          break;
        }
      case Protocol_attributes:
        {
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          Base_attr_t ba;
          l4_int32_t status = fill_attr(&ba);
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          // Attributes are only appended on success.
          if (status == Success)
            {
              if ((res = qw->write(&buf, &ba)) < 0)
                return res;
              total += res;
            }
          break;
        }
      case Protocol_message_attributes:
        {
          l4_uint32_t msg_id = 0;
          if ((res = qw->read(&buf, &msg_id)) < 0)
            return res;
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          struct
          {
            l4_int32_t status = Not_found;
            l4_uint32_t attr = 0;
          } attr;
          // All messages up to Base_discover_list_protocols are handled
          // below; everything else reports Not_found.
          if (msg_id >= Protocol_version &&
              msg_id <= Base_discover_list_protocols)
            attr.status = Success;
          if ((res = qw->write(&buf, &attr)) < 0)
            return res;
          total += res;
          break;
        }
      case Base_discover_vendor:
        {
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          struct
          {
            l4_int32_t status = Success;
            l4_uint8_t vendor_identifier[16] = { "L4Re" };
          } vendor;
          if ((res = qw->write(&buf, &vendor)) < 0)
            return res;
          total += res;
          break;
        }
      case Base_discover_sub_vendor:
        {
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          struct
          {
            l4_int32_t status = Success;
            l4_uint8_t vendor_identifier[16] = { "Scmi" };
          } vendor;
          if ((res = qw->write(&buf, &vendor)) < 0)
            return res;
          total += res;
          break;
        }
      case Base_discover_implementation_version:
        {
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          struct
          {
            l4_int32_t status = Success;
            l4_uint32_t version = 1;
          } version;
          if ((res = qw->write(&buf, &version)) < 0)
            return res;
          total += res;
          break;
        }
      case Base_discover_list_protocols:
        {
          l4_uint32_t skip = 0;
          // NOTE(review): `skip` is read from the request but not applied
          // when building the protocol list below — all agents asking with
          // skip > 0 get the full list again. Confirm against users.
          if ((res = qw->read(&buf, &skip)) < 0)
            return res;
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          auto p = prots();
          struct
          {
            l4_int32_t status = Success;
            l4_uint32_t num;
          } proto;
          proto.num = p.size();
          if ((res = qw->write(&buf, &proto)) < 0)
            return res;
          total += res;
          // Array of uint32 where 4 protocol ids are packed into one
          // uint32, so round up to a multiple of 4 bytes and fill it byte
          // by byte. Use a vector instead of a VLA: VLAs are not standard
          // C++ and a zero-sized one (empty protocol list) is undefined
          // behavior.
          std::vector<l4_uint8_t> parr((p.size() + 3) / 4 * 4, 0);
          for (l4_size_t i = 0; i < p.size(); i++)
            parr[i] = p[i];
          if ((res = qw->write(&buf, parr.data(), parr.size())) < 0)
            return res;
          total += res;
          break;
        }
      default:
        {
          // Unknown message id: reply with Not_supported.
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          l4_int32_t status = Not_supported;
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          break;
        }
      }
    return total;
  }
};
/**
 * Base class for the SCMI performance protocol.
 *
 * Use this class as a base to implement the performance protocol.
 *
 * If you want to use this from a Uvmm Linux guest, the device tree needs to
 * look something like this:
 *
 * \code
 * firmware {
 *   scmi {
 *     compatible = "arm,scmi-virtio";
 *
 *     #address-cells = <1>;
 *     #size-cells = <0>;
 *
 *     cpufreq: protocol@13 {
 *       reg = <0x13>;
 *       #clock-cells = <1>;
 *     };
 *   };
 * };
 * ....
 *
 * cpu@0 {
 *   device_type = "cpu";
 *   reg = <0x0>;
 *   clocks = <&cpufreq 0>; // domain_id
 * };
 * \endcode
 */
class Perf_proto : public Proto<Scmi_dev>
{
  /// Return the performance protocol attributes, like the number of supported
  /// domains.
  virtual l4_int32_t fill_attr(Performance_attr_t *attr) const = 0;
  /// Return the performance protocol domain attributes for a given domain,
  /// like if setting the performance level or limits is allowed.
  virtual l4_int32_t fill_domain_attr(l4_uint32_t domain_id,
                                      Performance_domain_attr_t *attr) const = 0;
  /// Return the amount of supported performance levels for a given domain.
  virtual l4_int32_t fill_describe_levels_n(l4_uint32_t domain_id,
                                            l4_uint32_t level_idx,
                                            Performance_describe_levels_n_t *attr) const = 0;
  /// Return a list of supported performance levels with their attributes for a
  /// given domain.
  virtual l4_int32_t fill_describe_levels(l4_uint32_t domain_id,
                                          l4_uint32_t level_idx,
                                          l4_uint32_t num,
                                          Performance_describe_level_t *attr) const = 0;
  /// Set the performance level for a given domain.
  virtual l4_int32_t level_set(l4_uint32_t domain_id,
                               l4_uint32_t perf_level) = 0;
  /// Get the performance level for a given domain.
  virtual l4_int32_t level_get(l4_uint32_t domain_id,
                               l4_uint32_t *perf_level) const = 0;
  /**
   * Dispatch a single performance-protocol message and write the reply.
   *
   * \return Number of reply bytes written, or a negative value on a
   *         transport error.
   */
  l4_ssize_t handle_request(Scmi_hdr_t &hdr, Data_buffer &buf,
                            Queue_worker<Scmi_dev> *qw) override
  {
    l4_ssize_t total = 0;
    l4_ssize_t res = 0;
    switch (hdr.message_id())
      {
      case Protocol_version:
        {
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          struct
          {
            l4_int32_t status = Success;
            l4_uint32_t version = Version;
          } version;
          if ((res = qw->write(&buf, &version)) < 0)
            return res;
          total += res;
          break;
        }
      case Protocol_attributes:
        {
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          Performance_attr_t pa;
          l4_int32_t status = fill_attr(&pa);
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          // Attributes are only appended on success.
          if (status == Success)
            {
              if ((res = qw->write(&buf, &pa)) < 0)
                return res;
              total += res;
            }
          break;
        }
      case Protocol_message_attributes:
        {
          l4_uint32_t msg_id = 0;
          if ((res = qw->read(&buf, &msg_id)) < 0)
            return res;
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          struct
          {
            l4_int32_t status = Not_found;
            l4_uint32_t attr_raw = 0;
            CXX_BITFIELD_MEMBER(0, 0, fast_channel, attr_raw); // ignored
          } attr;
          // Supported: common messages, domain attributes, describe levels,
          // and level set/get. Limits and notifications are not handled.
          if ((msg_id >= Protocol_version &&
               msg_id <= Performance_describe_levels) ||
              (msg_id >= Performance_level_set &&
               msg_id <= Performance_level_get))
            attr.status = Success;
          if ((res = qw->write(&buf, &attr)) < 0)
            return res;
          total += res;
          break;
        }
      case Performance_domain_attributes:
        {
          l4_uint32_t domain_id = 0;
          if ((res = qw->read(&buf, &domain_id)) < 0)
            return res;
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          Performance_domain_attr_t attr;
          l4_int32_t status = fill_domain_attr(domain_id, &attr);
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          if (status == Success)
            {
              if ((res = qw->write(&buf, &attr)) < 0)
                return res;
              total += res;
            }
          break;
        }
      case Performance_describe_levels:
        {
          struct
          {
            l4_uint32_t domain_id = 0;
            l4_uint32_t level_idx = 0;
          } param;
          if ((res = qw->read(&buf, &param)) < 0)
            return res;
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          // First figure out how many levels we support
          Performance_describe_levels_n_t attr;
          l4_int32_t status = fill_describe_levels_n(param.domain_id,
                                                     param.level_idx, &attr);
          if (status != Success)
            {
              // On error bail out early
              if ((res = qw->write(&buf, &status)) < 0)
                return res;
              total += res;
              break;
            }
          // Now fetch the actual levels. Use a vector instead of a VLA:
          // VLAs are not standard C++ and a zero-sized one (no levels)
          // is undefined behavior.
          std::vector<Performance_describe_level_t> levels(
            attr.nperf_levels().get());
          status = fill_describe_levels(param.domain_id, param.level_idx,
                                        attr.nperf_levels(), levels.data());
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          if (status == Success)
            {
              // Write both answers to the client
              if ((res = qw->write(&buf, &attr)) < 0)
                return res;
              total += res;
              if ((res = qw->write(&buf, levels.data(),
                                   levels.size()
                                     * sizeof(Performance_describe_level_t)))
                  < 0)
                return res;
              total += res;
            }
          break;
        }
      case Performance_level_set:
        {
          struct
          {
            l4_uint32_t domain_id;
            l4_uint32_t perf_level;
          } param;
          if ((res = qw->read(&buf, &param)) < 0)
            return res;
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          l4_int32_t status = level_set(param.domain_id, param.perf_level);
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          break;
        }
      case Performance_level_get:
        {
          l4_uint32_t domain_id = 0;
          if ((res = qw->read(&buf, &domain_id)) < 0)
            return res;
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          l4_uint32_t perf_level;
          l4_int32_t status = level_get(domain_id, &perf_level);
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          if (status == Success)
            {
              if ((res = qw->write(&buf, &perf_level)) < 0)
                return res;
              total += res;
            }
          break;
        }
      default:
        {
          // Unknown message id: reply with Not_supported.
          if ((res = qw->write(&buf, &hdr)) < 0)
            return res;
          total += res;
          l4_int32_t status = Not_supported;
          if ((res = qw->write(&buf, &status)) < 0)
            return res;
          total += res;
          break;
        }
      }
    return total;
  }
};
} /* Scmi */ } /* Svr */ } /* L4virtio */

View File

@@ -0,0 +1,352 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2013-2022, 2024 Kernkonzept GmbH.
* Author(s): Alexander Warg <alexander.warg@kernkonzept.com>
* Matthias Lange <matthias.lange@kernkonzept.com>
*
*/
#pragma once
/**
* \defgroup l4virtio L4 VIRTIO Interface
*/
/**
* \defgroup l4virtio_transport L4 VIRTIO Transport Layer
* \ingroup l4virtio
*
* L4 specific VIRTIO Transport layer.
*
* The L4 specific VIRTIO Transport layer is based on L4Re::Dataspace as shared
* memory and L4::Irq for signaling. The VIRTIO configuration space is mostly
* based on a shared memory implementation too and accompanied by two IPC
* functions to synchronize the configuration between device and driver.
*
* \{
*/
#include <l4/sys/compiler.h>
#include <l4/sys/utcb.h>
#include <l4/sys/ipc.h>
#include <l4/sys/types.h>
/** L4-VIRTIO protocol number */
enum L4_virtio_protocol
{
L4VIRTIO_PROTOCOL = 0,
};
enum L4virtio_magic
{
L4VIRTIO_MAGIC = 0x74726976
};
enum L4virtio_vendor
{
L4VIRTIO_VENDOR_KK = 0x44
};
/**
* Opcodes to setup and configure a device
*/
enum L4_virtio_opcodes
{
L4VIRTIO_OP_SET_STATUS = 0, /**< Write device status register. */
L4VIRTIO_OP_CONFIG_QUEUE = 1, /**< Configure queue. */
L4VIRTIO_OP_REGISTER_DS = 3, /**< Register shared memory with device */
L4VIRTIO_OP_DEVICE_CONFIG = 4, /**< Get device config page. */
L4VIRTIO_OP_GET_DEVICE_IRQ = 5, /**< Retrieve device notification IRQ. */
};
/** Virtio device IDs as reported in the driver's config space. */
enum L4virtio_device_ids
{
L4VIRTIO_ID_NET = 1, /**< Virtual ethernet card. */
L4VIRTIO_ID_BLOCK = 2, /**< General block device. */
L4VIRTIO_ID_CONSOLE = 3, /**< Simple device for data IO via ports. */
L4VIRTIO_ID_RNG = 4, /**< Entropy source. */
L4VIRTIO_ID_BALLOON = 5, /**< Memory ballooning device. */
L4VIRTIO_ID_RPMSG = 7, /**< Device using rpmsg protocol. */
L4VIRTIO_ID_SCSI = 8, /**< SCSI host device. */
L4VIRTIO_ID_9P = 9, /**< Device using 9P transport protocol. */
L4VIRTIO_ID_RPROC_SERIAL = 11, /**< Rproc serial device. */
L4VIRTIO_ID_CAIF = 12, /**< Device using CAIF network protocol. */
L4VIRTIO_ID_GPU = 16, /**< GPU */
L4VIRTIO_ID_INPUT = 18, /**< Input */
L4VIRTIO_ID_VSOCK = 19, /**< Vsock transport */
L4VIRTIO_ID_CRYPTO = 20, /**< Crypto */
L4VIRTIO_ID_FS = 26, /**< FS */
L4VIRTIO_ID_SCMI = 32, /**< Scmi device */
L4VIRTIO_ID_I2C = 34, /**< I2C device */
L4VIRTIO_ID_GPIO = 41, /**< Gpio device */
L4VIRTIO_ID_SOCK = 0x9999, /**< Unofficial socket device. */
};
/** Virtio device status bits. */
enum L4virtio_device_status
{
L4VIRTIO_STATUS_ACKNOWLEDGE = 1, /**< Guest OS has found device. */
L4VIRTIO_STATUS_DRIVER = 2, /**< Guest OS knows how to drive device. */
L4VIRTIO_STATUS_DRIVER_OK = 4, /**< Driver is set up. */
L4VIRTIO_STATUS_FEATURES_OK = 8, /**< Driver has acknowledged feature set. */
L4VIRTIO_STATUS_DEVICE_NEEDS_RESET = 0x40, /**< Device detected fatal error. */
L4VIRTIO_STATUS_FAILED = 0x80 /**< Driver detected fatal error. */
};
/** L4virtio-specific feature bits. */
enum L4virtio_feature_bits
{
/// Virtio protocol version 1 supported. Must be 1 for L4virtio.
L4VIRTIO_FEATURE_VERSION_1 = 32,
/// Status and queue config are set via cmd field instead of via IPC.
L4VIRTIO_FEATURE_CMD_CONFIG = 160
};
/**
* VIRTIO IRQ status codes (l4virtio_config_hdr_t::irq_status).
* \note l4virtio_config_hdr_t::irq_status is currently unused.
*/
enum L4_virtio_irq_status
{
L4VIRTIO_IRQ_STATUS_VRING = 1, /**< VRING IRQ pending flag */
L4VIRTIO_IRQ_STATUS_CONFIG = 2, /**< CONFIG IRQ pending flag */
};
/**
* Virtio commands for device configuration.
*/
enum L4_virtio_cmd
{
L4VIRTIO_CMD_NONE = 0x00000000, ///< No command pending
L4VIRTIO_CMD_SET_STATUS = 0x01000000, ///< Set the status register
L4VIRTIO_CMD_CFG_QUEUE = 0x02000000, ///< Configure a queue
L4VIRTIO_CMD_CFG_CHANGED = 0x04000000, ///< Device config changed
L4VIRTIO_CMD_NOTIFY_QUEUE = 0x08000000, ///< Configure a queue
L4VIRTIO_CMD_MASK = 0xff000000, ///< Mask to get command bits
};
/**
 * L4-VIRTIO config header, provided in shared data space.
 *
 * The layout mirrors the virtio MMIO register block (offsets given in the
 * per-field comments); L4-specific fields fill otherwise reserved space.
 */
typedef struct l4virtio_config_hdr_t
{
  /* Virtio(0x00): device config */
  l4_uint32_t magic;   /**< magic value (must be 'virt'). */
  l4_uint32_t version; /**< VIRTIO version */
  l4_uint32_t device;  /**< device ID */
  l4_uint32_t vendor;  /**< vendor ID */
  /* Virtio(0x10): device features */
  l4_uint32_t dev_features; /**< device features windows selected by device_feature_sel */
  l4_uint32_t dev_features_sel; /**< selects the 32-bit window visible in dev_features */
  l4_uint32_t _res1[2];
  /* Virtio(0x20): driver features */
  l4_uint32_t driver_features;     /**< driver features window */
  l4_uint32_t driver_features_sel; /**< selects the window visible in driver_features */
  /* L4Virtio(0x28): L4 queue */
  l4_uint32_t num_queues;    /**< number of virtqueues */
  l4_uint32_t queues_offset; /**< offset of virtqueue config array */
  /* Virtio(0x30): queue status */
  l4_uint32_t queue_sel;     /**< selects the queue addressed by the fields below */
  l4_uint32_t queue_num_max;
  l4_uint32_t queue_num;
  l4_uint32_t _res3[2];
  l4_uint32_t queue_ready;
  l4_uint32_t _res4[2];
  /* Virtio(0x50): queue notify */
  l4_uint32_t queue_notify;
  l4_uint32_t _res5[3];
  /* Virtio(0x60): interrupt handling */
  l4_uint32_t irq_status;
  l4_uint32_t irq_ack;
  l4_uint32_t _res6[2];
  /* Virtio(0x70): Device status register (read-only). The register must be
   * written using l4virtio_set_status(). */
  l4_uint32_t status;
  /* L4Virtio(0x74): W: Event index to be used for config notifications (device to driver) */
  l4_uint32_t cfg_driver_notify_index;
  /* L4Virtio(0x78): R: Event index to be used for config notifications (driver to device) */
  l4_uint32_t cfg_device_notify_index;
  /* L4Virtio(0x7c) L4 specific command register polled by the driver iff supported */
  l4_uint32_t cmd;
  /* Virtio(0x80): queue descriptors */
  l4_uint64_t queue_desc;
  l4_uint32_t _res8[2];
  l4_uint64_t queue_avail;
  l4_uint32_t _res9[2];
  l4_uint64_t queue_used;
  l4_uint32_t _res10[1];
  /* Virtio(0xac): shared memory region */
  l4_uint32_t shm_sel;
  l4_uint64_t shm_len;
  l4_uint64_t shm_base;
  /* L4Virtio(0xc0): use the unused space here for device and driver feature bitmaps */
  l4_uint32_t dev_features_map[6];
  l4_uint32_t _res11[2];
  l4_uint32_t driver_features_map[6];
  l4_uint32_t _res12[1];
  /* Virtio(0xfc): config generation */
  l4_uint32_t generation;
} l4virtio_config_hdr_t;
/**
* Queue configuration entry. An array of such entries is available at the
* l4virtio_config_hdr_t::queues_offset in the config data space.
*
* Consistency rules for the queue config are:
* - A driver might read `num_max` at any time.
* - A driver must write to `num`, `desc_addr`, `avail_addr`, and `used_addr`
* only when `ready` is zero (0). Values in these fields are validated and
* used by the device only after successfully setting `ready` to one (1),
* either by the IPC or by L4VIRTIO_CMD_CFG_QUEUE.
* - The value of `device_notify_index` is valid only when `ready` is one.
* - The driver might write to `device_notify_index` at any time, however
* the change is guaranteed to take effect after a successful
* L4VIRTIO_CMD_CFG_QUEUE or after a config_queue IPC. Note, the change
* might also have immediate effect, depending on the device
* implementation.
*/
typedef struct l4virtio_config_queue_t
{
  /** R: maximum number of descriptors supported by this queue*/
  l4_uint16_t num_max;
  /** RW: number of descriptors configured for this queue */
  l4_uint16_t num;
  /** RW: queue ready flag (read-write); write 0/1 only, values are
   *  validated by the device when ready is set to 1 */
  l4_uint16_t ready;
  /** W: Event index to be used for device notifications (device to driver) */
  l4_uint16_t driver_notify_index;
  l4_uint64_t desc_addr;  /**< W: address of descriptor table */
  l4_uint64_t avail_addr; /**< W: address of available ring */
  l4_uint64_t used_addr;  /**< W: address of used ring */
  /** R: Event index to be used by the driver (driver to device) */
  l4_uint16_t device_notify_index;
} l4virtio_config_queue_t;
L4_BEGIN_DECLS
/**
 * Get the pointer to the first queue config.
 * \param cfg  Pointer to the config header.
 * \return Pointer to the queue config of queue 0.
 */
L4_INLINE l4virtio_config_queue_t *
l4virtio_config_queues(l4virtio_config_hdr_t const *cfg)
{
  /* The queue config array lives at queues_offset behind the header. */
  l4_addr_t base = (l4_addr_t)cfg;
  return (l4virtio_config_queue_t *)(base + cfg->queues_offset);
}
/**
 * Get the pointer to the device configuration.
 * \param cfg  Pointer to the config header.
 * \return Pointer to the device configuration structure.
 */
L4_INLINE void *
l4virtio_device_config(l4virtio_config_hdr_t const *cfg)
{
  /* The device-specific config always starts at offset 0x100. */
  l4_addr_t addr = (l4_addr_t)cfg + 0x100;
  return (void *)addr;
}
/**
 * Set the given feature bit in a feature map.
 * \param feature_map  Bitmap of 8 32-bit words (256 feature bits).
 * \param feat         Feature bit number; out-of-range bits are ignored.
 */
L4_INLINE void
l4virtio_set_feature(l4_uint32_t *feature_map, unsigned feat)
{
  unsigned word = feat / 32;
  unsigned bit = feat % 32;
  if (word >= 8)
    return;
  feature_map[word] |= 1UL << bit;
}
/**
 * Clear the given feature bit in a feature map.
 * \param feature_map  Bitmap of 8 32-bit words (256 feature bits).
 * \param feat         Feature bit number; out-of-range bits are ignored.
 */
L4_INLINE void
l4virtio_clear_feature(l4_uint32_t *feature_map, unsigned feat)
{
  unsigned word = feat / 32;
  unsigned bit = feat % 32;
  if (word >= 8)
    return;
  feature_map[word] &= ~(1UL << bit);
}
/**
 * Check if the given bit in a feature map is set.
 * \param feature_map  Bitmap of 8 32-bit words (256 feature bits).
 * \param feat         Feature bit number.
 * \return Non-zero if the bit is set; 0 if clear or out of range.
 */
L4_INLINE unsigned
l4virtio_get_feature(l4_uint32_t *feature_map, unsigned feat)
{
  unsigned word = feat / 32;
  if (word >= 8)
    return 0;
  return feature_map[word] & (1UL << (feat % 32));
}
/* C bindings for the L4virtio device IPC interface. The semantics of each
 * function are inherited (via \copydoc) from the corresponding
 * L4virtio::Device method. */
/**
 * \param cap  Capability to the VIRTIO host
 *
 * \copydoc L4virtio::Device::set_status
 */
L4_CV int
l4virtio_set_status(l4_cap_idx_t cap, unsigned status) L4_NOTHROW;
/**
 * \param cap  Capability to the VIRTIO host.
 *
 * \copydoc L4virtio::Device::config_queue
 */
L4_CV int
l4virtio_config_queue(l4_cap_idx_t cap, unsigned queue) L4_NOTHROW;
/**
 * \param cap  Capability to the VIRTIO host
 *
 * \copydoc L4virtio::Device::register_ds
 */
L4_CV int
l4virtio_register_ds(l4_cap_idx_t cap, l4_cap_idx_t ds_cap,
                     l4_uint64_t base, l4_umword_t offset,
                     l4_umword_t size) L4_NOTHROW;
/**
 * \param cap  Capability to the L4-VIRTIO host
 *
 * \copydoc L4virtio::Device::device_config
 */
L4_CV int
l4virtio_device_config_ds(l4_cap_idx_t cap, l4_cap_idx_t config_ds,
                          l4_addr_t *ds_offset) L4_NOTHROW;
/**
 * \param cap  Capability to the L4-VIRTIO host
 *
 * \copydoc L4virtio::Device::device_notification_irq
 */
L4_CV int
l4virtio_device_notification_irq(l4_cap_idx_t cap, unsigned index,
                                 l4_cap_idx_t irq) L4_NOTHROW;
L4_END_DECLS
/**\}*/

View File

@@ -0,0 +1,103 @@
/* SPDX-License-Identifier: MIT */
/*
* (c) 2014 Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
*/
#pragma once
/**
* \ingroup l4virtio
* \defgroup l4virtio_block L4 VIRTIO Block Device
* \{
*/
#include <l4/sys/types.h>
/**
 * Kinds of operation over a block device.
 */
enum L4virtio_block_operations
{
  L4VIRTIO_BLOCK_T_IN = 0,            /**< Read from device */
  L4VIRTIO_BLOCK_T_OUT = 1,           /**< Write to device */
  L4VIRTIO_BLOCK_T_FLUSH = 4,         /**< Flush data to disk */
  L4VIRTIO_BLOCK_T_GET_ID = 8,        /**< Get device ID */
  L4VIRTIO_BLOCK_T_DISCARD = 11,      /**< Discard a range of sectors */
  L4VIRTIO_BLOCK_T_WRITE_ZEROES = 13, /**< Write zeroes to a range of sectors */
};
/**
 * Status of a finished block request.
 */
enum L4virtio_block_status
{
  L4VIRTIO_BLOCK_S_OK = 0,    /**< Request finished successfully */
  L4VIRTIO_BLOCK_S_IOERR = 1, /**< IO error on device */
  L4VIRTIO_BLOCK_S_UNSUPP = 2 /**< Operation is not supported */
};
/**
 * Header structure of a request for a block device.
 */
typedef struct l4virtio_block_header_t
{
  l4_uint32_t type;   /**< Kind of request, see L4virtio_block_operations */
  l4_uint32_t ioprio; /**< Priority (unused) */
  l4_uint64_t sector; /**< First sector to read/write */
} l4virtio_block_header_t;
/** Flag bits for the discard/write-zeroes request payload. */
enum L4virtio_block_discard_flags_t
{
  L4VIRTIO_BLOCK_DISCARD_F_UNMAP = 0x00000001UL,    /**< Request unmapping of the range */
  L4VIRTIO_BLOCK_DISCARD_F_RESERVED = 0xFFFFFFFEUL, /**< Reserved flag bits */
};
/**
 * Structure used for the write zeroes and discard commands.
 */
typedef struct l4virtio_block_discard_t
{
  l4_uint64_t sector;      /**< First sector of the range */
  l4_uint32_t num_sectors; /**< Number of sectors in the range */
  l4_uint32_t flags;       /**< See L4virtio_block_discard_flags_t */
} l4virtio_block_discard_t;
/**
 * Device configuration for block devices.
 */
typedef struct l4virtio_block_config_t
{
  l4_uint64_t capacity; /**< Capacity of device in 512-byte sectors */
  l4_uint32_t size_max; /**< Maximum size of a single segment */
  l4_uint32_t seg_max;  /**< Maximum number of segments per request */
  struct l4virtio_block_config_geometry_t
  {
    l4_uint16_t cylinders;
    l4_uint8_t heads;
    l4_uint8_t sectors;
  } geometry; /**< Legacy disk geometry */
  l4_uint32_t blk_size; /**< Block size of underlying disk. */
  struct l4virtio_block_config_topology_t
  {
    /** Number of logical blocks per physical block (log2) */
    l4_uint8_t physical_block_exp;
    /** Offset of first aligned logical block */
    l4_uint8_t alignment_offset;
    /** Suggested minimum I/O size in blocks */
    l4_uint16_t min_io_size;
    /** Suggested optimal (i.e. maximum) I/O size in blocks */
    l4_uint32_t opt_io_size;
  } topology;
  l4_uint8_t writeback;  /**< Cache mode: writeback (1) or writethrough (0) */
  l4_uint8_t unused0[1];
  l4_uint16_t num_queues;                /**< Number of request queues */
  l4_uint32_t max_discard_sectors;       /**< Max sectors of one discard request */
  l4_uint32_t max_discard_seg;           /**< Max segments of one discard request */
  l4_uint32_t discard_sector_alignment;  /**< Discard alignment in sectors */
  l4_uint32_t max_write_zeroes_sectors;  /**< Max sectors of one write-zeroes request */
  l4_uint32_t max_write_zeroes_seg;      /**< Max segments of one write-zeroes request */
  l4_uint8_t write_zeroes_may_unmap;     /**< Write zeroes may deallocate the range */
  l4_uint8_t unused1[3];
} l4virtio_block_config_t;
/**\}*/

View File

@@ -0,0 +1,81 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2019, 2022, 2024 Kernkonzept GmbH.
* Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
*/
#pragma once
/**
* \ingroup l4virtio
* \defgroup l4virtio_input L4 VIRTIO Input Device
* \{
*/
#include <l4/sys/types.h>
/**
 * Device information selectors
 *
 * Written by the driver to l4virtio_input_config_t::select to choose which
 * piece of device information the device fills into the config union.
 */
enum L4virtio_input_config_select
{
  L4VIRTIO_INPUT_CFG_UNSET = 0,
  L4VIRTIO_INPUT_CFG_ID_NAME = 1,   /**< Device name string */
  L4VIRTIO_INPUT_CFG_ID_SERIAL = 2, /**< Serial number string */
  L4VIRTIO_INPUT_CFG_ID_DEVIDS = 3, /**< l4virtio_input_devids_t */
  L4VIRTIO_INPUT_CFG_PROP_BITS = 0x10, /**< Property bitmap */
  L4VIRTIO_INPUT_CFG_EV_BITS = 0x11,   /**< Event-type bitmap (subsel = event type) */
  L4VIRTIO_INPUT_CFG_ABS_INFO = 0x12   /**< l4virtio_input_absinfo_t (subsel = axis) */
};
/**
 * Information about the absolute axis in the underlying evdev implementation.
 */
typedef struct l4virtio_input_absinfo_t
{
  l4_uint32_t min;  /**< Minimum axis value */
  l4_uint32_t max;  /**< Maximum axis value */
  l4_uint32_t fuzz; /**< Noise filter value */
  l4_uint32_t flat; /**< Dead-zone size */
  l4_uint32_t res;  /**< Resolution */
} l4virtio_absinfo_t;
/**
 * Device ID information for the device.
 */
typedef struct l4virtio_input_devids_t
{
  l4_uint16_t bustype;
  l4_uint16_t vendor;
  l4_uint16_t product;
  l4_uint16_t version;
} l4virtio_input_devids_t;
/**
 * Device configuration for input devices.
 *
 * The driver writes select/subsel; the device answers by setting size to
 * the number of valid bytes and filling the union accordingly.
 */
typedef struct l4virtio_input_config_t
{
  l4_uint8_t select; /**< See L4virtio_input_config_select */
  l4_uint8_t subsel; /**< Sub-selector (e.g. event type or axis number) */
  l4_uint8_t size;   /**< Number of valid bytes in the union */
  l4_uint8_t reserved[5];
  union
  {
    char string[128];
    l4_uint8_t bitmap[128];
    struct l4virtio_input_absinfo_t abs;
    struct l4virtio_input_devids_t ids;
  } u;
} l4virtio_input_config_t;
/**
 * Single event in event or status queue.
 */
typedef struct l4virtio_input_event_t
{
  l4_uint16_t type;  /**< Event type (evdev EV_*) */
  l4_uint16_t code;  /**< Event code (key/axis/button code) */
  l4_uint32_t value; /**< Event value */
} l4virtio_input_event_t;
/**\} */

View File

@@ -0,0 +1,69 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2022, 2024 Kernkonzept GmbH.
* Author(s): Stephan Gerhold <stephan.gerhold@kernkonzept.com>
*/
#pragma once
/**
* \ingroup l4virtio
* \defgroup l4virtio_net L4 VIRTIO Network Device
* \{
*/
#include <l4/sys/types.h>
/**
 * Header structure of a request for a network device.
 *
 * Prepended to every packet in the RX and TX queues (virtio-net header).
 */
typedef struct l4virtio_net_header_t
{
  l4_uint8_t flags;        /**< E.g. checksum-needed flag */
  l4_uint8_t gso_type;     /**< Segmentation offload type (none/TSO/UFO) */
  l4_uint16_t hdr_len;     /**< Length of the headers to copy per segment */
  l4_uint16_t gso_size;    /**< Maximum segment size for offloading */
  l4_uint16_t csum_start;  /**< Offset where checksumming starts */
  l4_uint16_t csum_offset; /**< Offset of the checksum within that area */
  l4_uint16_t num_buffers; /**< Buffers used (only with MRG_RXBUF) */
} l4virtio_net_header_t;
/**
 * Device configuration for network devices.
 */
typedef struct l4virtio_net_config_t
{
  l4_uint8_t mac[6];   /**< MAC address (valid with L4VIRTIO_NET_F_MAC) */
  l4_uint16_t status;  /**< Link status (valid with L4VIRTIO_NET_F_STATUS) */
  l4_uint16_t max_virtqueue_pairs; /**< Max queue pairs (with L4VIRTIO_NET_F_MQ) */
  l4_uint16_t mtu;     /**< MTU advice (valid with L4VIRTIO_NET_F_MTU) */
  l4_uint32_t speed;   /**< Link speed in Mbit/s */
  l4_uint8_t duplex;   /**< Duplex mode */
} l4virtio_net_config_t;
/** Network device feature bits. */
enum L4virtio_net_feature_bits
{
  L4VIRTIO_NET_F_CSUM = 0,       /**< Device handles partial checksums */
  L4VIRTIO_NET_F_GUEST_CSUM = 1, /**< Driver handles partial checksums */
  L4VIRTIO_NET_F_MTU = 3,        /**< Device reports an MTU */
  L4VIRTIO_NET_F_MAC = 5,        /**< Device provides a MAC address */
  L4VIRTIO_NET_F_GUEST_TSO4 = 7,
  L4VIRTIO_NET_F_GUEST_TSO6 = 8,
  L4VIRTIO_NET_F_GUEST_ECN = 9,
  L4VIRTIO_NET_F_GUEST_UFO = 10,
  L4VIRTIO_NET_F_HOST_TSO4 = 11,
  L4VIRTIO_NET_F_HOST_TSO6 = 12,
  L4VIRTIO_NET_F_HOST_ECN = 13,
  L4VIRTIO_NET_F_HOST_UFO = 14,
  L4VIRTIO_NET_F_MRG_RXBUF = 15, /**< Driver can merge receive buffers */
  L4VIRTIO_NET_F_STATUS = 16,    /**< Config status field is valid */
  L4VIRTIO_NET_F_CTRL_VQ = 17,   /**< Control queue available */
  L4VIRTIO_NET_F_CTRL_RX = 18,
  L4VIRTIO_NET_F_CTRL_VLAN = 19,
  L4VIRTIO_NET_F_GUEST_ANNOUNCE = 21,
  L4VIRTIO_NET_F_MQ = 22,        /**< Multi-queue support */
  L4VIRTIO_NET_F_CTRL_MAC_ADDR = 23,
};
/**\}*/

View File

@@ -0,0 +1,642 @@
// vi:set ft=cpp: -*- Mode: C++ -*-
/* SPDX-License-Identifier: MIT */
/*
* (c) 2014 Alexander Warg <warg@os.inf.tu-dresden.de>
*/
#include <l4/re/util/debug>
#include <l4/sys/types.h>
#include <l4/sys/err.h>
#include <l4/cxx/bitfield>
#include <l4/cxx/exceptions>
#include <cstdint>
#pragma once
namespace L4virtio {
/*
 * Architecture-specific memory barriers.
 *
 * wmb() orders memory writes and is used before publishing a new ring
 * index to the other side; rmb() orders memory reads. Both also act
 * as compiler barriers via the "memory" clobber.
 */
#if defined(__ARM_ARCH) && __ARM_ARCH == 7
// ARMv7: DMB on the inner-shareable domain; 'ishst' restricts to stores.
static inline void wmb() { asm volatile ("dmb ishst" : : : "memory"); }
static inline void rmb() { asm volatile ("dmb ish" : : : "memory"); }
#elif defined(__ARM_ARCH) && __ARM_ARCH >= 8
// ARMv8: 'ishld' provides a load-load/load-store barrier for reads.
static inline void wmb() { asm volatile ("dmb ishst" : : : "memory"); }
static inline void rmb() { asm volatile ("dmb ishld" : : : "memory"); }
#elif defined(__mips__)
// MIPS: full 'sync' in both directions.
static inline void wmb() { asm volatile ("sync" : : : "memory"); }
static inline void rmb() { asm volatile ("sync" : : : "memory"); }
#elif defined(__amd64__) || defined(__i386__) || defined(__i686__)
// x86: store/load fences.
static inline void wmb() { asm volatile ("sfence" : : : "memory"); }
static inline void rmb() { asm volatile ("lfence" : : : "memory"); }
#elif defined(__riscv)
// RISC-V: fence on output (store) resp. input (load) accesses.
static inline void wmb() { asm volatile ("fence ow, ow" : : : "memory"); }
static inline void rmb() { asm volatile ("fence ir, ir" : : : "memory"); }
#else
#warning Missing proper memory write barrier
// Fallback: compiler barrier only -- insufficient on weakly ordered CPUs.
static inline void wmb() { asm volatile ("" : : : "memory"); }
static inline void rmb() { asm volatile ("" : : : "memory"); }
#endif
/**
* Pointer used in virtio descriptors.
*
* As the descriptor contain guest addresses these pointers cannot be
* dereferenced directly.
*/
template< typename T >
class Ptr
{
public:
/// Type for making an invalid (NULL) Ptr.
enum Invalid_type { Invalid /**< Use to set a Ptr to invalid (NULL) */ };
Ptr() = default;
/// Make and invalid Ptr
Ptr(Invalid_type) : _p(~0ULL) {}
/// Make a Ptr from a raw 64bit address
explicit Ptr(l4_uint64_t vm_addr) : _p(vm_addr) {}
/// \return The raw 64bit address of the stored pointer.
l4_uint64_t get() const { return _p; }
/// \return true if the stored pointer is valid (not NULL).
bool is_valid() const { return _p != ~0ULL; }
private:
l4_uint64_t _p;
};
/**
 * Low-level Virtqueue.
 *
 * This class represents a single virtqueue (split ring layout:
 * descriptor table, available ring, used ring), with a local running
 * available index.
 *
 * \note The Virtqueue implementation is not thread-safe.
 */
class Virtqueue
{
public:
  /**
   * Descriptor in the descriptor table.
   */
  class Desc
  {
  public:
    /**
     * Type for descriptor flags.
     */
    struct Flags
    {
      l4_uint16_t raw; ///< raw flags value of a virtio descriptor.
      Flags() = default;
      /// Make Flags from raw 16bit value.
      explicit Flags(l4_uint16_t v) : raw(v) {}
      /// Part of a descriptor chain which is continued with the next field.
      CXX_BITFIELD_MEMBER( 0, 0, next, raw);
      /// Block described by this descriptor is writeable.
      CXX_BITFIELD_MEMBER( 1, 1, write, raw);
      /// Indirect descriptor, block contains a list of descriptors.
      CXX_BITFIELD_MEMBER( 2, 2, indirect, raw);
    };
    Ptr<void> addr;   ///< Address (guest address) stored in descriptor.
    l4_uint32_t len;  ///< Length of described buffer.
    Flags flags;      ///< Descriptor flags.
    l4_uint16_t next; ///< Index of the next chained descriptor.
    /**
     * Dump a single descriptor.
     *
     * \param idx  Index of this descriptor in the descriptor table
     *             (used for the printout only).
     */
    void dump(unsigned idx) const
    {
      L4Re::Util::Dbg().printf("D[%04x]: %08llx (%x) f=%04x n=%04x\n",
                               idx, addr.get(),
                               len, static_cast<unsigned>(flags.raw),
                               static_cast<unsigned>(next));
    }
  };
  /**
   * Type of available ring, this is read-only for the host.
   */
  class Avail
  {
  public:
    /**
     * Flags of the available ring.
     */
    struct Flags
    {
      l4_uint16_t raw; ///< raw 16bit flags value of the available ring.
      Flags() = default;
      /// Make Flags from the raw value.
      explicit Flags(l4_uint16_t v) : raw(v) {}
      /// Guest does not want to receive interrupts when requests are finished.
      CXX_BITFIELD_MEMBER( 0, 0, no_irq, raw);
    };
    Flags flags;        ///< flags of available ring
    l4_uint16_t idx;    ///< available index written by guest
    l4_uint16_t ring[]; ///< array of available descriptor indexes (flexible array member).
  };
  /**
   * Type of an element of the used ring.
   */
  struct Used_elem
  {
    Used_elem() = default;
    /**
     * Initialize a used ring element.
     *
     * \param id  The index of the descriptor to be marked as used.
     * \param len The total bytes written into the buffer of the
     *            descriptor chain.
     */
    Used_elem(l4_uint16_t id, l4_uint32_t len) : id(id), len(len) {}
    l4_uint32_t id;  ///< descriptor index
    l4_uint32_t len; ///< length field
  };
  /**
   * Used ring.
   */
  class Used
  {
  public:
    /**
     * flags for the used ring.
     */
    struct Flags
    {
      l4_uint16_t raw; ///< raw flags value as specified by virtio.
      Flags() = default;
      /// make Flags from raw value
      explicit Flags(l4_uint16_t v) : raw(v) {}
      /// host does not want to be notified when new requests have been queued.
      CXX_BITFIELD_MEMBER( 0, 0, no_notify, raw);
    };
    Flags flags;     ///< flags of the used ring.
    l4_uint16_t idx; ///< index of the last entry in the ring.
    Used_elem ring[]; ///< array of used descriptors (flexible array member).
  };
protected:
  Desc *_desc = nullptr;   ///< pointer to descriptor table, NULL if queue is off.
  Avail *_avail = nullptr; ///< pointer to available ring.
  Used *_used = nullptr;   ///< pointer to used ring.
  /**
   * The life counter for the queue.
   *
   * Running local index; the driver side uses it as the tail index of
   * the next used-ring entry to consume (see get_tail_avail_idx()).
   */
  l4_uint16_t _current_avail = 0;
  /**
   * mask used for indexing into the descriptor table
   * and the rings (num() - 1; requires num to be a power of 2).
   */
  l4_uint16_t _idx_mask = 0;
  /**
   * Create a disabled virtqueue.
   */
  Virtqueue() = default;
  /// Virtqueues must not be copied (they reference shared ring memory).
  Virtqueue(Virtqueue const &) = delete;
  ~Virtqueue() = default;
public:
  /**
   * Completely disable the queue.
   *
   * setup() must be used to enable the queue again.
   */
  void disable()
  { _desc = 0; }
  /**
   * Fixed alignment values for different parts of a virtqueue.
   */
  enum
  {
    Desc_align = 4,  ///< Alignment of the descriptor table.
    Avail_align = 1, ///< Alignment of the available ring.
    Used_align = 2,  ///< Alignment of the used ring.
  };
  /**
   * Calculate the total size for a virtqueue of the given dimensions.
   *
   * \param num The number of entries in the descriptor table, the
   *            available ring, and the used ring (must be a power of 2).
   *
   * \return The total size in bytes of the queue data structures.
   */
  static unsigned long total_size(unsigned num)
  {
    static_assert(Desc_align >= Avail_align,
                  "virtqueue alignment assumptions broken");
    return l4_round_size(desc_size(num) + avail_size(num), Used_align)
           + used_size(num);
  }
  /**
   * Calculate the size of the descriptor table for `num` entries.
   *
   * \param num The number of entries in the descriptor table.
   *
   * \returns The size in bytes needed for a descriptor table with
   *          `num` entries (16 bytes per descriptor).
   */
  static unsigned long desc_size(unsigned num)
  { return num * 16; }
  /**
   * Get the alignment in zero LSBs needed for the descriptor table.
   *
   * \returns The alignment in zero LSBs needed for a descriptor table.
   */
  static unsigned long desc_align()
  { return Desc_align; }
  /**
   * Calculate the size of the available ring for `num` entries.
   *
   * \param num The number of entries in the available ring.
   * \returns The size in bytes needed for an available ring with
   *          `num` entries (2 bytes per entry plus 6 bytes for the
   *          flags, idx, and trailing `used_event` fields).
   */
  static unsigned long avail_size(unsigned num)
  { return 2 * num + 6; }
  /**
   * Get the alignment in zero LSBs needed for the available ring.
   *
   * \returns The alignment in zero LSBs needed for an available ring.
   */
  static unsigned long avail_align()
  { return Avail_align; }
  /**
   * Calculate the size of the used ring for `num` entries.
   *
   * \param num The number of entries in the used ring.
   *
   * \returns The size in bytes needed for an used ring with
   *          `num` entries (8 bytes per entry plus 6 bytes for the
   *          flags, idx, and trailing `avail_event` fields).
   */
  static unsigned long used_size(unsigned num)
  { return 8 * num + 6; }
  /**
   * Get the alignment in zero LSBs needed for the used ring.
   *
   * \returns The alignment in zero LSBs needed for an used ring.
   */
  static unsigned long used_align()
  { return Used_align; }
  /**
   * Calculate the total size of this virtqueue.
   *
   * \pre The queue has been set up.
   */
  unsigned long total_size() const
  {
    // _desc is the start and _used the last part of the queue memory,
    // so the distance between them plus the used ring covers everything.
    return (reinterpret_cast<char *>(_used) - reinterpret_cast<char *>(_desc))
           + used_size(num());
  }
  /**
   * Get the offset of the available ring from the descriptor table.
   */
  unsigned long avail_offset() const
  { return reinterpret_cast<char *>(_avail) - reinterpret_cast<char *>(_desc); }
  /**
   * Get the offset of the used ring from the descriptor table.
   */
  unsigned long used_offset() const
  { return reinterpret_cast<char *>(_used) - reinterpret_cast<char *>(_desc); }
  /**
   * Enable this queue.
   *
   * \param num   The number of entries in the descriptor table, the
   *              available ring, and the used ring (must be a power of 2).
   * \param desc  The address of the descriptor table. (Must be
   *              Desc_align aligned and at least `desc_size(num)` bytes
   *              in size.)
   * \param avail The address of the available ring. (Must be
   *              Avail_align aligned and at least `avail_size(num)` bytes
   *              in size.)
   * \param used  The address of the used ring. (Must be Used_align aligned
   *              and at least `used_size(num)` bytes in size.)
   *
   * \throws L4::Runtime_error(-L4_EINVAL) if `num` exceeds 2^16.
   *
   * Due to the data type of the descriptors, the queue can have a
   * maximum size of 2^16.
   */
  void setup(unsigned num, void *desc, void *avail, void *used)
  {
    if (num > 0x10000)
      throw L4::Runtime_error(-L4_EINVAL, "Queue too large.");
    _idx_mask = num - 1;
    _desc = static_cast<Desc*>(desc);
    _avail = static_cast<Avail*>(avail);
    _used = static_cast<Used*>(used);
    _current_avail = 0;
    L4Re::Util::Dbg().printf("VQ[%p]: num=%d d:%p a:%p u:%p\n",
                             this, num, _desc, _avail, _used);
  }
  /**
   * Enable this queue.
   *
   * \param num  The number of entries in the descriptor table, the
   *             available ring, and the used ring (must be a power of 2).
   * \param ring The base address for the queue data structure. The memory
   *             block at `ring` must be at least `total_size(num)` bytes
   *             in size and have an alignment of Desc_align
   *             (desc_align()) bits.
   *
   * Due to the data type of the descriptors, the queue can have a
   * maximum size of 2^16.
   */
  void setup_simple(unsigned num, void *ring)
  {
    // Carve descriptor table, available ring, and used ring out of the
    // single memory block, honoring the per-part alignment rules.
    l4_addr_t desc = reinterpret_cast<l4_addr_t>(ring);
    l4_addr_t avail = l4_round_size(desc + desc_size(num), Avail_align);
    void *used = reinterpret_cast<void *>(
      l4_round_size(avail + avail_size(num), Used_align));
    setup(num, ring, reinterpret_cast<void *>(avail), used);
  }
  /**
   * Dump descriptors for this queue.
   *
   * \pre the queue must be in working state.
   */
  void dump(Desc const *d) const
  { d->dump(d - _desc); }
  /**
   * Test if this queue is in working state.
   *
   * \return true when the queue is in working state, false else.
   */
  bool ready() const
  { return L4_LIKELY(_desc != 0); }
  /// \return The number of entries in the ring.
  unsigned num() const
  { return _idx_mask + 1; }
  /**
   * Get the no IRQ flag of this queue.
   *
   * \pre queue must be in working state.
   *
   * \return true if the guest does not want to get IRQs (currently).
   */
  bool no_notify_guest() const
  {
    return _avail->flags.no_irq();
  }
  /**
   * Get the no notify flag of this queue.
   *
   * \pre queue must be in working state.
   *
   * \return true if the host does not want to get IRQs (currently).
   */
  bool no_notify_host() const
  {
    return _used->flags.no_notify();
  }
  /**
   * Set the no-notify flag for this queue
   *
   * \pre Queue must be in a working state.
   */
  void no_notify_host(bool value)
  {
    _used->flags.no_notify() = value;
  }
  /**
   * Get available index from available ring (for debugging).
   *
   * \pre Queue must be in a working state.
   *
   * \return current index in the available ring (shared
   *         between device model and device driver).
   */
  l4_uint16_t get_avail_idx() const { return _avail->idx; }
  /**
   * Get tail-available index stored in local state (for debugging).
   *
   * \return current tail index for the available ring.
   */
  l4_uint16_t get_tail_avail_idx() const { return _current_avail; }
};
namespace Driver {
/**
 * Driver-side implementation of a Virtqueue.
 *
 * Adds function for managing the descriptor list (as a freelist
 * threaded through the `next` fields), enqueueing new and dequeueing
 * finished requests.
 *
 * \note The Virtqueue implementation is not thread-safe.
 */
class Virtqueue : public L4virtio::Virtqueue
{
private:
  /// Index of next free entry in the descriptor table.
  l4_uint16_t _next_free;
public:
  enum End_of_queue
  {
    // Indicates the end of the queue.
    Eoq = 0xFFFF
  };
  /// Create an uninitialized queue; no descriptors are available until
  /// init_queue() has been called.
  Virtqueue() : _next_free(Eoq) {}
  /**
   * Initialize the descriptor table and the index structures
   * of this queue.
   *
   * \param num The number of entries in the descriptor table, the
   *            available ring, and the used ring (must be a power of 2).
   *
   * \pre The queue must be set up correctly with setup() or setup_simple().
   */
  void initialize_rings(unsigned num)
  {
    _used->idx = 0;
    _avail->idx = 0;
    // setup the freelist: chain all descriptors via their `next` field
    for (l4_uint16_t d = 0; d < num - 1; ++d)
      _desc[d].next = d + 1;
    _desc[num - 1].next = Eoq;
    _next_free = 0;
  }
  /**
   * Initialize this virtqueue.
   *
   * \param num   The number of entries in the descriptor table, the
   *              available ring, and the used ring (must be a power of 2).
   * \param desc  The address of the descriptor table. (Must be
   *              Desc_align aligned and at least `desc_size(num)` bytes
   *              in size.)
   * \param avail The address of the available ring. (Must be
   *              Avail_align aligned and at least `avail_size(num)` bytes
   *              in size.)
   * \param used  The address of the used ring. (Must be Used_align aligned
   *              and at least `used_size(num)` bytes in size.)
   *
   * This function sets up the memory and initializes the freelist.
   */
  void init_queue(unsigned num, void *desc, void *avail, void *used)
  {
    setup(num, desc, avail, used);
    initialize_rings(num);
  }
  /**
   * Initialize this virtqueue.
   *
   * \param num  The number of entries in the descriptor table, the
   *             available ring, and the used ring (must be a power of 2).
   * \param base The base address for the queue data structure.
   *
   * This function sets up the memory and initializes the freelist.
   */
  void init_queue(unsigned num, void *base)
  {
    setup_simple(num, base);
    initialize_rings(num);
  }
  /**
   * Allocate and return an unused descriptor from the descriptor table.
   *
   * The descriptor will be removed from the free list, the content
   * should be considered undefined. After use, it needs to be freed
   * using free_descriptor().
   *
   * \return The index of the reserved descriptor or Virtqueue::Eoq if
   *         no free descriptor is available.
   *
   * Note: the implementation uses (2^16 - 1) as the end of queue marker.
   * That means that the final entry in the queue can not be allocated
   * iff the queue size is 2^16.
   */
  l4_uint16_t alloc_descriptor()
  {
    // Pop the head of the freelist.
    l4_uint16_t idx = _next_free;
    if (idx == Eoq)
      return Eoq;
    _next_free = _desc[idx].next;
    return idx;
  }
  /**
   * Enqueue a descriptor in the available ring.
   *
   * \param descno Index of the head descriptor to enqueue.
   *
   * \throws L4::Bounds_error if `descno` is outside the descriptor table.
   */
  void enqueue_descriptor(l4_uint16_t descno)
  {
    if (descno > _idx_mask)
      throw L4::Bounds_error();
    _avail->ring[_avail->idx & _idx_mask] = descno; // _avail->idx expected to wrap
    // Make the new ring entry visible before publishing the new index.
    wmb();
    ++_avail->idx;
  }
  /**
   * Return a reference to a descriptor in the descriptor table.
   *
   * \param descno Index of the descriptor,
   *               expected to be in correct range.
   *
   * \throws L4::Bounds_error if `descno` is outside the descriptor table.
   */
  Desc &desc(l4_uint16_t descno)
  {
    if (descno > _idx_mask)
      throw L4::Bounds_error();
    return _desc[descno];
  }
  /**
   * Return the next finished block.
   *
   * \param[out] len (optional) Size of valid data in finished block.
   *             Note that this is the value reported by the device,
   *             which may set it to a value that is larger than the
   *             original buffer size.
   *
   * \return Index of the head or Virtqueue::Eoq
   *         if no used element is currently available.
   */
  l4_uint16_t find_next_used(l4_uint32_t *len = nullptr)
  {
    // _used->idx is written by the device; _current_avail tracks how far
    // we have consumed the used ring.
    if (_current_avail == _used->idx)
      return Eoq;
    // NOTE(review): the ring element is read without an rmb() after the
    // idx check; presumably callers or the transport provide ordering on
    // weakly ordered CPUs -- verify.
    auto elem = _used->ring[_current_avail++ & _idx_mask];
    if (len)
      *len = elem.len;
    return elem.id;
  }
  /**
   * Free a chained list of descriptors in the descriptor queue.
   *
   * \param head Index of the first element in the descriptor chain.
   * \param tail Index of the last element in the descriptor chain.
   *
   * \throws L4::Bounds_error if `head` or `tail` is outside the
   *         descriptor table.
   *
   * Simply takes the descriptor chain and prepends it to the beginning
   * of the free list. Assumes that the list has been correctly chained.
   */
  void free_descriptor(l4_uint16_t head, l4_uint16_t tail)
  {
    if (head > _idx_mask || tail > _idx_mask)
      throw L4::Bounds_error();
    _desc[tail].next = _next_free;
    _next_free = head;
  }
};
}
} // namespace L4virtio