#include <l4/cxx/ref_ptr>
#include <l4/cxx/utils>
#include <l4/sys/cache.h>

#include <l4/l4virtio/server/virtio-block>

#include <l4/libblock-device/debug.h>
#include <l4/libblock-device/device.h>
#include <l4/libblock-device/types.h>
#include <l4/libblock-device/request.h>
namespace Block_device {
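
/**
 * Virtio block server endpoint for a single client session, driving a
 * device of type DEV.
 *
 * The class accepts virtio block requests, translates them into operations
 * on the underlying device and finalizes each request once the device
 * reports completion.
 */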
template <typename DEV>
class Virtio_client
: public L4virtio::Svr::Block_dev_base<Mem_region_info>,
  public L4::Epiface_t<Virtio_client<DEV>, L4virtio::Device>
{
protected:
  class Generic_pending_request : public Pending_request
  {
  protected:
    int check_error(int result)
    {
      if (result < 0 && result != -L4_EBUSY)
        client->handle_request_error(result, this);

      return result;
    }

  public:
    explicit Generic_pending_request(Virtio_client *c,
                                     cxx::unique_ptr<Request> &&req)
    : request(cxx::move(req)), client(c)
    {}

    void fail_request() override
    {
      client->finalize_request(cxx::move(request), 0, L4VIRTIO_BLOCK_S_IOERR);
    }

    cxx::unique_ptr<Request> request;
    Virtio_client *client;
  };
  struct Pending_inout_request : public Generic_pending_request
  {
    Inout_block blocks;
    L4Re::Dma_space::Direction dir;

    explicit Pending_inout_request(Virtio_client *c,
                                   cxx::unique_ptr<Request> &&req)
    : Generic_pending_request(c, cxx::move(req))
    {
      dir = this->request->header().type == L4VIRTIO_BLOCK_T_OUT
              ? L4Re::Dma_space::To_device
              : L4Re::Dma_space::From_device;
    }

    ~Pending_inout_request() override
    { this->client->release_dma(this); }

    int handle_request() override
    { return this->check_error(this->client->inout_request(this)); }
  };
  struct Pending_flush_request : public Generic_pending_request
  {
    using Generic_pending_request::Generic_pending_request;

    int handle_request() override
    { return this->check_error(this->client->flush_request(this)); }
  };
  struct Pending_cmd_request : public Generic_pending_request
  {
    Inout_block blocks;

    using Generic_pending_request::Generic_pending_request;

    int handle_request() override
    {
      return this->check_error(this->client->discard_cmd_request(this, 0));
    }
  };
public:
  using Device_type = DEV;

  /**
   * Create a new virtio client for an existing device.
   */
  Virtio_client(cxx::Ref_ptr<Device_type> const &dev, unsigned numds,
                bool readonly)
  : L4virtio::Svr::Block_dev_base<Mem_region_info>(L4VIRTIO_VENDOR_L4RE, 0x100,
                                                   dev->capacity() >> 9,
                                                   dev->is_read_only()
                                                     || readonly),
    _client_invalidate_cb(nullptr),
    _client_idle_cb(nullptr),
    _numds(numds),
    _device(dev),
    _in_flight(0)
  {
    reset_client();
    init_discard_info(0);
  }
  /**
   * Reset the hardware device driven by this interface.
   */
  void reset_device() override
  {
    if (_client_invalidate_cb)
      _client_invalidate_cb(false);
    _device->reset();
    _negotiated_features.raw = 0;
  }

  /**
   * Reinitialize the client side of the interface.
   */
  void reset_client()
  {
    init_mem_info(_numds);
    set_seg_max(_device->max_segments());
    set_size_max(0x400000); // 4MB
    set_flush();
    set_config_wce(0); // start in write-through mode
    _shutdown_state = Shutdown_type::Running;
    _negotiated_features.raw = 0;
  }
  bool queue_stopped() override
  { return _shutdown_state == Shutdown_type::Client_gone; }
  void set_client_invalidate_cb(std::function<void(bool)> &&cb)
  {
    _client_invalidate_cb = cb;
  }

  void set_client_idle_cb(std::function<void()> &&cb)
  {
    _client_idle_cb = cb;
  }
  void set_device_notify_irq(L4::Cap<L4::Irq> irq)
  {
    _device_notify_irq = irq;
  }

  L4::Cap<L4::Irq> device_notify_irq(unsigned) override
  {
    return _device_notify_irq;
  }
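
  /**
   * Start processing a new request from the client queue.
   *
   * Wraps the virtio request in the matching pending-request type and
   * validates it. On success, the returned pending request is handed to
   * the caller for scheduling; on failure, the request is finalized
   * immediately and a null pointer is returned.
   */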
  cxx::unique_ptr<Pending_request> start_request(cxx::unique_ptr<Request> &&req)
  {
    auto trace = Dbg::trace("virtio");

    cxx::unique_ptr<Pending_request> pending;

    if (_shutdown_state != Shutdown_type::Running)
      {
        trace.printf("Failing requests as the client is shutting down\n");
        this->finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_IOERR);
        return pending;
      }

    trace.printf("request received: type 0x%x, sector 0x%llx\n",
                 req->header().type, req->header().sector);
    switch (req->header().type)
      {
      case L4VIRTIO_BLOCK_T_IN:
      case L4VIRTIO_BLOCK_T_OUT:
        {
          auto p = cxx::make_unique<Pending_inout_request>(this, cxx::move(req));
          int ret = build_inout_blocks(p.get());
          if (ret >= 0)
            pending.reset(p.release());
          else
            handle_request_error(ret, p.get());
          break;
        }
      case L4VIRTIO_BLOCK_T_FLUSH:
        {
          auto p = cxx::make_unique<Pending_flush_request>(this, cxx::move(req));
          int ret = check_flush_request(p.get());
          if (ret >= 0)
            pending.reset(p.release());
          else
            handle_request_error(ret, p.get());
          break;
        }
      case L4VIRTIO_BLOCK_T_WRITE_ZEROES:
      case L4VIRTIO_BLOCK_T_DISCARD:
        {
          auto p = cxx::make_unique<Pending_cmd_request>(this, cxx::move(req));
          int ret = build_discard_cmd_blocks(p.get());
          if (ret >= 0)
            pending.reset(p.release());
          else
            handle_request_error(ret, p.get());
          break;
        }
      default:
        finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_UNSUPP);
        break;
      }

    return pending;
  }
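
  /**
   * Completion callback invoked by the device implementation.
   *
   * Finalizes the virtio request with the reported status and size (unless
   * the client is already gone), gives the idle callback a chance to
   * schedule further work, and releases the pending request, whose
   * ownership passes to this function.
   */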
  void task_finished(Generic_pending_request *preq, int error, l4_size_t sz)
  {
    --_in_flight;

    // Only finalize the request if the client is still alive.
    if (_shutdown_state != Client_gone)
      finalize_request(cxx::move(preq->request), sz, error);

    // New requests might be schedulable now.
    if (_client_idle_cb)
      _client_idle_cb();

    // The pending request can be dropped now.
    cxx::unique_ptr<Pending_request> ureq(preq);
  }
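
  /**
   * Process a shutdown event on the client.
   *
   * Tracks transitions between Running, Client_shutdown, Client_gone,
   * System_suspend and System_shutdown; illegal transitions are caught by
   * the asserts below.
   */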
  void shutdown_event(Shutdown_type type)
  {
    // A client in the Client_gone state has already been shut down; there
    // is nothing further to do here.
    if (_shutdown_state == Client_gone)
      return;

    // Transitions from System_shutdown are not allowed.
    l4_assert(_shutdown_state != System_shutdown);

    // Transitions from System_suspend are only allowed back to Running.
    l4_assert(_shutdown_state != System_suspend
              || type == Shutdown_type::Running);

    _shutdown_state = type;

    if (type == Shutdown_type::Client_shutdown)
      {
        reset();
        reset_client();
        // reset_client() sets the state back to Running.
        l4_assert(_shutdown_state == Shutdown_type::Running);
      }

    if (type != Shutdown_type::Running)
      {
        if (_client_invalidate_cb)
          _client_invalidate_cb(type != Shutdown_type::Client_gone);
      }
  }
  /**
   * Attach the client to an object registry.
   *
   * \param registry  Object registry responsible for dispatching requests.
   * \param service   Optional name of an IPC gate in the environment.
   */
  L4::Cap<void> register_obj(L4::Registry_iface *registry,
                             char const *service = 0)
  {
    L4::Cap<void> ret;
    if (service)
      ret = registry->register_obj(this, service);
    else
      ret = registry->register_obj(this);

    return L4Re::chkcap(ret, "Register virtio block client.");
  }

  void unregister_obj(L4::Registry_iface *registry)
  {
    registry->unregister_obj(this);
  }

  /// True while requests are still being processed by the device.
  bool busy() const
  {
    return _in_flight != 0;
  }
  Notification_domain const *notification_domain() const
  { return _device->notification_domain(); }

private:
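  /**
   * Release the DMA mappings established in build_inout_blocks().
   *
   * Called from the Pending_inout_request destructor, so the mappings are
   * released on both the success and the failure path.
   */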
  void release_dma(Pending_inout_request *req)
  {
    // Unmap DMA regions.
    Inout_block *cur = &req->blocks;
    while (cur)
      {
        if (cur->num_sectors)
          _device->dma_unmap(cur->dma_addr, cur->num_sectors, req->dir);
        cur = cur->next.get();
      }
  }
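
  /**
   * Translate an inout request into a chain of Inout_blocks and map each
   * data block for DMA.
   *
   * Virtio sectors are always 512 bytes, while the device may use larger
   * sectors; sps below is the number of virtio sectors per device sector,
   * and all sector values from the client are converted accordingly.
   */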
  int build_inout_blocks(Pending_inout_request *preq)
  {
    auto *req = preq->request.get();
    l4_size_t sps = _device->sector_size() >> 9;
    l4_uint64_t current_sector = req->header().sector / sps;
    l4_uint64_t sectors = _device->capacity() / _device->sector_size();
    auto dir = preq->dir;

    l4_uint32_t flags = 0;
    if (req->header().type == L4VIRTIO_BLOCK_T_OUT)
      {
        // Writes to a read-only client must fail.
        if (device_features().ro())
          return -L4_EIO;

        // Figure out whether the write has write-back semantics.
        if (_negotiated_features.config_wce())
          {
            if (get_writeback() == 1)
              flags = Block_device::Inout_f_wb;
          }
        else if (_negotiated_features.flush())
          flags = Block_device::Inout_f_wb;
      }

    // The start sector must be aligned to the device sector size.
    if (current_sector * sps != req->header().sector)
      return -L4_EIO;

    Inout_block *blk = &preq->blocks;
    Inout_block *last_blk = nullptr;
    size_t seg = 0;

    while (req->has_more())
      {
        Request::Data_block b;

        if (++seg > _device->max_segments())
          return -L4_EIO;

        try
          {
            b = req->next_block();
          }
        catch (L4virtio::Svr::Bad_descriptor const &e)
          {
            Dbg::warn().printf("Descriptor error: %s\n", e.message());
            return -L4_EIO;
          }

        l4_size_t sz = b.len / _device->sector_size();

        if (sz * _device->sector_size() != b.len)
          {
            Dbg::warn().printf("Bad block size 0x%x\n", b.len);
            return -L4_EIO;
          }

        // Check that the request does not exceed the device capacity.
        if (current_sector > sectors - sz)
          return -L4_EIO;

        if (last_blk)
          {
            last_blk->next = cxx::make_unique<Inout_block>();
            blk = last_blk->next.get();
          }

        // Offset of the data within its memory region.
        l4_addr_t off = (l4_addr_t) b.addr - (l4_addr_t) b.mem->local_base();

        L4Re::Dma_space::Dma_addr phys;
        long ret = _device->dma_map(b.mem, off, sz, dir, &phys);
        if (ret < 0)
          return ret;

        blk->dma_addr = phys;
        blk->virt_addr = (void *) ((l4_addr_t) b.mem->local_base() + off);
        blk->num_sectors = sz;
        current_sector += sz;
        blk->flags = flags;

        last_blk = blk;
      }

    return L4_EOK;
  }
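
  /**
   * Cache maintenance around DMA transfers on systems without coherent
   * DMA: before a transfer, buffers the device will read are cleaned
   * (written back) and buffers the device will write are flushed; after
   * the transfer, device-written buffers are invalidated so the CPU
   * observes the new data.
   */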
  void maintain_cache_before_req(Pending_inout_request const *preq)
  {
    if (preq->dir == L4Re::Dma_space::None)
      return;

    for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
      {
        l4_addr_t vstart = (l4_addr_t) cur->virt_addr;
        if (vstart)
          {
            l4_size_t vsize = cur->num_sectors * _device->sector_size();
            if (preq->dir == L4Re::Dma_space::To_device)
              l4_cache_clean_data(vstart, vstart + vsize);
            else
              l4_cache_flush_data(vstart, vstart + vsize);
          }
      }
  }

  void maintain_cache_after_req(Pending_inout_request const *preq)
  {
    if (preq->dir == L4Re::Dma_space::None)
      return;

    for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
      {
        l4_addr_t vstart = (l4_addr_t) cur->virt_addr;
        if (vstart)
          {
            l4_size_t vsize = cur->num_sectors * _device->sector_size();
            if (preq->dir != L4Re::Dma_space::To_device)
              l4_cache_inv_data(vstart, vstart + vsize);
          }
      }
  }
  int inout_request(Pending_inout_request *preq)
  {
    auto *req = preq->request.get();
    l4_uint64_t sector = req->header().sector / (_device->sector_size() >> 9);

    maintain_cache_before_req(preq);
    int res = _device->inout_data(
      sector, preq->blocks,
      [this, preq](int error, l4_size_t sz)
        {
          maintain_cache_after_req(preq);
          task_finished(preq, error, sz);
        },
      preq->dir);

    // The request is now in flight (assuming asynchronous completion).
    if (res >= 0)
      ++_in_flight;

    return res;
  }
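
  /**
   * Validate a FLUSH request: the flush feature must have been negotiated
   * and the sector field must be zero, as required by the virtio block
   * specification.
   */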
  int check_flush_request(Pending_flush_request *preq)
  {
    if (!_negotiated_features.flush())
      return -L4_ENOSYS;

    auto *req = preq->request.get();

    if (req->header().sector)
      return -L4_ENOSYS;

    return L4_EOK;
  }
  int flush_request(Pending_flush_request *preq)
  {
    int res = _device->flush([this, preq](int error, l4_size_t sz)
      {
        task_finished(preq, error, sz);
      });

    if (res >= 0)
      ++_in_flight;

    return res;
  }
  bool check_features(void) override
  {
    _negotiated_features = negotiated_features();
    return true;
  }
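
  /*
   * Discard/write-zeroes support is optional in the device type. The
   * following pair of overloads uses SFINAE on the trailing return type:
   * the call init_discard_info(0) selects the int overload whenever
   * T::discard_info() exists and falls back to the no-op long overload
   * otherwise. discard_cmd_request() below uses the same pattern.
   */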
  template <typename T = Device_type>
  void init_discard_info(long) {}

  template <typename T = Device_type>
  auto init_discard_info(int)
    -> decltype(((T*)0)->discard_info(), void())
  {
    _di = _device->discard_info();

    // Convert the device limits to virtio 512-byte sectors.
    size_t sps = _device->sector_size() >> 9;
    if (_di.max_discard_sectors)
      set_discard(_di.max_discard_sectors * sps, _di.max_discard_seg,
                  _di.discard_sector_alignment * sps);
    if (_di.max_write_zeroes_sectors)
      set_write_zeroes(_di.max_write_zeroes_sectors * sps,
                       _di.max_write_zeroes_seg, _di.write_zeroes_may_unmap);
  }
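
  /**
   * Validate a discard/write-zeroes request and convert its payload of
   * l4virtio_block_discard_t descriptors into a chain of Inout_blocks,
   * checking alignment, bounds and the limits advertised in _di.
   */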
  int build_discard_cmd_blocks(Pending_cmd_request *preq)
  {
    auto *req = preq->request.get();
    bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);

    if (this->device_features().ro())
      return -L4_EIO;

    // The sector field is reserved for discard/write-zeroes requests.
    if (req->header().sector)
      return -L4_ENOSYS;

    if (discard)
      {
        if (!_negotiated_features.discard())
          return -L4_ENOSYS;
      }
    else
      {
        if (!_negotiated_features.write_zeroes())
          return -L4_ENOSYS;
      }

    auto *d = _device.get();

    size_t seg = 0;
    size_t max_seg = discard ? _di.max_discard_seg : _di.max_write_zeroes_seg;
    l4_size_t sps = d->sector_size() >> 9;
    l4_uint64_t sectors = d->capacity() / d->sector_size();

    Inout_block *blk = &preq->blocks;
    Inout_block *last_blk = nullptr;

    while (req->has_more())
      {
        Request::Data_block b;

        try
          {
            b = req->next_block();
          }
        catch (L4virtio::Svr::Bad_descriptor const &e)
          {
            Dbg::warn().printf("Descriptor error: %s\n", e.message());
            return -L4_EIO;
          }

        auto *payload = reinterpret_cast<l4virtio_block_discard_t *>(b.addr);

        size_t items = b.len / sizeof(payload[0]);
        if (items * sizeof(payload[0]) != b.len)
          return -L4_EIO;

        if (seg + items > max_seg)
          return -L4_EIO;
        seg += items;

        for (auto i = 0u; i < items; i++)
          {
            auto p = cxx::access_once<l4virtio_block_discard_t>(&payload[i]);

            // Convert 512-byte virtio sectors into device sectors.
            if (p.sector % sps != 0)
              return -L4_EIO;
            if (p.num_sectors % sps != 0)
              return -L4_EIO;
            p.sector /= sps;
            p.num_sectors /= sps;

            // Check bounds.
            if (p.num_sectors > sectors)
              return -L4_EIO;
            if (p.sector > sectors - p.num_sectors)
              return -L4_EIO;

            if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_RESERVED)
              return -L4_ENOSYS;

            if (last_blk)
              {
                last_blk->next = cxx::make_unique<Inout_block>();
                blk = last_blk->next.get();
              }

            blk->sector = p.sector;
            blk->num_sectors = p.num_sectors;

            if (discard)
              {
                if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP)
                  return -L4_ENOSYS;
                if (p.num_sectors > _di.max_discard_sectors)
                  return -L4_EIO;
              }
            else
              {
                if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP
                    && _di.write_zeroes_may_unmap)
                  blk->flags = Inout_f_unmap;
                if (p.num_sectors > _di.max_write_zeroes_sectors)
                  return -L4_EIO;
              }

            last_blk = blk;
          }
      }

    return L4_EOK;
  }
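
  /*
   * Submit a prepared discard/write-zeroes chain to the device. The long
   * overload is the fallback for device types without discard_info(); it
   * should never be reached in practice, because such devices never
   * negotiate the discard or write-zeroes features.
   */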
  template <typename T = Device_type>
  int discard_cmd_request(Pending_cmd_request *, long)
  { return -L4_EIO; }

  template <typename T = Device_type>
  auto discard_cmd_request(Pending_cmd_request *preq, int)
    -> decltype(((T*)0)->discard_info(), int())
  {
    auto *req = preq->request.get();
    bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);

    int res = _device->discard(
      0, preq->blocks,
      [this, preq](int error, l4_size_t sz) { task_finished(preq, error, sz); },
      discard);

    if (res >= 0)
      ++_in_flight;

    return res;
  }
  void handle_request_error(int error, Generic_pending_request *pending)
  {
    auto trace = Dbg::trace("virtio");

    if (error == -L4_ENOSYS)
      {
        trace.printf("Unsupported operation.\n");
        finalize_request(cxx::move(pending->request), 0,
                         L4VIRTIO_BLOCK_S_UNSUPP);
      }
    else
      {
        trace.printf("Got IO error: %d\n", error);
        finalize_request(cxx::move(pending->request), 0,
                         L4VIRTIO_BLOCK_S_IOERR);
      }
  }
protected:
  L4::Cap<L4::Irq> _device_notify_irq;

  std::function<void(bool)> _client_invalidate_cb;
  std::function<void()> _client_idle_cb;

  unsigned _numds;
  cxx::Ref_ptr<Device_type> _device;
  unsigned _in_flight;

  Shutdown_type _shutdown_state;

  Device_discard_feature::Discard_info _di;

  L4virtio::Svr::Block_features _negotiated_features;
};

} // namespace Block_device
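
// Usage sketch (illustrative only; the device type My_device, the server
// object and the parameter values below are assumptions, not part of this
// header):
//
//   using Client = Block_device::Virtio_client<My_device>;
//
//   cxx::Ref_ptr<My_device> dev = ...;       // an initialized device
//   auto client = cxx::make_unique<Client>(dev, 2, false);
//   client->register_obj(server.registry(), "disk");  // named IPC gate
//   server.loop();                           // start serving requests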