10#include <l4/cxx/ref_ptr>
12#include <l4/cxx/utils>
13#include <l4/sys/cache.h>
17#include <l4/l4virtio/server/virtio-block>
19#include <l4/libblock-device/debug.h>
20#include <l4/libblock-device/device.h>
21#include <l4/libblock-device/types.h>
22#include <l4/libblock-device/request_queue.h>
24namespace Block_device {
26template <
typename DEV>
30 public Pending_request::Owner
// NOTE(review): mangled extraction — original line numbers are fused into the
// text and interior lines are missing. Comments describe only the visible
// fragments.
// Base type for requests parked by this client. Owns the virtio Request and
// keeps a back-pointer to the issuing client so completion/errors can be
// routed back to it.
33 class Generic_pending_request :
public Pending_request
// Map a negative result to a client error notification; -L4_EBUSY is
// excluded — per the L4 docs it means "busy, try later", i.e. requeue.
36 int check_error(
int result)
38 if (result < 0 && result != -
L4_EBUSY)
39 client->handle_request_error(result,
this);
// Takes ownership of the request; 'c' is a non-owning back-pointer.
45 explicit Generic_pending_request(Virtio_client *c, cxx::unique_ptr<Request> &&req)
46 : request(
cxx::move(req)), client(c)
49 void fail_request()
override
// True iff this pending request was issued by 'owner' — used when a
// client drains only its own queued requests.
54 bool is_owner(Pending_request::Owner *owner)
override
55 {
return static_cast<Pending_request::Owner *
>(client) == owner; }
57 cxx::unique_ptr<Request> request;
58 Virtio_client *client;
// Pending read/write request; re-dispatches to the client's inout_request()
// when retried, translating the result via check_error().
61 struct Pending_inout_request :
public Generic_pending_request
65 using Generic_pending_request::Generic_pending_request;
74 int handle_request()
override
75 {
return this->check_error(this->client->inout_request(
this)); }
// Pending flush request; retried via the client's flush_request().
79 struct Pending_flush_request :
public Generic_pending_request
81 using Generic_pending_request::Generic_pending_request;
83 int handle_request()
override
84 {
return this->check_error(this->client->flush_request(
this)); }
// Pending discard/write-zeroes command; retried via discard_cmd_request().
// The trailing 0 selects the tag-dispatch overload (see discard_cmd_request).
87 struct Pending_cmd_request :
public Generic_pending_request
91 using Generic_pending_request::Generic_pending_request;
93 int handle_request()
override
95 return this->check_error(this->client->discard_cmd_request(
this, 0));
100 using Device_type = DEV;
// Constructor fragment: capacity is reported to the guest in 512-byte
// sectors (hence the >> 9); the pending queue is taken from the device.
112 dev->capacity() >> 9,
117 _pending(dev->request_queue()),
// 0 picks the discard-capable overload of init_discard_info if DEV has
// discard_info(); otherwise the no-op fallback is chosen.
121 init_discard_info(0);
// Device reset: drain this client's queued requests (without failing the
// in-flight ones — 'false') and forget the negotiated feature bits.
127 void reset_device()
override
130 _pending->drain_queue_for(
this,
false);
132 _negotiated_features.raw = 0;
// Fragment of a second reset path: return to Running and renegotiate.
145 _shutdown_state = Shutdown_type::Running;
146 _negotiated_features.raw = 0;
// Body missing in this extraction; presumably reports whether request
// processing is currently suspended — TODO confirm against full source.
149 bool queue_stopped()
override
// Entry point for a request arriving from the guest. Rejects requests while
// shutting down, then dispatches on the virtio block request type
// (IN/OUT, FLUSH, DISCARD/WRITE_ZEROES — the case labels are lost in this
// extraction). Returns false (via handle_request_result) when the request
// had to be queued — presumably; confirm against full source.
152 bool process_request(cxx::unique_ptr<Request> &&req)
override
154 auto trace = Dbg::trace(
"virtio");
156 if (_shutdown_state != Shutdown_type::Running)
158 trace.printf(
"Failing requests as the client is shutting down\n");
163 trace.printf(
"request received: type 0x%x, sector 0x%llx\n",
164 req->header().type, req->header().sector);
165 switch (req->header().type)
// Read/write: build the DMA block list first; if other requests are
// already queued, keep FIFO order and queue this one too, otherwise
// issue it directly. On failure release the DMA mappings again.
170 auto pending = cxx::make_unique<Pending_inout_request>(
this, cxx::move(req));
171 int ret = build_inout_blocks(pending.get());
174 if (_pending && !_pending->empty())
177 ret = inout_request(pending.get());
179 release_dma(pending.get());
180 return handle_request_result(ret, cxx::move(pending));
// Flush: validate, then same queue-or-issue pattern as above.
184 auto pending = cxx::make_unique<Pending_flush_request>(
this, cxx::move(req));
185 int ret = check_flush_request(pending.get());
188 if (_pending && !_pending->empty())
191 ret = flush_request(pending.get());
193 return handle_request_result(ret, cxx::move(pending));
// Discard / write zeroes: delegated to handle_discard (0 = tag dispatch).
198 auto pending = cxx::make_unique<Pending_cmd_request>(
this, cxx::move(req));
199 return handle_discard(cxx::move(pending), 0);
// Completion callback invoked by the device when a request finishes.
// Unless the client is already gone, kick processing of queued requests.
// Ownership of 'preq' is reclaimed into a unique_ptr so it is destroyed
// when this function returns (the device held only a raw pointer).
208 void task_finished(Generic_pending_request *preq,
int error,
l4_size_t sz)
215 if (_shutdown_state != Client_gone)
219 _pending->process_pending();
222 cxx::unique_ptr<Pending_request> ureq(preq);
// State machine for shutdown/suspend notifications. The asserts encode the
// legal transitions: nothing after Client_gone; System_shutdown is final;
// from System_suspend only a resume (Running) is allowed.
228 void shutdown_event(Shutdown_type type)
234 if (_shutdown_state == Client_gone)
239 l4_assert(_shutdown_state != System_shutdown);
242 l4_assert(_shutdown_state != System_suspend
243 || type == Shutdown_type::Running);
246 _shutdown_state = type;
248 if (type == Shutdown_type::Client_shutdown)
253 l4_assert(_shutdown_state == Shutdown_type::Running);
// On any non-resume event, drain this client's queued requests; requests
// are failed (second arg true) unless the client is gone entirely.
256 if (type != Shutdown_type::Running)
259 _pending->drain_queue_for(
this, type != Client_gone);
// Fragments of the client registration/teardown API (bodies mostly missing
// in this extraction): a registration taking an optional service name, an
// unmap of the IRQ endpoint's capability fpage during teardown, and a
// busy predicate based on the in-flight request counter.
278 char const *service = 0)
308 ->unmap(this->irq_iface()->
obj_cap().fpage(),
316 return _in_flight != 0;
// Walk the request's Inout_block chain and undo the DMA mappings created by
// build_inout_blocks; blocks with num_sectors == 0 were never mapped.
326 void release_dma(Pending_inout_request *req)
329 Inout_block *cur = &req->blocks;
332 if (cur->num_sectors)
333 _device->dma_unmap(cur->dma_addr, cur->num_sectors, req->dir());
334 cur = cur->next.get();
// Translate the guest's scatter list into a chain of Inout_blocks with DMA
// addresses, validating alignment, device bounds and segment limits.
// 'sps' = device sectors per 512-byte virtio sector; the guest always
// addresses in 512-byte units, the device in its native sector size.
338 int build_inout_blocks(Pending_inout_request *preq)
340 auto *req = preq->request.get();
341 l4_size_t sps = _device->sector_size() >> 9;
342 l4_uint64_t current_sector = req->header().sector / sps;
343 l4_uint64_t sectors = _device->capacity() / _device->sector_size();
344 auto dir = preq->dir();
// Read-only devices reject writes (rejection branch lost in extraction).
350 if (device_features().ro())
// Choose write-back flag from negotiated cache features: config_wce
// takes precedence, plain flush support also enables write-back mode.
354 if (_negotiated_features.config_wce())
357 flags = Block_device::Inout_f_wb;
359 else if (_negotiated_features.flush())
360 flags = Block_device::Inout_f_wb;
// Start sector must be aligned to the device sector size.
364 if (current_sector * sps != req->header().sector)
367 Inout_block *last_blk =
nullptr;
371 while (req->has_more())
373 Request::Data_block b;
// Enforce the device's segment limit across the whole request.
375 if (++seg > _device->max_segments())
380 b = req->next_block();
// Each data block must be a whole number of device sectors.
390 l4_size_t sz = b.len / _device->sector_size();
392 if (sz * _device->sector_size() != b.len)
394 Dbg::warn().printf(
"Bad block size 0x%x\n", b.len);
// Bounds check written to avoid overflow: sector > capacity - sz.
401 if (current_sector > sectors - sz)
// Append a new Inout_block to the chain (first iteration uses
// preq->blocks directly — that branch is lost in this extraction).
407 last_blk->next = cxx::make_unique<Inout_block>();
408 blk = last_blk->next.get();
414 long ret = _device->dma_map(b.mem, off, sz, dir, &phys);
418 blk->dma_addr = phys;
419 blk->virt_addr = (
void *) ((
l4_addr_t)b.mem->local_base() + off);
420 blk->num_sectors = sz;
421 current_sector += sz;
// Per-block cache maintenance before handing buffers to the device (the
// actual l4_cache_* calls are lost in this extraction; presumably clean
// for To_device and invalidate for From_device — TODO confirm).
430 void maintain_cache_before_req(Pending_inout_request
const *preq)
434 for (Inout_block
const *cur = &preq->blocks; cur; cur = cur->next.get())
439 l4_size_t vsize = cur->num_sectors * _device->sector_size();
// Per-block cache maintenance after the device completed the transfer
// (the actual l4_cache_* calls are lost in this extraction).
450 void maintain_cache_after_req(Pending_inout_request
const *preq)
454 for (Inout_block
const *cur = &preq->blocks; cur; cur = cur->next.get())
459 l4_size_t vsize = cur->num_sectors * _device->sector_size();
// Issue a prepared read/write to the device. The guest's 512-byte sector
// index is converted to a device sector index. The completion lambda
// (mostly lost in this extraction) restores cache state and finishes the
// pending request via task_finished.
466 int inout_request(Pending_inout_request *preq)
468 auto *req = preq->request.get();
469 l4_uint64_t sector = req->header().sector / (_device->sector_size() >> 9);
471 maintain_cache_before_req(preq);
472 int res = _device->inout_data(
473 sector, preq->blocks,
476 maintain_cache_after_req(preq);
477 task_finished(preq, error, sz);
// Validate a flush: the flush feature must have been negotiated and, per
// the virtio spec, the sector field of a flush request must be zero.
488 int check_flush_request(Pending_flush_request *preq)
490 if (!_negotiated_features.flush())
493 auto *req = preq->request.get();
496 if (req->header().sector)
// Forward a flush to the device; the completion callback finishes the
// pending request. Capturing 'preq' raw is safe only because the device
// holds the request alive until task_finished reclaims it.
502 int flush_request(Pending_flush_request *preq)
504 int res = _device->flush([
this, preq](
int error,
l4_size_t sz) {
505 task_finished(preq, error, sz);
// Feature-negotiation hook: snapshot the negotiated feature bits for later
// queries (flush/discard/write_zeroes/config_wce checks elsewhere).
515 bool check_features(
void)
override
517 _negotiated_features = negotiated_features();
// Tag-dispatch pair: init_discard_info(0) prefers the 'int' overload, which
// only participates in overload resolution (trailing decltype) when
// T::discard_info() exists; otherwise the 'long' no-op fallback is used.
521 template <
typename T = Device_type>
522 void init_discard_info(
long) {}
524 template <
typename T = Device_type>
525 auto init_discard_info(
int)
526 ->
decltype(((T*)0)->discard_info(), void())
528 _di = _device->discard_info();
// Advertise limits to the guest in 512-byte sector units (hence * sps).
531 size_t sps = _device->sector_size() >> 9;
532 if (_di.max_discard_sectors)
533 set_discard(_di.max_discard_sectors * sps, _di.max_discard_seg,
534 _di.discard_sector_alignment * sps);
535 if (_di.max_write_zeroes_sectors)
537 _di.max_write_zeroes_seg, _di.write_zeroes_may_unmap);
// Queue-or-issue logic for discard/write-zeroes, mirroring the inout and
// flush paths in process_request: build the command blocks, keep FIFO order
// if other requests are already queued, else issue immediately.
540 bool handle_discard(cxx::unique_ptr<Pending_cmd_request> &&pending,
int)
542 int ret = build_discard_cmd_blocks(pending.get());
545 if (this->_pending && !this->_pending->empty())
548 ret = discard_cmd_request(pending.get(), 0);
551 return this->handle_request_result(ret, cxx::move(pending));
// Parse and validate the payload of a discard / write-zeroes request
// (an array of l4virtio_block_discard_t entries) into an Inout_block
// chain. Rejects writes on read-only devices, non-zero header sector,
// and commands whose feature was not negotiated.
554 int build_discard_cmd_blocks(Pending_cmd_request *preq)
556 auto *req = preq->request.get();
559 if (this->device_features().ro())
// Per virtio spec the header sector field must be zero for these types.
563 if (req->header().sector)
568 if (!_negotiated_features.discard())
573 if (!_negotiated_features.write_zeroes())
577 auto *d = _device.get();
580 size_t max_seg = discard ? _di.max_discard_seg : _di.max_write_zeroes_seg;
583 l4_uint64_t sectors = d->capacity() / d->sector_size();
585 Inout_block *last_blk =
nullptr;
587 while (req->has_more())
589 Request::Data_block b;
593 b = req->next_block();
// Payload must be a whole number of discard descriptors, and the total
// descriptor count must respect the advertised segment limit.
602 size_t items = b.len /
sizeof(payload[0]);
603 if (items *
sizeof(payload[0]) != b.len)
606 if (seg + items > max_seg)
610 for (
auto i = 0u; i < items; i++)
// access_once: read the guest-shared descriptor exactly once so later
// checks and uses see one consistent snapshot (guest may race).
612 auto p = cxx::access_once<l4virtio_block_discard_t>(&payload[i]);
// Guest units are 512-byte sectors; require device-sector alignment,
// then convert to device sectors.
616 if (p.sector % sps != 0)
618 if (p.num_sectors % sps != 0)
623 p.num_sectors /= sps;
// Overflow-safe range check against device capacity.
626 if (p.num_sectors > sectors)
628 if (p.sector > sectors - p.num_sectors)
631 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_RESERVED)
// Discard path: UNMAP flag is invalid here; enforce per-range limit.
636 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP)
638 if (p.num_sectors > _di.max_discard_sectors)
// Write-zeroes path: UNMAP only if the device allows it.
643 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP
644 && !_di.write_zeroes_may_unmap)
646 if (p.num_sectors > _di.max_write_zeroes_sectors)
653 last_blk->next = cxx::make_unique<Inout_block>();
654 blk = last_blk->next.get();
659 blk->sector = p.sector;
660 blk->num_sectors = p.num_sectors;
661 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP)
662 blk->flags = Inout_f_unmap;
// Tag-dispatch pair like init_discard_info: callers pass 0, so the 'int'
// overload wins when T::discard_info() exists (trailing decltype SFINAE);
// the 'long' overload is the fallback for devices without discard support
// (its body — presumably an error return — is lost in this extraction).
671 template <
typename T = Device_type>
672 int discard_cmd_request(Pending_cmd_request *,
long)
675 template <
typename T = Device_type>
676 auto discard_cmd_request(Pending_cmd_request *preq,
int)
677 ->
decltype(((T*)0)->discard_info(), int())
679 auto *req = preq->request.get();
// Completion routed through task_finished, same pattern as flush_request.
682 int res = _device->discard(
684 [
this, preq](
int error,
l4_size_t sz) { task_finished(preq, error, sz); },
// Common tail for all request paths: on -L4_EBUSY (branch condition lost in
// this extraction) park the request in the pending queue, transferring
// ownership via release(); otherwise report the error to the guest.
694 template <
typename REQ>
695 bool handle_request_result(
int error, cxx::unique_ptr<REQ> &&pending)
699 Dbg::trace(
"virtio").printf(
"Port busy, queueing request.\n");
700 _pending->add_to_queue(cxx::unique_ptr<Pending_request>(pending.release()));
703 handle_request_error(error, pending.get());
// Report a failed request back to the guest, distinguishing unsupported
// operations from genuine I/O errors (the finalize_request calls with
// L4VIRTIO_BLOCK_S_UNSUPP / S_IOERR are lost in this extraction).
715 void handle_request_error(
int error, Generic_pending_request *pending)
717 auto trace = Dbg::trace(
"virtio");
721 trace.printf(
"Unsupported operation.\n");
727 trace.printf(
"Got IO error: %d\n", error);
// Client state: current shutdown phase, non-owning pointer to the device's
// shared pending-request queue, cached device discard capabilities, and the
// feature bits negotiated with the guest.
734 Shutdown_type _shutdown_state;
736 Request_queue *_pending;
737 Device_discard_feature::Discard_info _di;
739 L4virtio::Svr::Block_features _negotiated_features;
l4_uint64_t Dma_addr
Data type for DMA addresses.
Direction
Direction of the DMA transfers.
@ To_device
device reads the memory
@ None
device is coherently connected to the memory
@ From_device
device writes to the memory
C++ interface for capabilities.
Interface for server-loop related functions.
Abstract interface for object registries.
virtual void unregister_obj(L4::Epiface *o, bool unmap=true)=0
Unregister the given object o from the server.
virtual L4::Cap< L4::Irq > register_irq_obj(L4::Epiface *o)=0
Register o as server-side object for asynchronous IRQs.
virtual L4::Cap< void > register_obj(L4::Epiface *o, char const *service)=0
Register an L4::Epiface for an IPC gate available in the application's environment under the given service name.
Base class for virtio block devices.
void set_write_zeroes(l4_uint32_t max_write_zeroes_sectors, l4_uint32_t max_write_zeroes_seg, l4_uint8_t write_zeroes_may_unmap)
Sets constraints for and enables the write zeroes command.
l4_uint8_t get_writeback()
Get the writeback field from the configuration space.
void set_config_wce(l4_uint8_t writeback)
Sets cache mode and enables the writeback toggle.
Block_dev_base(l4_uint32_t vendor, unsigned queue_size, l4_uint64_t capacity, bool read_only)
Create a new virtio block device.
void set_flush()
Enables the flush command.
void set_size_max(l4_uint32_t sz)
Sets the maximum size of any single segment reported to client.
void set_discard(l4_uint32_t max_discard_sectors, l4_uint32_t max_discard_seg, l4_uint32_t discard_sector_alignment)
Sets constraints for and enables the discard command.
void set_seg_max(l4_uint32_t sz)
Sets the maximum number of segments in a request that is reported to client.
void finalize_request(cxx::unique_ptr< Request > req, unsigned sz, l4_uint8_t status=L4VIRTIO_BLOCK_S_OK)
Releases resources related to a request and notifies the client.
void init_mem_info(unsigned num)
Initialize the memory region list to the given maximum.
A reference-counting pointer with automatic cleanup.
unsigned int l4_size_t
Unsigned size type.
unsigned long l4_addr_t
Address type.
unsigned int l4_uint32_t
Unsigned 32bit value.
unsigned long long l4_uint64_t
Unsigned 64bit value.
int l4_cache_flush_data(unsigned long start, unsigned long end) L4_NOTHROW
Cache flush a range; writes back to PoC.
int l4_cache_clean_data(unsigned long start, unsigned long end) L4_NOTHROW
Cache clean a range in D-cache; writes back to PoC.
int l4_cache_inv_data(unsigned long start, unsigned long end) L4_NOTHROW
Cache invalidate a range; might write back to PoC.
@ L4_EBUSY
Object currently busy, try later.
@ L4_FP_DELETE_OBJ
Flag that indicates that an unmap operation on object capabilities shall try to delete the corresponding objects.
@ L4_FP_ALL_SPACES
Flag to tell the unmap operation to revoke permissions from all child mappings including the mapping in the invoked task.
@ L4VIRTIO_BLOCK_T_DISCARD
Discard a range of sectors.
@ L4VIRTIO_BLOCK_T_FLUSH
Flush data to disk.
@ L4VIRTIO_BLOCK_T_IN
Read from device.
@ L4VIRTIO_BLOCK_T_OUT
Write to device.
@ L4VIRTIO_BLOCK_T_WRITE_ZEROES
Write zeroes to a range of sectors.
@ L4VIRTIO_BLOCK_S_IOERR
IO error on device.
@ L4VIRTIO_BLOCK_S_UNSUPP
Operation is not supported.
T chkcap(T &&cap, char const *extra="", long err=-L4_ENOMEM)
Check for valid capability or raise C++ exception.
L4-VIRTIO Transport C++ API.
Cap< RPC_IFACE > obj_cap() const
Get the (typed) capability to this object.
Epiface implementation for Kobject-based interface implementations.
Server_iface * server_iface() const
Get pointer to server interface at which the object is currently registered.
Exception used by Queue to indicate descriptor errors.
Structure used for the write zeroes and discard commands.
#define l4_assert(expr)
Low-level assert.
Common task related definitions.
Implementation of a list of unique-ptr-managed objects.