L4Re Operating System Framework
Interface and Usage Documentation
virtio_client.h
/*
 * Copyright (C) 2018-2024 Kernkonzept GmbH.
 * Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
 *
 * License: see LICENSE.spdx (in this directory or the directories above)
 */
#pragma once

#include <l4/cxx/ref_ptr>
#include <l4/cxx/unique_ptr>
#include <l4/cxx/utils>
#include <l4/sys/cache.h>

#include <l4/sys/task>

#include <l4/l4virtio/server/virtio-block>

#include <l4/libblock-device/debug.h>
#include <l4/libblock-device/device.h>
#include <l4/libblock-device/types.h>
#include <l4/libblock-device/request.h>

namespace Block_device {

template <typename DEV>
class Virtio_client
: public L4virtio::Svr::Block_dev_base<Mem_region_info>,
  public L4::Epiface_t<Virtio_client<DEV>, L4virtio::Device>
{
protected:
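  // Helper classes carrying the state of one client request while it is
  // processed by the backing device. check_error() finalizes a request on
  // fatal errors but passes -L4_EBUSY through unchanged so that the caller
  // may retry the submission later.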
  class Generic_pending_request : public Pending_request
  {
  protected:
    int check_error(int result)
    {
      if (result < 0 && result != -L4_EBUSY)
        client->handle_request_error(result, this);

      return result;
    }

  public:
    explicit Generic_pending_request(Virtio_client *c, cxx::unique_ptr<Request> &&req)
    : request(cxx::move(req)), client(c)
    {}

    void fail_request() override
    {
      client->finalize_request(cxx::move(request), 0, L4VIRTIO_BLOCK_S_IOERR);
    }

    cxx::unique_ptr<Request> request;
    Virtio_client *client;
  };

  struct Pending_inout_request : public Generic_pending_request
  {
    Inout_block blocks;
    L4Re::Dma_space::Direction dir;

    explicit Pending_inout_request(Virtio_client *c,
                                   cxx::unique_ptr<Request> &&req)
    : Generic_pending_request(c, cxx::move(req))
    {
      dir = this->request->header().type == L4VIRTIO_BLOCK_T_OUT
              ? L4Re::Dma_space::To_device
              : L4Re::Dma_space::From_device;
    }

    ~Pending_inout_request() override
    {
      this->client->release_dma(this);
    }

    int handle_request() override
    { return this->check_error(this->client->inout_request(this)); }
  };

  struct Pending_flush_request : public Generic_pending_request
  {
    using Generic_pending_request::Generic_pending_request;

    int handle_request() override
    { return this->check_error(this->client->flush_request(this)); }
  };

  struct Pending_cmd_request : public Generic_pending_request
  {
    Inout_block blocks;

    using Generic_pending_request::Generic_pending_request;

    int handle_request() override
    {
      return this->check_error(this->client->discard_cmd_request(this, 0));
    }
  };

public:
  using Device_type = DEV;

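  // Note for the constructor below: the Block_dev_base base class expects the
  // capacity in 512-byte virtio sectors, hence dev->capacity() (bytes) is
  // converted with '>> 9'. The exported device is read-only if the backing
  // device is read-only or if 'readonly' is requested explicitly.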
  Virtio_client(cxx::Ref_ptr<Device_type> const &dev, unsigned numds, bool readonly)
  : L4virtio::Svr::Block_dev_base<Mem_region_info>(L4VIRTIO_VENDOR_KK, 0x100,
                                                   dev->capacity() >> 9,
                                                   dev->is_read_only()
                                                   || readonly),
    _client_invalidate_cb(nullptr),
    _client_idle_cb(nullptr),
    _numds(numds),
    _device(dev),
    _in_flight(0)
  {
    reset_client();
    init_discard_info(0);
  }

  void reset_device() override
  {
    if (_client_invalidate_cb)
      _client_invalidate_cb(false);
    _device->reset();
    _negotiated_features.raw = 0;
  }

  void reset_client()
  {
    init_mem_info(_numds);
    set_seg_max(_device->max_segments());
    set_size_max(_device->max_size());
    set_flush();
    set_config_wce(0); // starting in write-through mode
    _shutdown_state = Shutdown_type::Running;
    _negotiated_features.raw = 0;
  }

  bool queue_stopped() override
  { return _shutdown_state == Shutdown_type::Client_gone; }

  // make these interfaces public so that a request scheduler can invoke them
  using L4virtio::Svr::Block_dev_base<Mem_region_info>::check_for_new_requests;
  using L4virtio::Svr::Block_dev_base<Mem_region_info>::get_request;

  // make it possible for the request scheduler to register a direct callback
  void set_client_invalidate_cb(std::function<void(bool)> &&cb)
  {
    _client_invalidate_cb = cb;
  }

  void set_client_idle_cb(std::function<void()> &&cb)
  {
    _client_idle_cb = cb;
  }

  // make it possible for the request scheduler to register a device notify IRQ
  void set_device_notify_irq(L4::Cap<L4::Irq> irq)
  {
    _device_notify_irq = irq;
  }

  L4::Cap<L4::Irq> device_notify_irq() const override
  {
    return _device_notify_irq;
  }

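  // Parse one virtio block request and build the matching pending request
  // object. The pending request is returned to the caller for scheduling; a
  // nullptr return means the request was already finalized here (client
  // shutting down, unsupported request type, or a setup error).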
  cxx::unique_ptr<Pending_request> start_request(cxx::unique_ptr<Request> &&req)
  {
    auto trace = Dbg::trace("virtio");

    cxx::unique_ptr<Pending_request> pending;

    if (_shutdown_state != Shutdown_type::Running)
      {
        trace.printf("Failing requests as the client is shutting down\n");
        this->finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_IOERR);
        return pending;
      }

    trace.printf("request received: type 0x%x, sector 0x%llx\n",
                 req->header().type, req->header().sector);
    switch (req->header().type)
      {
      case L4VIRTIO_BLOCK_T_IN:
      case L4VIRTIO_BLOCK_T_OUT:
        {
          auto p = cxx::make_unique<Pending_inout_request>(this, cxx::move(req));
          int ret = build_inout_blocks(p.get());
          if (ret == L4_EOK)
            pending.reset(p.release());
          else
            handle_request_error(ret, p.get());
          break;
        }
      case L4VIRTIO_BLOCK_T_FLUSH:
        {
          auto p = cxx::make_unique<Pending_flush_request>(this, cxx::move(req));
          int ret = check_flush_request(p.get());
          if (ret == L4_EOK)
            pending.reset(p.release());
          else
            handle_request_error(ret, p.get());
          break;
        }
      case L4VIRTIO_BLOCK_T_DISCARD:
      case L4VIRTIO_BLOCK_T_WRITE_ZEROES:
        {
          auto p = cxx::make_unique<Pending_cmd_request>(this, cxx::move(req));
          int ret = build_discard_cmd_blocks(p.get());
          if (ret == L4_EOK)
            pending.reset(p.release());
          else
            handle_request_error(ret, p.get());
          break;
        }
      default:
        finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_UNSUPP);
        break;
      }

    return pending;
  }

  void task_finished(Generic_pending_request *preq, int error, l4_size_t sz)
  {
    _in_flight--;

    // move on to the next request

    // Only finalize if the client is still alive
    if (_shutdown_state != Client_gone)
      finalize_request(cxx::move(preq->request), sz, error);

    // New requests might be schedulable
    if (_client_idle_cb)
      _client_idle_cb();

    // pending request can be dropped
    cxx::unique_ptr<Pending_request> ureq(preq);
  }

  void shutdown_event(Shutdown_type type)
  {
    // If the client is already in the Client_gone state, it was already shut
    // down and this is another attempt at its removal. This situation can
    // occur because at the time of the previous removal attempt there were
    // still I/O requests in progress.
    if (_shutdown_state == Client_gone)
      return;

    // Transitions from System_shutdown are also not allowed; the initiator
    // should take care of graceful handling of this.
    l4_assert(_shutdown_state != System_shutdown);
    // If we are transitioning from System_suspend, it must be only to Running;
    // the initiator should handle this gracefully.
    l4_assert(_shutdown_state != System_suspend
              || type == Shutdown_type::Running);

    // Update the shutdown state of the client
    _shutdown_state = type;

    if (type == Shutdown_type::Client_shutdown)
      {
        reset();
        reset_client();
        // Client_shutdown must transition to the Running state
        l4_assert(_shutdown_state == Shutdown_type::Running);
      }

    if (type != Shutdown_type::Running)
      {
        if (_client_invalidate_cb)
          _client_invalidate_cb(type != Shutdown_type::Client_gone);
        _device->reset();
      }
  }

  L4::Cap<void> register_obj(L4::Registry_iface *registry,
                             char const *service = 0)
  {
    L4::Cap<void> ret;
    if (service)
      ret = registry->register_obj(this, service);
    else
      ret = registry->register_obj(this);
    L4Re::chkcap(ret);

    return ret;
  }

  L4::Cap<void> register_obj(L4::Registry_iface *registry,
                             L4::Cap<L4::Rcv_endpoint> ep)
  {
    return L4Re::chkcap(registry->register_obj(this, ep));
  }

  void unregister_obj(L4::Registry_iface *registry)
  {
    registry->unregister_obj(this);
  }

  bool busy() const
  {
    return _in_flight != 0;
  }

  Notification_domain const *notification_domain() const
  { return _device->notification_domain(); }

protected:
  L4::Ipc_svr::Server_iface *server_iface() const override
  {
    return this->L4::Epiface::server_iface();
  }

private:
  void release_dma(Pending_inout_request *req)
  {
    // unmap DMA regions
    Inout_block *cur = &req->blocks;
    while (cur)
      {
        if (cur->num_sectors)
          _device->dma_unmap(cur->dma_addr, cur->num_sectors, req->dir);
        cur = cur->next.get();
      }
  }

  int build_inout_blocks(Pending_inout_request *preq)
  {
    auto *req = preq->request.get();
    l4_size_t sps = _device->sector_size() >> 9;
    l4_uint64_t current_sector = req->header().sector / sps;
    l4_uint64_t sectors = _device->capacity() / _device->sector_size();
    auto dir = preq->dir;

    l4_uint32_t flags = 0;
    if (req->header().type == L4VIRTIO_BLOCK_T_OUT)
      {
        // If RO was offered, every write must fail
        if (device_features().ro())
          return -L4_EIO;

        // Figure out whether the write has write-through or write-back semantics
        if (_negotiated_features.config_wce())
          {
            if (get_writeback() == 1)
              flags = Block_device::Inout_f_wb;
          }
        else if (_negotiated_features.flush())
          flags = Block_device::Inout_f_wb;
      }

    // Check alignment of the first sector
    if (current_sector * sps != req->header().sector)
      return -L4_EIO;

    Inout_block *last_blk = nullptr;

    size_t seg = 0;

    while (req->has_more())
      {
        Request::Data_block b;

        if (++seg > _device->max_segments())
          return -L4_EIO;

        try
          {
            b = req->next_block();
          }
        catch (L4virtio::Svr::Bad_descriptor const &e)
          {
            Dbg::warn().printf("Descriptor error: %s\n", e.message());
            return -L4_EIO;
          }

        l4_size_t off = b.mem->ds_offset() + (l4_addr_t) b.addr
                        - (l4_addr_t) b.mem->local_base();

        l4_size_t sz = b.len / _device->sector_size();

        if (sz * _device->sector_size() != b.len)
          {
            Dbg::warn().printf("Bad block size 0x%x\n", b.len);
            return -L4_EIO;
          }

        // Check bounds
        if (sz > sectors)
          return -L4_EIO;
        if (current_sector > sectors - sz)
          return -L4_EIO;

        Inout_block *blk;
        if (last_blk)
          {
            last_blk->next = cxx::make_unique<Inout_block>();
            blk = last_blk->next.get();
          }
        else
          blk = &preq->blocks;

        L4Re::Dma_space::Dma_addr phys;
        long ret = _device->dma_map(b.mem, off, sz, dir, &phys);
        if (ret < 0)
          return ret;

        blk->dma_addr = phys;
        blk->virt_addr = (void *) ((l4_addr_t)b.mem->local_base() + off);
        blk->num_sectors = sz;
        current_sector += sz;
        blk->flags = flags;

        last_blk = blk;
      }

    return L4_EOK;
  }

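  // Cache maintenance around DMA transfers: before submitting a request,
  // buffers the device will write to are invalidated, buffers the device will
  // read from are cleaned, and bidirectional buffers are flushed. After the
  // transfer completes, anything the device may have written is invalidated
  // again. Nothing needs to be done for coherently connected devices
  // (direction L4Re::Dma_space::None).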
  void maintain_cache_before_req(Pending_inout_request const *preq)
  {
    if (preq->dir == L4Re::Dma_space::None)
      return;
    for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
      {
        l4_addr_t vstart = (l4_addr_t)cur->virt_addr;
        if (vstart)
          {
            l4_size_t vsize = cur->num_sectors * _device->sector_size();
            if (preq->dir == L4Re::Dma_space::From_device)
              l4_cache_inv_data(vstart, vstart + vsize);
            else if (preq->dir == L4Re::Dma_space::To_device)
              l4_cache_clean_data(vstart, vstart + vsize);
            else // L4Re::Dma_space::Bidirectional
              l4_cache_flush_data(vstart, vstart + vsize);
          }
      }
  }

  void maintain_cache_after_req(Pending_inout_request const *preq)
  {
    if (preq->dir == L4Re::Dma_space::None)
      return;
    for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
      {
        l4_addr_t vstart = (l4_addr_t)cur->virt_addr;
        if (vstart)
          {
            l4_size_t vsize = cur->num_sectors * _device->sector_size();
            if (preq->dir != L4Re::Dma_space::To_device)
              l4_cache_inv_data(vstart, vstart + vsize);
          }
      }
  }

  int inout_request(Pending_inout_request *preq)
  {
    auto *req = preq->request.get();
    l4_uint64_t sector = req->header().sector / (_device->sector_size() >> 9);

    maintain_cache_before_req(preq);
    int res = _device->inout_data(
      sector, preq->blocks,
      [this, preq](int error, l4_size_t sz) {
        maintain_cache_after_req(preq);
        task_finished(preq, error, sz);
      },
      preq->dir);

    // request successfully submitted to device
    if (res >= 0)
      _in_flight++;

    return res;
  }

  int check_flush_request(Pending_flush_request *preq)
  {
    if (!_negotiated_features.flush())
      return -L4_ENOSYS;

    auto *req = preq->request.get();

    // sector must be zero for FLUSH
    if (req->header().sector)
      return -L4_ENOSYS;

    return L4_EOK;
  }

  int flush_request(Pending_flush_request *preq)
  {
    int res = _device->flush([this, preq](int error, l4_size_t sz) {
      task_finished(preq, error, sz);
    });

    // request successfully submitted to device
    if (res >= 0)
      _in_flight++;

    return res;
  }

  bool check_features(void) override
  {
    _negotiated_features = negotiated_features();
    return true;
  }

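  // The two init_discard_info() overloads select an implementation via
  // SFINAE: the 'int' overload only participates if Device_type provides
  // discard_info(), and the call init_discard_info(0) in the constructor
  // prefers it over the 'long' fallback. Discard and write-zeroes support is
  // therefore only advertised for devices that implement it.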
  template <typename T = Device_type>
  void init_discard_info(long) {}

  template <typename T = Device_type>
  auto init_discard_info(int)
    -> decltype(((T*)0)->discard_info(), void())
  {
    _di = _device->discard_info();

    // Convert sector sizes to virtio 512-byte sectors.
    size_t sps = _device->sector_size() >> 9;
    if (_di.max_discard_sectors)
      set_discard(_di.max_discard_sectors * sps, _di.max_discard_seg,
                  _di.discard_sector_alignment * sps);
    if (_di.max_write_zeroes_sectors)
      set_write_zeroes(_di.max_write_zeroes_sectors * sps,
                       _di.max_write_zeroes_seg, _di.write_zeroes_may_unmap);
  }

  int build_discard_cmd_blocks(Pending_cmd_request *preq)
  {
    auto *req = preq->request.get();
    bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);

    if (this->device_features().ro())
      return -L4_EIO;

    // sector is used only for inout requests; it must be zero for
    // discard/write-zeroes requests
    if (req->header().sector)
      return -L4_ENOSYS;

    if (discard)
      {
        if (!_negotiated_features.discard())
          return -L4_ENOSYS;
      }
    else
      {
        if (!_negotiated_features.write_zeroes())
          return -L4_ENOSYS;
      }

    auto *d = _device.get();

    size_t seg = 0;
    size_t max_seg = discard ? _di.max_discard_seg : _di.max_write_zeroes_seg;

    l4_size_t sps = d->sector_size() >> 9;
    l4_uint64_t sectors = d->capacity() / d->sector_size();

    Inout_block *last_blk = nullptr;

    while (req->has_more())
      {
        Request::Data_block b;

        try
          {
            b = req->next_block();
          }
        catch (L4virtio::Svr::Bad_descriptor const &e)
          {
            Dbg::warn().printf("Descriptor error: %s\n", e.message());
            return -L4_EIO;
          }

        auto *payload = reinterpret_cast<l4virtio_block_discard_t *>(b.addr);

        size_t items = b.len / sizeof(payload[0]);
        if (items * sizeof(payload[0]) != b.len)
          return -L4_EIO;

        if (seg + items > max_seg)
          return -L4_EIO;
        seg += items;

        for (auto i = 0u; i < items; i++)
          {
            auto p = cxx::access_once<l4virtio_block_discard_t>(&payload[i]);

            // Check sector size alignment. Discard sector alignment is not
            // strictly enforced as it is merely a hint to the driver.
            if (p.sector % sps != 0)
              return -L4_EIO;
            if (p.num_sectors % sps != 0)
              return -L4_EIO;

            // Convert to the device sector size
            p.sector /= sps;
            p.num_sectors /= sps;

            // Check bounds
            if (p.num_sectors > sectors)
              return -L4_EIO;
            if (p.sector > sectors - p.num_sectors)
              return -L4_EIO;

            if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_RESERVED)
              return -L4_ENOSYS;

            Inout_block *blk;
            if (last_blk)
              {
                last_blk->next = cxx::make_unique<Inout_block>();
                blk = last_blk->next.get();
              }
            else
              blk = &preq->blocks;

            blk->sector = p.sector;
            blk->num_sectors = p.num_sectors;

            if (discard)
              {
                if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP)
                  return -L4_ENOSYS;
                if (p.num_sectors > _di.max_discard_sectors)
                  return -L4_EIO;
              }
            else
              {
                if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP
                    && _di.write_zeroes_may_unmap)
                  blk->flags = Inout_f_unmap;
                if (p.num_sectors > _di.max_write_zeroes_sectors)
                  return -L4_EIO;
              }

            last_blk = blk;
          }
      }

    return L4_EOK;
  }

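  // Same overload selection as for init_discard_info(): the actual
  // discard_cmd_request() implementation is only chosen for devices that
  // provide discard_info(); the 'long' fallback merely reports an I/O error.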
  template <typename T = Device_type>
  int discard_cmd_request(Pending_cmd_request *, long)
  { return -L4_EIO; }

  template <typename T = Device_type>
  auto discard_cmd_request(Pending_cmd_request *preq, int)
    -> decltype(((T*)0)->discard_info(), int())
  {
    auto *req = preq->request.get();
    bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);

    int res = _device->discard(
      0, preq->blocks,
      [this, preq](int error, l4_size_t sz) { task_finished(preq, error, sz); },
      discard);

    // request successfully submitted to device
    if (res >= 0)
      _in_flight++;

    return res;
  }

  // only use on errors that are not busy
  void handle_request_error(int error, Generic_pending_request *pending)
  {
    auto trace = Dbg::trace("virtio");

    if (error == -L4_ENOSYS)
      {
        trace.printf("Unsupported operation.\n");
        finalize_request(cxx::move(pending->request), 0,
                         L4VIRTIO_BLOCK_S_UNSUPP);
      }
    else
      {
        trace.printf("Got IO error: %d\n", error);
        finalize_request(cxx::move(pending->request), 0, L4VIRTIO_BLOCK_S_IOERR);
      }
  }

protected:
  L4::Cap<L4::Irq> _device_notify_irq;
  std::function<void(bool)> _client_invalidate_cb;
  std::function<void()> _client_idle_cb;
  unsigned _numds;
  Shutdown_type _shutdown_state;
  cxx::Ref_ptr<Device_type> _device;
  Device_discard_feature::Discard_info _di;

  L4virtio::Svr::Block_features _negotiated_features;

  unsigned _in_flight;
};

} // namespace Block_device
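Example: a minimal sketch of how a server might export a backing device through
Virtio_client. It assumes a hypothetical driver class My_disk_device that
implements the Block_device device interface used above (capacity(),
sector_size(), max_segments(), inout_data(), ...); the capability name
"virtio_blk" and the use of L4Re::Util::Registry_server are illustrative
choices, not part of this header. Real servers based on libblock-device
typically let the library's device and connection management drive
start_request() and request scheduling instead of doing it by hand.

  #include <l4/re/util/object_registry>
  #include <l4/re/util/br_manager>

  using Client = Block_device::Virtio_client<My_disk_device>;

  static L4Re::Util::Registry_server<L4Re::Util::Br_manager_hooks> server;

  int main()
  {
    // Backing device provided by the driver (hypothetical type).
    cxx::Ref_ptr<My_disk_device> disk{new My_disk_device()};

    // Export it as a virtio block device: two client dataspace regions,
    // writable. Capacity and limits are taken from the device itself.
    Client client(disk, 2 /* numds */, false /* readonly */);

    // Make the device available under the IPC gate named "virtio_blk".
    client.register_obj(server.registry(), "virtio_blk");

    // Serve virtio requests until the server is shut down.
    server.loop();
  }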