L4Re Operating System Framework – Interface and Usage Documentation
virtio_client.h
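Per-client server interface of libblock-device: the Virtio_client template serves the l4virtio block protocol (via L4virtio::Svr::Block_dev_base) on top of a generic Block_device backend. A hedged usage sketch follows the listing.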
1/*
2 * Copyright (C) 2018-2022 Kernkonzept GmbH.
3 * Author(s): Sarah Hoffmann <sarah.hoffmann@kernkonzept.com>
4 *
5 * This file is distributed under the terms of the GNU General Public
6 * License, version 2. Please see the COPYING-GPL-2 file for details.
7 */
8#pragma once
9
10#include <l4/cxx/ref_ptr>
11#include <l4/cxx/unique_ptr>
12#include <l4/cxx/utils>
13#include <l4/sys/cache.h>
14
15#include <l4/sys/task>
16
17#include <l4/l4virtio/server/virtio-block>
18
19#include <l4/libblock-device/debug.h>
20#include <l4/libblock-device/device.h>
21#include <l4/libblock-device/types.h>
22#include <l4/libblock-device/request_queue.h>
23
24namespace Block_device {
25
26template <typename DEV>
27class Virtio_client
28: public L4virtio::Svr::Block_dev_base<Mem_region_info>,
29 public L4::Epiface_t<Virtio_client<DEV>, L4virtio::Device>,
30 public Pending_request::Owner
31{
32protected:
33 class Generic_pending_request : public Pending_request
34 {
35 protected:
36 int check_error(int result)
37 {
38 if (result < 0 && result != -L4_EBUSY)
39 client->handle_request_error(result, this);
40
41 return result;
42 }
43
44 public:
45 explicit Generic_pending_request(Virtio_client *c, cxx::unique_ptr<Request> &&req)
46 : request(cxx::move(req)), client(c)
47 {}
48
49 void fail_request() override
50 {
51 client->finalize_request(cxx::move(request), 0, L4VIRTIO_BLOCK_S_IOERR);
52 }
53
54 bool is_owner(Pending_request::Owner *owner) override
55 { return static_cast<Pending_request::Owner *>(client) == owner; }
56
57 cxx::unique_ptr<Request> request;
58 Virtio_client *client;
59 };
60
61 struct Pending_inout_request : public Generic_pending_request
62 {
63 Inout_block blocks;
64
65 using Generic_pending_request::Generic_pending_request;
66
67 L4Re::Dma_space::Direction dir() const
68 {
69 return this->request->header().type == L4VIRTIO_BLOCK_T_OUT
70 ? L4Re::Dma_space::Direction::To_device
71 : L4Re::Dma_space::Direction::From_device;
72 }
73
74 int handle_request() override
75 { return this->check_error(this->client->inout_request(this)); }
76
77 };
78
79 struct Pending_flush_request : public Generic_pending_request
80 {
81 using Generic_pending_request::Generic_pending_request;
82
83 int handle_request() override
84 { return this->check_error(this->client->flush_request(this)); }
85 };
86
87 struct Pending_cmd_request : public Generic_pending_request
88 {
89 Inout_block blocks;
90
91 using Generic_pending_request::Generic_pending_request;
92
93 int handle_request() override
94 {
95 return this->check_error(this->client->discard_cmd_request(this, 0));
96 }
97 };
98
99public:
100 using Device_type = DEV;
101
102 /**
103  * Create a new interface for an existing device.
104  *
105  * \param dev      Device this interface drives.
106  * \param numds    Maximum number of dataspaces the client is allowed
107  *                 to register for I/O buffers.
108  * \param readonly If true, the device is exported read-only.
109  */
110 Virtio_client(cxx::Ref_ptr<Device_type> const &dev, unsigned numds, bool readonly)
111 : L4virtio::Svr::Block_dev_base<Mem_region_info>(L4VIRTIO_VENDOR_KK, 0x100,
112 dev->capacity() >> 9,
113 dev->is_read_only()
114 || readonly),
115 _numds(numds),
116 _device(dev),
117 _pending(dev->request_queue()),
118 _in_flight(0)
119 {
120 reset_client();
121 init_discard_info(0);
122 }
123
124 /**
125  * Reset the hardware device driven by this interface.
126  */
127 void reset_device() override
128 {
129 if (_pending)
130 _pending->drain_queue_for(this, false);
131 _device->reset();
132 _negotiated_features.raw = 0;
133 }
134
135 /**
136  * Reinitialize the client state after a reset.
137  */
138 void reset_client()
139 {
140 init_mem_info(_numds);
141 set_seg_max(_device->max_segments());
142 set_size_max(_device->max_size());
143 set_flush();
144 set_config_wce(0); // starting in write-through mode
145 _shutdown_state = Shutdown_type::Running;
146 _negotiated_features.raw = 0;
147 }
148
149 bool queue_stopped() override
150 { return false; }
151
152 bool process_request(cxx::unique_ptr<Request> &&req) override
153 {
154 auto trace = Dbg::trace("virtio");
155
156 if (_shutdown_state != Shutdown_type::Running)
157 {
158 trace.printf("Failing requests as the client is shutting down\n");
159 this->finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_IOERR);
160 return false;
161 }
162
163 trace.printf("request received: type 0x%x, sector 0x%llx\n",
164 req->header().type, req->header().sector);
165 switch (req->header().type)
166 {
167 case L4VIRTIO_BLOCK_T_IN:
168 case L4VIRTIO_BLOCK_T_OUT:
169 {
170 auto pending = cxx::make_unique<Pending_inout_request>(this, cxx::move(req));
171 int ret = build_inout_blocks(pending.get());
172 if (ret >= 0)
173 {
174 if (_pending && !_pending->empty())
175 ret = -L4_EBUSY; // make sure to keep request order
176 else
177 ret = inout_request(pending.get());
178 } else
179 release_dma(pending.get());
180 return handle_request_result(ret, cxx::move(pending));
181 }
182 case L4VIRTIO_BLOCK_T_FLUSH:
183 {
184 auto pending = cxx::make_unique<Pending_flush_request>(this, cxx::move(req));
185 int ret = check_flush_request(pending.get());
186 if (ret == L4_EOK)
187 {
188 if (_pending && !_pending->empty())
189 ret = -L4_EBUSY; // make sure to keep request order
190 else
191 ret = flush_request(pending.get());
192 }
193 return handle_request_result(ret, cxx::move(pending));
194 }
195 case L4VIRTIO_BLOCK_T_WRITE_ZEROES:
196 case L4VIRTIO_BLOCK_T_DISCARD:
197 {
198 auto pending = cxx::make_unique<Pending_cmd_request>(this, cxx::move(req));
199 return handle_discard(cxx::move(pending), 0);
200 }
201 default:
202 finalize_request(cxx::move(req), 0, L4VIRTIO_BLOCK_S_UNSUPP);
203 }
204
205 return true;
206 }
207
208 void task_finished(Generic_pending_request *preq, int error, l4_size_t sz)
209 {
210 _in_flight--;
211
212 // move on to the next request
213
214 // Only finalize if the client is still alive
215 if (_shutdown_state != Client_gone)
216 finalize_request(cxx::move(preq->request), sz, error);
217
218 if (_pending)
219 _pending->process_pending();
220
221 // pending request can be dropped
222 cxx::unique_ptr<Pending_request> ureq(preq);
223 }
224
225 /**
226  * Process a shutdown event on the client.
227  */
228 void shutdown_event(Shutdown_type type)
229 {
230 // If the client is already in the Client_gone state, it means that it was
231 // already shutdown and this is another go at its removal. This situation
232 // can occur because at the time of its previous removal attempt there were
233 // still I/O requests in progress.
234 if (_shutdown_state == Client_gone)
235 return;
236
237 // Transitions from System_shutdown are also not allowed, the initiator
238 // should take care of graceful handling of this.
239 l4_assert(_shutdown_state != System_shutdown);
240 // If we are transitioning from System_suspend, it must be only to Running,
241 // the initiator should handle this gracefully.
242 l4_assert(_shutdown_state != System_suspend
243 || type == Shutdown_type::Running);
244
245 // Update shutdown state of the client
246 _shutdown_state = type;
247
248 if (type == Shutdown_type::Client_shutdown)
249 {
250 reset();
251 reset_client();
252 // Client_shutdown must transit to the Running state
253 l4_assert(_shutdown_state == Shutdown_type::Running);
254 }
255
256 if (type != Shutdown_type::Running)
257 {
258 if (_pending)
259 _pending->drain_queue_for(this, type != Client_gone);
260 _device->reset();
261 }
262 }
263
264 /**
265  * Attach this virtio interface to an object registry.
266  *
267  * \param registry Object registry responsible for dispatching requests
268  *                 to this client interface.
269  * \param service  Optional name of an IPC gate in the environment to
270  *                 bind the object to; if 0, a fresh capability is
271  *                 allocated.
272  *
273  * \return Capability of the registered object.
274  *
275  * \throws L4::Runtime_error if registration fails.
276  */
277 L4::Cap<void> register_obj(L4::Registry_iface *registry,
278 char const *service = 0)
279 {
280 L4Re::chkcap(registry->register_irq_obj(this->irq_iface()));
281 L4::Cap<void> ret;
282 if (service)
283 ret = registry->register_obj(this, service);
284 else
285 ret = registry->register_obj(this);
286 L4Re::chkcap(ret);
287
288 return ret;
289 }
290
291 L4::Cap<void> register_obj(L4::Registry_iface *registry,
292 L4::Cap<L4::Rcv_endpoint> ep)
293 {
294 L4Re::chkcap(registry->register_irq_obj(this->irq_iface()));
295
296 return L4Re::chkcap(registry->register_obj(this, ep));
297 }
298
299 /**
300  * Detach the device from the object registry.
301  *
302  * \param registry Object registry used in a previous register_obj().
303  */
304 void unregister_obj(L4::Registry_iface *registry)
305 {
306 // We need to delete the IRQ object created in register_irq_obj() ourselves
307 L4::Cap<L4::Task>(L4Re::This_task)
308 ->unmap(this->irq_iface()->obj_cap().fpage(),
309 L4_FP_DELETE_OBJ | L4_FP_ALL_SPACES);
310 registry->unregister_obj(this->irq_iface());
311 registry->unregister_obj(this);
312 }
313
314 bool busy() const
315 {
316 return _in_flight != 0;
317 }
318
319protected:
320 L4::Ipc_svr::Server_iface *server_iface() const override
321 {
322 return this->L4::Epiface::server_iface();
323 }
324
325private:
326 void release_dma(Pending_inout_request *req)
327 {
328 // unmap DMA regions
329 Inout_block *cur = &req->blocks;
330 while (cur)
331 {
332 if (cur->num_sectors)
333 _device->dma_unmap(cur->dma_addr, cur->num_sectors, req->dir());
334 cur = cur->next.get();
335 }
336 }
337
338 int build_inout_blocks(Pending_inout_request *preq)
339 {
340 auto *req = preq->request.get();
341 l4_size_t sps = _device->sector_size() >> 9;
342 l4_uint64_t current_sector = req->header().sector / sps;
343 l4_uint64_t sectors = _device->capacity() / _device->sector_size();
344 auto dir = preq->dir();
345
346 l4_uint32_t flags = 0;
347 if (req->header().type == L4VIRTIO_BLOCK_T_OUT)
348 {
349 // If RO was offered, every write must fail
350 if (device_features().ro())
351 return -L4_EIO;
352
353 // Figure out whether the write has a write-through or write-back semantics
354 if (_negotiated_features.config_wce())
355 {
356 if (get_writeback() == 1)
357 flags = Block_device::Inout_f_wb;
358 }
359 else if (_negotiated_features.flush())
360 flags = Block_device::Inout_f_wb;
361 }
362
363 // Check alignment of the first sector
364 if (current_sector * sps != req->header().sector)
365 return -L4_EIO;
366
367 Inout_block *last_blk = nullptr;
368
369 size_t seg = 0;
370
371 while (req->has_more())
372 {
373 Request::Data_block b;
374
375 if (++seg > _device->max_segments())
376 return -L4_EIO;
377
378 try
379 {
380 b = req->next_block();
381 }
382 catch (L4virtio::Svr::Bad_descriptor const &e)
383 {
384 return -L4_EIO;
385 }
386
387 l4_size_t off = b.mem->ds_offset() + (l4_addr_t) b.addr
388 - (l4_addr_t) b.mem->local_base();
389
390 l4_size_t sz = b.len / _device->sector_size();
391
392 if (sz * _device->sector_size() != b.len)
393 {
394 Dbg::warn().printf("Bad block size 0x%x\n", b.len);
395 return -L4_EIO;
396 }
397
398 // Check bounds
399 if (sz > sectors)
400 return -L4_EIO;
401 if (current_sector > sectors - sz)
402 return -L4_EIO;
403
404 Inout_block *blk;
405 if (last_blk)
406 {
407 last_blk->next = cxx::make_unique<Inout_block>();
408 blk = last_blk->next.get();
409 }
410 else
411 blk = &preq->blocks;
412
413 L4Re::Dma_space::Dma_addr phys;
414 long ret = _device->dma_map(b.mem, off, sz, dir, &phys);
415 if (ret < 0)
416 return ret;
417
418 blk->dma_addr = phys;
419 blk->virt_addr = (void *) ((l4_addr_t)b.mem->local_base() + off);
420 blk->num_sectors = sz;
421 current_sector += sz;
422 blk->flags = flags;
423
424 last_blk = blk;
425 }
426
427 return L4_EOK;
428 }
429
430 void maintain_cache_before_req(Pending_inout_request const *preq)
431 {
432 if (preq->dir() == L4Re::Dma_space::None)
433 return;
434 for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
435 {
436 l4_addr_t vstart = (l4_addr_t)cur->virt_addr;
437 if (vstart)
438 {
439 l4_size_t vsize = cur->num_sectors * _device->sector_size();
440 if (preq->dir() == L4Re::Dma_space::From_device)
441 l4_cache_inv_data(vstart, vstart + vsize);
442 else if (preq->dir() == L4Re::Dma_space::To_device)
443 l4_cache_clean_data(vstart, vstart + vsize);
444 else // L4Re::Dma_space::Bidirectional
445 l4_cache_flush_data(vstart, vstart + vsize);
446 }
447 }
448 }
449
450 void maintain_cache_after_req(Pending_inout_request const *preq)
451 {
452 if (preq->dir() == L4Re::Dma_space::None)
453 return;
454 for (Inout_block const *cur = &preq->blocks; cur; cur = cur->next.get())
455 {
456 l4_addr_t vstart = (l4_addr_t)cur->virt_addr;
457 if (vstart)
458 {
459 l4_size_t vsize = cur->num_sectors * _device->sector_size();
460 if (preq->dir() != L4Re::Dma_space::To_device)
461 l4_cache_inv_data(vstart, vstart + vsize);
462 }
463 }
464 }
465
466 int inout_request(Pending_inout_request *preq)
467 {
468 auto *req = preq->request.get();
469 l4_uint64_t sector = req->header().sector / (_device->sector_size() >> 9);
470
471 maintain_cache_before_req(preq);
472 int res = _device->inout_data(
473 sector, preq->blocks,
474 [this, preq](int error, l4_size_t sz) {
475 release_dma(preq);
476 maintain_cache_after_req(preq);
477 task_finished(preq, error, sz);
478 },
479 preq->dir());
480
481 // request successfully submitted to device
482 if (res >= 0)
483 _in_flight++;
484
485 return res;
486 }
487
488 int check_flush_request(Pending_flush_request *preq)
489 {
490 if (!_negotiated_features.flush())
491 return -L4_ENOSYS;
492
493 auto *req = preq->request.get();
494
495 // sector must be zero for FLUSH
496 if (req->header().sector)
497 return -L4_ENOSYS;
498
499 return L4_EOK;
500 }
501
502 int flush_request(Pending_flush_request *preq)
503 {
504 int res = _device->flush([this, preq](int error, l4_size_t sz) {
505 task_finished(preq, error, sz);
506 });
507
508 // request successfully submitted to device
509 if (res >= 0)
510 _in_flight++;
511
512 return res;
513 }
514
515 bool check_features(void) override
516 {
517 _negotiated_features = negotiated_features();
518 return true;
519 }
520
521 template <typename T = Device_type>
522 void init_discard_info(long) {}
523
524 template <typename T = Device_type>
525 auto init_discard_info(int)
526 -> decltype(((T*)0)->discard_info(), void())
527 {
528 _di = _device->discard_info();
529
530 // Convert sector sizes to virtio 512-byte sectors.
531 size_t sps = _device->sector_size() >> 9;
532 if (_di.max_discard_sectors)
533 set_discard(_di.max_discard_sectors * sps, _di.max_discard_seg,
534 _di.discard_sector_alignment * sps);
535 if (_di.max_write_zeroes_sectors)
536 set_write_zeroes(_di.max_write_zeroes_sectors * sps,
537 _di.max_write_zeroes_seg, _di.write_zeroes_may_unmap);
538 }
539
540 bool handle_discard(cxx::unique_ptr<Pending_cmd_request> &&pending, int)
541 {
542 int ret = build_discard_cmd_blocks(pending.get());
543 if (ret >= 0)
544 {
545 if (this->_pending && !this->_pending->empty())
546 ret = -L4_EBUSY; // make sure to keep request order
547 else
548 ret = discard_cmd_request(pending.get(), 0);
549 }
550
551 return this->handle_request_result(ret, cxx::move(pending));
552 }
553
554 int build_discard_cmd_blocks(Pending_cmd_request *preq)
555 {
556 auto *req = preq->request.get();
557 bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);
558
559 if (this->device_features().ro())
560 return -L4_EIO;
561
562 // sector is used only for inout requests, it must be zero for WzD
563 if (req->header().sector)
564 return -L4_ENOSYS;
565
566 if (discard)
567 {
568 if (!_negotiated_features.discard())
569 return -L4_ENOSYS;
570 }
571 else
572 {
573 if (!_negotiated_features.write_zeroes())
574 return -L4_ENOSYS;
575 }
576
577 auto *d = _device.get();
578
579 size_t seg = 0;
580 size_t max_seg = discard ? _di.max_discard_seg : _di.max_write_zeroes_seg;
581
582 l4_size_t sps = d->sector_size() >> 9;
583 l4_uint64_t sectors = d->capacity() / d->sector_size();
584
585 Inout_block *last_blk = nullptr;
586
587 while (req->has_more())
588 {
589 Request::Data_block b;
590
591 try
592 {
593 b = req->next_block();
594 }
595 catch (L4virtio::Svr::Bad_descriptor const &e)
596 {
597 return -L4_EIO;
598 }
599
600 auto *payload = reinterpret_cast<l4virtio_block_discard_t *>(b.addr);
601
602 size_t items = b.len / sizeof(payload[0]);
603 if (items * sizeof(payload[0]) != b.len)
604 return -L4_EIO;
605
606 if (seg + items > max_seg)
607 return -L4_EIO;
608 seg += items;
609
610 for (auto i = 0u; i < items; i++)
611 {
612 auto p = cxx::access_once<l4virtio_block_discard_t>(&payload[i]);
613
614 // Check sector size alignment. Discard sector alignment is not
615 // strictly enforced as it is merely a hint to the driver.
616 if (p.sector % sps != 0)
617 return -L4_EIO;
618 if (p.num_sectors % sps != 0)
619 return -L4_EIO;
620
621 // Convert to the device sector size
622 p.sector /= sps;
623 p.num_sectors /= sps;
624
625 // Check bounds
626 if (p.num_sectors > sectors)
627 return -L4_EIO;
628 if (p.sector > sectors - p.num_sectors)
629 return -L4_EIO;
630
631 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_RESERVED)
632 return -L4_ENOSYS;
633
634 if (discard)
635 {
636 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP)
637 return -L4_ENOSYS;
638 if (p.num_sectors > _di.max_discard_sectors)
639 return -L4_EIO;
640 }
641 else
642 {
643 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP
644 && !_di.write_zeroes_may_unmap)
645 return -L4_ENOSYS;
646 if (p.num_sectors > _di.max_write_zeroes_sectors)
647 return -L4_EIO;
648 }
649
650 Inout_block *blk;
651 if (last_blk)
652 {
653 last_blk->next = cxx::make_unique<Inout_block>();
654 blk = last_blk->next.get();
655 }
656 else
657 blk = &preq->blocks;
658
659 blk->sector = p.sector;
660 blk->num_sectors = p.num_sectors;
661 if (p.flags & L4VIRTIO_BLOCK_DISCARD_F_UNMAP)
662 blk->flags = Inout_f_unmap;
663
664 last_blk = blk;
665 }
666 }
667
668 return L4_EOK;
669 }
670
671 template <typename T = Device_type>
672 int discard_cmd_request(Pending_cmd_request *, long)
673 { return -L4_EIO; }
674
675 template <typename T = Device_type>
676 auto discard_cmd_request(Pending_cmd_request *preq, int)
677 -> decltype(((T*)0)->discard_info(), int())
678 {
679 auto *req = preq->request.get();
680 bool discard = (req->header().type == L4VIRTIO_BLOCK_T_DISCARD);
681
682 int res = _device->discard(
683 0, preq->blocks,
684 [this, preq](int error, l4_size_t sz) { task_finished(preq, error, sz); },
685 discard);
686
687 // request successfully submitted to device
688 if (res >= 0)
689 _in_flight++;
690
691 return res;
692 }
693
694 template <typename REQ>
695 bool handle_request_result(int error, cxx::unique_ptr<REQ> &&pending)
696 {
697 if (error == -L4_EBUSY && _pending)
698 {
699 Dbg::trace("virtio").printf("Port busy, queueing request.\n");
700 _pending->add_to_queue(cxx::unique_ptr<Pending_request>(pending.release()));
701 }
702 else if (error < 0)
703 handle_request_error(error, pending.get());
704 else
705 {
706 // request has been successfully sent to hardware
707 // which now has ownership of Request pointer, so release here
708 pending.release();
709 }
710
711 return true;
712 }
713
714 // only use on errors that are not busy
715 void handle_request_error(int error, Generic_pending_request *pending)
716 {
717 auto trace = Dbg::trace("virtio");
718
719 if (error == -L4_ENOSYS)
720 {
721 trace.printf("Unsupported operation.\n");
722 finalize_request(cxx::move(pending->request), 0,
723 L4VIRTIO_BLOCK_S_UNSUPP);
724 }
725 else
726 {
727 trace.printf("Got IO error: %d\n", error);
728 finalize_request(cxx::move(pending->request), 0, L4VIRTIO_BLOCK_S_IOERR);
729 }
730 }
731
732protected:
733 unsigned _numds;
734 Shutdown_type _shutdown_state;
735 cxx::Ref_ptr<Device_type> _device;
736 Request_queue *_pending;
737 Device_discard_feature::Discard_info _di;
738
739 L4virtio::Svr::Block_features _negotiated_features;
740
741 unsigned _in_flight;
742};
743
744} // namespace Block_device
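As a rough usage sketch, a block server instantiates Virtio_client over its backend device type and attaches it to an object registry. The device type My_device, the gate name "svr", and the registry setup below are illustrative assumptions, not part of this header.

// Hypothetical sketch: wiring a Virtio_client to a backend device and an
// object registry. My_device stands for some Block_device device
// implementation providing capacity(), sector_size(), inout_data(), etc.
#include <l4/re/util/object_registry>
#include <l4/re/util/br_manager>
#include <l4/libblock-device/virtio_client.h>

static L4Re::Util::Registry_server<L4Re::Util::Br_manager_hooks> server;

void serve(cxx::Ref_ptr<My_device> const &dev)
{
  // Allow up to four shared dataspaces; export the device read-write.
  auto client =
    cxx::make_unique<Block_device::Virtio_client<My_device>>(dev, 4, false);

  // Bind to the IPC gate named "svr" in the environment; this also
  // creates and registers the notification IRQ object.
  client->register_obj(server.registry(), "svr");

  server.loop(); // dispatch virtio requests until shutdown
}

Note that the interface speaks virtio's 512-byte sector units: the constructor reports dev->capacity() >> 9 sectors, and request sector numbers are converted to device sectors by dividing by sector_size() >> 9, so the backend sector size is assumed to be a multiple of 512.

The optional discard/write-zeroes support is selected at compile time with expression SFINAE: init_discard_info(0) and discard_cmd_request(preq, 0) prefer the int overloads, which only exist when the device type provides discard_info(); otherwise the long fallbacks are chosen. A minimal standalone illustration of the same idiom (probe, With_discard, Without_discard are made-up names):

#include <cstdio>

struct With_discard { int discard_info() { return 42; } };
struct Without_discard {};

// Fallback, used when the int overload below is not viable.
template <typename T>
void probe(long) { std::puts("fallback: no discard support"); }

// Only well-formed if T::discard_info() exists; the int parameter makes
// this overload the better match for the literal argument 0.
template <typename T>
auto probe(int) -> decltype(((T*)0)->discard_info(), void())
{ std::puts("device supports discard"); }

int main()
{
  probe<With_discard>(0);    // prints: device supports discard
  probe<Without_discard>(0); // prints: fallback: no discard support
}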