// L4Re - L4 Runtime Environment: virtio
1 // vi:ft=cpp
2 /*
3  * (c) 2014 Alexander Warg <warg@os.inf.tu-dresden.de>
4  *
5  * This file is part of TUD:OS and distributed under the terms of the
6  * GNU General Public License 2.
7  * Please see the COPYING-GPL-2 file for details.
8  *
9  * As a special exception, you may use this file as part of a free software
10  * library without restriction. Specifically, if other files instantiate
11  * templates or use macros or inline functions from this file, or you compile
12  * this file and link it with other files to produce an executable, this
13  * file does not by itself cause the resulting executable to be covered by
14  * the GNU General Public License. This exception does not however
15  * invalidate any other reasons why the executable file might be covered by
16  * the GNU General Public License.
17  */
18 
19 
20 #pragma once
21 
22 #include <l4/sys/types.h>
23 #include <l4/cxx/bitfield>
24 #include <l4/cxx/minmax>
25 #include <l4/cxx/utils>
26 
27 #include <limits.h>
28 #include <string.h>
29 #include <stdio.h>
30 
31 #include "../virtqueue"
32 
33 /**
34  * \ingroup l4virtio_transport
35  *
36  * L4-VIRTIO Transport C++ API
37  */
38 namespace L4virtio {
39 namespace Svr {
40 
41 /**
42  * \brief Type of the device status register.
43  */
struct Dev_status
{
  unsigned char raw; ///< Raw value of the VIRTIO device status register.
  Dev_status() = default;

  /// Make Status from raw value.
  explicit Dev_status(l4_uint32_t v) : raw(v) {}

  // Handshake bits of the VIRTIO device status register.
  CXX_BITFIELD_MEMBER( 0, 0, acked, raw);        ///< ACKNOWLEDGE: guest noticed the device.
  CXX_BITFIELD_MEMBER( 1, 1, driver, raw);       ///< DRIVER: guest knows how to drive it.
  CXX_BITFIELD_MEMBER( 2, 2, driver_ok, raw);    ///< DRIVER_OK: driver is set up.
  CXX_BITFIELD_MEMBER( 3, 3, features_ok, raw);  ///< FEATURES_OK: feature negotiation done.
  CXX_BITFIELD_MEMBER( 7, 7, failed, raw);       ///< FAILED: driver gave up on the device.

  /**
   * \brief Check if the device is in running state.
   * \return true if the device is in running state.
   *
   * The device is in running state when exactly acked(), driver(),
   * features_ok(), and driver_ok() are set and every other status bit
   * — including failed() — is clear (i.e. `raw == 0xf`).
   */
  bool running() const
  {
    return (raw == 0xf);
  }
};
71 
72 /**
73  * \brief Type for device feature bitmap
74  */
struct Dev_features
{
  l4_uint32_t raw; ///< The raw value of the features bitmap
  Dev_features() = default;

  /// Make Features from a raw bitmap.
  explicit Dev_features(l4_uint32_t v) : raw(v) {}

  /// Device supports indirect descriptor tables (VIRTIO_F_RING_INDIRECT_DESC).
  CXX_BITFIELD_MEMBER(28, 28, ring_indirect_desc, raw);
  /// Device supports the used/avail event index mechanism (VIRTIO_F_RING_EVENT_IDX).
  CXX_BITFIELD_MEMBER(29, 29, ring_event_idx, raw);
};
86 
87 
88 /**
89  * Virtqueue implementation for the device
90  *
91  * This class represents a single virtqueue, with a local running available
92  * index.
93  */
class Virtqueue : public L4virtio::Virtqueue
{
public:
  /**
   * VIRTIO request, essentially a descriptor from the available ring.
   */
  class Head_desc
  {
    friend class Virtqueue;
  private:
    /// Head descriptor of the request; NULL marks an invalid request.
    Virtqueue::Desc const *_d;
    Head_desc(Virtqueue *r, unsigned i) : _d(r->desc(i)) {}

    // Deliberately left incomplete: used as the target of a safe-bool-style
    // pointer conversion below, so a Head_desc can be tested in boolean
    // context without being convertible to any usable pointer type.
    struct Null_ptr_check;

  public:
    /// Make invalid (NULL) request.
    Head_desc() : _d(0) {}

    /// \return True if the request is valid (not NULL).
    bool valid() const { return _d; }

    /// \return True if the request is valid (not NULL).
    operator Null_ptr_check const * () const
    { return reinterpret_cast<Null_ptr_check const *>(_d); }

    /// \return Pointer to the head descriptor of the request.
    Desc const *desc() const
    { return _d; }
  };

  /// A Head_desc together with the queue it was taken from.
  struct Request : Head_desc
  {
    Virtqueue *ring; ///< Queue this request was retrieved from.
    Request() = default;
  private:
    friend class Virtqueue;
    Request(Virtqueue *r, unsigned i) : Head_desc(r, i), ring(r) {}
  };


  /**
   * \brief Get the next available descriptor from the available ring.
   * \pre The queue must be in working state.
   * \return A Request for the next available descriptor, the Request is invalid
   *         if there are no descriptors in the available ring.
   * \note The return value must be checked even when a previous desc_avail()
   *       returned true.
   */
  Request next_avail()
  {
    if (L4_LIKELY(_current_avail != _avail->idx))
      {
        // Read barrier: make sure the ring entry is read only after the
        // driver's update of _avail->idx has been observed above.
        rmb();
        unsigned head = _current_avail & _idx_mask;
        // Advance the local running available index; the driver-visible
        // index is only ever written by the driver.
        ++_current_avail;
        return Request(this, _avail->ring[head]);
      }
    return Request();
  }

  /**
   * \brief Test for available descriptors.
   * \return true if there are descriptors available, false if not.
   * \pre The queue must be in working state.
   *
   * Note: no read barrier is issued here; next_avail() re-checks the index
   * and issues the barrier before touching ring contents.
   */
  bool desc_avail() const
  {
    return _current_avail != _avail->idx;
  }

  /**
   * \brief Put the given descriptor into the used ring.
   * \param r    request that shall be marked as finished.
   * \param len  the total number of bytes written.
   * \pre queue must be in working state.
   * \pre \a r must be a valid request from this queue.
   */
  void consumed(Head_desc const &r, l4_uint32_t len = 0)
  {
    l4_uint16_t i = _used->idx & _idx_mask;
    // Store the descriptor index (offset of the head within _desc) plus the
    // number of bytes written, then publish it.
    _used->ring[i] = Used_elem(r._d - _desc, len);
    // Write barrier: the ring entry must be visible to the driver before
    // the used index is advanced.
    wmb();
    ++_used->idx;
  }

  /**
   * Put a batch of descriptors into the used ring at once.
   *
   * \tparam ITER  Iterator over pairs of (Head_desc, written length),
   *               accessed as elem->first / elem->second.
   * \param begin  First element of the batch.
   * \param end    One past the last element of the batch.
   * \pre queue must be in working state.
   */
  template<typename ITER>
  void consumed(ITER const &begin, ITER const &end)
  {
    l4_uint16_t added = 0;
    l4_uint16_t idx = _used->idx;

    for (auto elem = begin ; elem != end; ++elem, ++added)
      _used->ring[(idx + added) & _idx_mask]
        = Used_elem(elem->first._d - _desc, elem->second);

    // Publish all entries with a single index update after the barrier.
    wmb();
    _used->idx += added;
  }

  /**
   * Mark a single request as finished and notify the queue observer.
   *
   * \tparam QUEUE_OBSERVER  Type providing notify_queue(Virtqueue *).
   * \param d    Request to finish; invalidated (set to NULL) on return.
   * \param o    Observer to notify about the finished request.
   * \param len  Total number of bytes written for this request.
   */
  template<typename QUEUE_OBSERVER>
  void finish(Head_desc &d, QUEUE_OBSERVER *o, l4_uint32_t len = 0)
  {
    consumed(d, len);
    o->notify_queue(this);
    // Invalidate the request so it cannot be finished twice.
    d._d = 0;
  }

  /**
   * Mark a batch of requests as finished and notify the queue observer once.
   *
   * \tparam QUEUE_OBSERVER  Type providing notify_queue(Virtqueue *).
   * \param begin  First element of the batch (see batched consumed()).
   * \param end    One past the last element of the batch.
   * \param o      Observer to notify about the finished requests.
   */
  template<typename ITER, typename QUEUE_OBSERVER>
  void finish(ITER const &begin, ITER const &end, QUEUE_OBSERVER *o)
  {
    consumed(begin, end);
    o->notify_queue(this);
  }

  /**
   * \brief Set the 'no notify' flag for this queue.
   *
   * Tells the driver that the device does not want avail notifications.
   * This function may be called on a disabled queue.
   */
  void disable_notify()
  {
    if (L4_LIKELY(ready()))
      _used->flags.no_notify() = 1;
  }

  /**
   * \brief Clear the 'no notify' flag for this queue.
   *
   * This function may be called on a disabled queue.
   */
  void enable_notify()
  {
    if (L4_LIKELY(ready()))
      _used->flags.no_notify() = 0;
  }

  /**
   * \brief Get a descriptor from the descriptor list.
   * \param idx  the index of the descriptor.
   * \pre \a idx < \a num
   * \pre queue must be in working state
   */
  Desc const *desc(unsigned idx) const
  { return _desc + idx; }

};
242 
243 /**
244  * \brief Abstract data buffer.
245  */
246 struct Data_buffer
247 {
248  char *pos; ///< Current buffer position
249  l4_uint32_t left; ///< Bytes left in buffer
250 
251  Data_buffer() = default;
252 
253  /**
254  * \brief Create buffer for object \a p.
255  * \tparam T type of object (implicit)
256  * \param p pointer to object.
257  *
258  * The buffer shall point to the start of the object \a p and the size left
259  * is sizeof(T).
260  */
261  template<typename T>
262  explicit Data_buffer(T *p)
263  : pos(reinterpret_cast<char *>(p)), left(sizeof(T))
264  {}
265 
266  /**
267  * \brief Set buffer for object \a p.
268  * \tparam T type of object (implicit)
269  * \param p pointer to object.
270  *
271  * The buffer shall point to the start of the object \a p and the size left
272  * is sizeof(T).
273  */
274  template<typename T>
275  void set(T *p)
276  {
277  pos = reinterpret_cast<char *>(p);
278  left = sizeof(T);
279  }
280 
281  /**
282  * \brief Copy contents from this buffer to the destination buffer.
283  * \param dst Destination buffer.
284  * \param max (optional) Maximum number of bytes to copy.
285  * \return the number of bytes copied.
286  *
287  * This function copies at most `max` bytes from this to `dst`. If
288  * `max` is omitted, copies the maximum number of bytes available
289  * that fit `dst`.
290  */
291  l4_uint32_t copy_to(Data_buffer *dst, l4_uint32_t max = UINT_MAX)
292  {
293  unsigned long bytes = cxx::min(cxx::min(left, dst->left), max);
294  memcpy(dst->pos, pos, bytes);
295  left -= bytes;
296  pos += bytes;
297  dst->left -= bytes;
298  dst->pos += bytes;
299  return bytes;
300  }
301 
302  /**
303  * \brief Skip given number of bytes in this buffer.
304  * \param bytes Number of bytes that shall be skipped.
305  * \return The number of bytes skipped.
306  *
307  * Try to skip the given number of bytes in this buffer, if there are less
308  * bytes left in the buffer that given then at most left bytes are skipped
309  * and the amount is returned.
310  */
311  l4_uint32_t skip(l4_uint32_t bytes)
312  {
313  unsigned long b = cxx::min(left, bytes);
314  left -= b;
315  pos += b;
316  return b;
317  }
318 
319  /**
320  * \brief Check if there are no more bytes left in the buffer.
321  * \return true if there are no more bytes left in the buffer.
322  */
323  bool done() const
324  { return left == 0; }
325 };
326 
class Request_processor;

/**
 * \brief Exception used by Queue to indicate descriptor errors.
 */
struct Bad_descriptor
{
  /// The error code
  enum Error
  {
    Bad_address, ///< Address cannot be translated
    Bad_rights,  ///< Missing access rights on memory
    Bad_flags,   ///< Invalid combination of descriptor flags
    Bad_next,    ///< Invalid next index
    Bad_size     ///< Invalid size of memory block
  };

  /// The processor that triggered the exception
  Request_processor const *proc;

  /// The error code
  Error error;

  /**
   * \brief Make a bad descriptor exception.
   * \param proc  The request processor causing the exception
   * \param e     The error code.
   */
  Bad_descriptor(Request_processor const *proc, Error e)
  : proc(proc), error(e)
  {}

  /**
   * Get a human readable description of the error code.
   *
   * \return Message describing the error.
   */
  char const *message() const
  {
    // Messages indexed positionally by Error; keep in sync with the enum.
    // Positional initializers are used because C-style designated array
    // initializers ([Bad_address] = ...) are a GNU extension, not standard
    // C++. The table is 'static' so it is built only once, not per call.
    static char const *const err[] = {
      "Descriptor address cannot be translated",  // Bad_address
      "Insufficient memory access rights",        // Bad_rights
      "Invalid descriptor flags",                 // Bad_flags
      "The descriptor's `next` index is invalid", // Bad_next
      "Invalid size of the memory block"          // Bad_size
    };

    // Explicit cast avoids a signed/unsigned comparison between the enum
    // and size_t and guards against out-of-range error codes.
    if (static_cast<unsigned>(error) >= sizeof(err) / sizeof(err[0]))
      return "Unknown error";

    return err[error];
  }
};
380 
381 
382 /**
383  * \brief Encapsulate the state for processing a VIRTIO request.
384  *
385  * A VIRTIO request is a possibly chained list of descriptors retrieved from
386  * the available ring of a virtqueue, using Virtqueue::next_avail().
387  *
388  * The descriptor processing depends on helper (DESC_MAN) for interpreting the
389  * descriptors in the context of the device implementation.
390  *
391  * DESC_MAN has to provide the functionality to safely dereference a
392  * descriptor from a descriptor list.
393  *
394  * The following methods must be provided by DESC_MAN:
395  * * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
396  * Request_processor const *proc,
397  * Virtqueue::Desc const **table) \endcode
398  * This function is used to dereference \a desc as an indirect descriptor
399  * table, and must return a pointer to an indirect descriptor table.
400  * * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
401  * Request_processor const *proc, ...) \endcode
402  * This function is used to dereference a descriptor as a normal data
403  * buffer, and '...' are the arguments that are passed to start() and next().
404  */
class Request_processor
{
private:
  /// pointer to descriptor table (may point to an indirect table)
  Virtqueue::Desc const *_table;

  /// currently processed descriptor (a snapshot, copied out of the table)
  Virtqueue::Desc _current;

  /// number of entries in the current descriptor table (_table)
  l4_uint16_t _num;

public:
  /**
   * Start processing a new request.
   *
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm       Descriptor manager that is used to translate VIRTIO
   *                 descriptor addresses.
   * \param ring     VIRTIO ring of the request.
   * \param request  VIRTIO request from Virtqueue::next_avail()
   * \param args     Extra arguments passed to dm->load_desc()
   *
   * \pre The given request must be valid.
   *
   * \throws Bad_descriptor  The descriptor has an invalid size or load_desc()
   *                         has thrown an exception by itself.
   */
  template<typename DESC_MAN, typename ...ARGS>
  void start(DESC_MAN *dm, Virtqueue *ring, Virtqueue::Head_desc const &request, ARGS... args)
  {
    // Snapshot the head descriptor; access_once prevents the compiler from
    // re-reading driver-writable shared memory later.
    _current = cxx::access_once(request.desc());

    if (_current.flags.indirect())
      {
        // Indirect request: the head descriptor points to a separate
        // descriptor table. Let the descriptor manager translate it.
        dm->load_desc(_current, this, &_table);
        _num = _current.len / sizeof(Virtqueue::Desc);
        // An indirect table with zero entries is malformed.
        if (L4_UNLIKELY(!_num))
          throw Bad_descriptor(this, Bad_descriptor::Bad_size);

        // Continue processing with the first entry of the indirect table.
        _current = cxx::access_once(_table);
      }
    else
      {
        // Direct request: walk the queue's own descriptor table.
        _table = ring->desc(0);
        _num = ring->num();
      }

    // Resolve the first data descriptor; must throw on a bad descriptor.
    dm->load_desc(_current, this, cxx::forward<ARGS>(args)...);
  }

  /**
   * \brief Start processing a new request.
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm       Descriptor manager that is used to translate VIRTIO descriptor addresses.
   * \param request  VIRTIO request from Virtqueue::next_avail()
   * \param args     Extra arguments passed to dm->load_desc()
   * \pre The given request must be valid.
   * \return Reference to the given request (for convenient chaining).
   *
   * Convenience overload that takes the queue from the request itself.
   */
  template<typename DESC_MAN, typename ...ARGS>
  Virtqueue::Request const &start(DESC_MAN *dm, Virtqueue::Request const &request, ARGS... args)
  {
    start(dm, request.ring, request, cxx::forward<ARGS>(args)...);
    return request;
  }

  /**
   * \brief Get the flags of the currently processed descriptor.
   * \return The flags of the currently processed descriptor.
   */
  Virtqueue::Desc::Flags current_flags() const
  { return _current.flags; }

  /**
   * \brief Are there more chained descriptors ?
   * \return true if there are more chained descriptors in the current request.
   */
  bool has_more() const
  { return _current.flags.next(); }

  /**
   * Switch to the next descriptor in a descriptor chain.
   *
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm    Descriptor manager that is used to translate VIRTIO
   *              descriptor addresses.
   * \param args  Extra arguments passed to dm->load_desc()
   *
   * \retval true   A next descriptor is available.
   * \retval false  No descriptor available.
   *
   * \throws Bad_descriptor  The `next` index of this descriptor is invalid.
   */
  template<typename DESC_MAN, typename ...ARGS>
  bool next(DESC_MAN *dm, ARGS... args)
  {
    if (!_current.flags.next())
      return false;

    // Bounds-check the chain link before following it to avoid reading
    // outside the descriptor table.
    if (L4_UNLIKELY(_current.next >= _num))
      throw Bad_descriptor(this, Bad_descriptor::Bad_next);

    _current = cxx::access_once(_table + _current.next);

    if (0) // we ignore this for performance reasons
      if (L4_UNLIKELY(_current.flags.indirect()))
        throw Bad_descriptor(this, Bad_descriptor::Bad_flags);

    // must throw an exception in case of a bad descriptor
    dm->load_desc(_current, this, cxx::forward<ARGS>(args)...);
    return true;
  }
};
518 
519 }
520 }
521