// L4Re - L4 Runtime Environment
// virtio
1 // vi:ft=cpp
2 /*
3  * (c) 2014 Alexander Warg <warg@os.inf.tu-dresden.de>
4  *
5  * This file is part of TUD:OS and distributed under the terms of the
6  * GNU General Public License 2.
7  * Please see the COPYING-GPL-2 file for details.
8  *
9  * As a special exception, you may use this file as part of a free software
10  * library without restriction. Specifically, if other files instantiate
11  * templates or use macros or inline functions from this file, or you compile
12  * this file and link it with other files to produce an executable, this
13  * file does not by itself cause the resulting executable to be covered by
14  * the GNU General Public License. This exception does not however
15  * invalidate any other reasons why the executable file might be covered by
16  * the GNU General Public License.
17  */
18 
19 
20 #pragma once
21 
22 #include <l4/sys/types.h>
23 #include <l4/cxx/bitfield>
24 #include <l4/cxx/minmax>
25 #include <l4/cxx/utils>
26 
27 #include <string.h>
28 #include <stdio.h>
29 
30 #include "../virtqueue"
31 
32 /**
33  * \ingroup l4virtio_transport
34  *
35  * L4-VIRTIO Transport C++ API
36  */
37 namespace L4virtio {
38 namespace Svr {
39 
40 /**
41  * \brief Type of the device status register.
42  */
43 struct Dev_status
44 {
45  unsigned char raw; ///< Raw value of the VIRTIO device status register.
46  Dev_status() = default;
47 
48  /// Make Status from raw value.
49  explicit Dev_status(l4_uint32_t v) : raw(v) {}
50 
51  CXX_BITFIELD_MEMBER( 0, 0, acked, raw);
52  CXX_BITFIELD_MEMBER( 1, 1, driver, raw);
53  CXX_BITFIELD_MEMBER( 2, 2, driver_ok, raw);
54  CXX_BITFIELD_MEMBER( 3, 3, features_ok, raw);
55  CXX_BITFIELD_MEMBER( 7, 7, failed, raw);
56 
57  /**
58  * \brief Check if the device is in running state.
59  * \return true if the device is in running state.
60  *
61  * The device is in running state when acked(), driver(),
62  * features_ok(), and driver_ok() return true,
63  * and failed() returns false.
64  */
65  bool running() const
66  {
67  return (raw == 0xf);
68  }
69 };
70 
71 /**
72  * \brief Type for device feature bitmap
73  */
74 struct Dev_features
75 {
76  l4_uint32_t raw; ///< The raw value of the features bitmap
77  Dev_features() = default;
78 
79  /// Make Features from a raw bitmap.
80  explicit Dev_features(l4_uint32_t v) : raw(v) {}
81 
82  CXX_BITFIELD_MEMBER(28, 28, ring_indirect_desc, raw);
83  CXX_BITFIELD_MEMBER(29, 29, ring_event_idx, raw);
84 };
85 
86 
87 /**
88  * Virtqueue implementation for the device
89  *
90  * This class represents a single virtqueue, with a local running available
91  * index.
92  */
/**
 * Virtqueue implementation for the device
 *
 * This class represents a single virtqueue, with a local running available
 * index (_current_avail, inherited from L4virtio::Virtqueue) that tracks how
 * far the device has consumed the driver's available ring.
 */
class Virtqueue : public L4virtio::Virtqueue
{
public:
  /**
   * VIRTIO request, essentially a descriptor from the available ring.
   */
  class Head_desc
  {
    friend class Virtqueue;
  private:
    /// Pointer to the head descriptor, NULL for an invalid request.
    Virtqueue::Desc const *_d;

    /// Make a request pointing at descriptor \a i of queue \a r.
    Head_desc(Virtqueue *r, unsigned i) : _d(r->desc(i)) {}

    /// Incomplete type used for the safe-bool conversion below.
    struct Null_ptr_check;

  public:
    /// Make invalid (NULL) request.
    Head_desc() : _d(0) {}

    /// \return True if the request is valid (not NULL).
    bool valid() const { return _d; }

    /// \return True if the request is valid (not NULL).
    // Safe-bool idiom: converts to an unusable pointer type so the request
    // can be tested in boolean context without allowing arithmetic.
    operator Null_ptr_check const * () const
    { return reinterpret_cast<Null_ptr_check const *>(_d); }

    /// \return Pointer to the head descriptor of the request.
    Desc const *desc() const
    { return _d; }
  };

  /// Head_desc bundled with the queue it came from (see next_avail()).
  struct Request : Head_desc
  {
    Virtqueue *ring; ///< Queue this request was taken from.
    Request() = default;
  private:
    friend class Virtqueue;
    Request(Virtqueue *r, unsigned i) : Head_desc(r, i), ring(r) {}
  };


  /**
   * \brief Get the next available descriptor from the available ring.
   * \pre The queue must be in working state.
   * \return A Request for the next available descriptor, the Request is invalid
   *         if there are no descriptors in the available ring.
   * \note The return value must be checked even when a previous desc_avail()
   *       returned true.
   */
  Request next_avail()
  {
    if (L4_LIKELY(_current_avail != _avail->idx))
      {
        // Read barrier: only read the ring entry after having observed the
        // driver's updated available index.
        rmb();
        unsigned head = _current_avail & _idx_mask;
        ++_current_avail;
        return Request(this, _avail->ring[head]);
      }
    return Request();
  }

  /**
   * \brief Test for available descriptors.
   * \return true if there are descriptors available, false if not.
   * \pre The queue must be in working state.
   */
  bool desc_avail() const
  {
    return _current_avail != _avail->idx;
  }

  /**
   * \brief Put the given descriptor into the used ring.
   * \param r    request that shall be marked as finished.
   * \param len  the total number of bytes written.
   * \pre queue must be in working state.
   * \pre \a r must be a valid request from this queue.
   */
  void consumed(Head_desc const &r, l4_uint32_t len = 0)
  {
    l4_uint16_t i = _used->idx & _idx_mask;
    // Store the descriptor index (offset within _desc) and byte count.
    _used->ring[i] = Used_elem(r._d - _desc, len);
    // Write barrier: the ring entry must be visible before the index bump.
    wmb();
    ++_used->idx;
  }

  /**
   * \brief Put a batch of descriptors into the used ring.
   * \tparam ITER  Forward iterator over pairs of (Head_desc, length),
   *               accessed as elem->first and elem->second.
   * \param begin  first element of the batch.
   * \param end    end (one past last) of the batch.
   * \pre queue must be in working state.
   *
   * All entries are written first; the used index is published once, after a
   * single write barrier.
   */
  template<typename ITER>
  void consumed(ITER const &begin, ITER const &end)
  {
    l4_uint16_t added = 0;
    l4_uint16_t idx = _used->idx;

    for (auto elem = begin ; elem != end; ++elem, ++added)
      _used->ring[(idx + added) & _idx_mask]
        = Used_elem(elem->first._d - _desc, elem->second);

    wmb();
    _used->idx += added;
  }

  /**
   * \brief Mark a request as finished and notify the observer.
   * \tparam QUEUE_OBSERVER  type providing notify_queue(Virtqueue *).
   * \param d    request to finish; invalidated (set to NULL) on return.
   * \param o    observer to notify about the finished request.
   * \param len  the total number of bytes written.
   */
  template<typename QUEUE_OBSERVER>
  void finish(Head_desc &d, QUEUE_OBSERVER *o, l4_uint32_t len = 0)
  {
    consumed(d, len);
    o->notify_queue(this);
    d._d = 0;
  }

  /**
   * \brief Mark a batch of requests as finished and notify the observer once.
   * \tparam ITER            iterator type as for the batch consumed().
   * \tparam QUEUE_OBSERVER  type providing notify_queue(Virtqueue *).
   * \param begin  first element of the batch.
   * \param end    end (one past last) of the batch.
   * \param o      observer to notify about the finished requests.
   */
  template<typename ITER, typename QUEUE_OBSERVER>
  void finish(ITER const &begin, ITER const &end, QUEUE_OBSERVER *o)
  {
    consumed(begin, end);
    o->notify_queue(this);
  }

  /**
   * \brief Set the 'no notify' flag for this queue.
   *
   * This function may be called on a disabled queue.
   */
  void disable_notify()
  {
    if (L4_LIKELY(ready()))
      _used->flags.no_notify() = 1;
  }

  /**
   * \brief Clear the 'no notify' flag for this queue.
   *
   * This function may be called on a disabled queue.
   */
  void enable_notify()
  {
    if (L4_LIKELY(ready()))
      _used->flags.no_notify() = 0;
  }

  /**
   * \brief Get a descriptor from the descriptor list.
   * \param idx  the index of the descriptor.
   * \pre \a idx < \a num
   * \pre queue must be in working state
   */
  Desc const *desc(unsigned idx) const
  { return _desc + idx; }

};
241 
242 /**
243  * \brief Abstract data buffer.
244  */
245 struct Data_buffer
246 {
247  char *pos; ///< Current buffer position
248  l4_uint32_t left; ///< Bytes left in buffer
249 
250  Data_buffer() = default;
251 
252  /**
253  * \brief Create buffer for object \a p.
254  * \tparam T type of object (implicit)
255  * \param p pointer to object.
256  *
257  * The buffer shall point to the start of the object \a p and the size left
258  * is sizeof(T).
259  */
260  template<typename T>
261  explicit Data_buffer(T *p)
262  : pos(reinterpret_cast<char *>(p)), left(sizeof(T))
263  {}
264 
265  /**
266  * \brief Set buffer for object \a p.
267  * \tparam T type of object (implicit)
268  * \param p pointer to object.
269  *
270  * The buffer shall point to the start of the object \a p and the size left
271  * is sizeof(T).
272  */
273  template<typename T>
274  void set(T *p)
275  {
276  pos = reinterpret_cast<char *>(p);
277  left = sizeof(T);
278  }
279 
280  /**
281  * \brief Copy contents from this buffer to the destination buffer.
282  * \param dst Destination buffer.
283  * \return the number of bytes copied.
284  *
285  * This function copies the maximum number of bytes from this to
286  * \a dst.
287  */
288  l4_uint32_t copy_to(Data_buffer *dst)
289  {
290  unsigned long bytes = cxx::min(left, dst->left);
291  memcpy(dst->pos, pos, bytes);
292  left -= bytes;
293  pos += bytes;
294  dst->left -= bytes;
295  dst->pos += bytes;
296  return bytes;
297  }
298 
299  /**
300  * \brief Skip given number of bytes in this buffer.
301  * \param bytes Number of bytes that shall be skipped.
302  * \return The number of bytes skipped.
303  *
304  * Try to skip the given number of bytes in this buffer, if there are less
305  * bytes left in the buffer that given then at most left bytes are skipped
306  * and the amount is returned.
307  */
308  l4_uint32_t skip(l4_uint32_t bytes)
309  {
310  unsigned long b = cxx::min(left, bytes);
311  left -= b;
312  pos += b;
313  return b;
314  }
315 
316  /**
317  * \brief Check if there are no more bytes left in the buffer.
318  * \return true if there are no more bytes left in the buffer.
319  */
320  bool done() const
321  { return left == 0; }
322 };
323 
class Request_processor;

/**
 * \brief Exception used by Queue to indicate descriptor errors.
 */
struct Bad_descriptor
{
  /// The error code
  enum Error
  {
    Bad_address, ///< Address cannot be translated
    Bad_rights,  ///< Missing access rights on memory
    Bad_flags,   ///< Invalid combination of descriptor flags
    Bad_next,    ///< Invalid next index
    Bad_size     ///< Invalid size of memory block
  };

  /// The processor that triggered the exception
  Request_processor const *proc;

  /// The error code
  Error error;

  /**
   * \brief Make a bad descriptor exception.
   * \param proc  The request processor causing the exception
   * \param e     The error code.
   */
  Bad_descriptor(Request_processor const *proc, Error e)
  : proc(proc), error(e)
  {}

  /**
   * Get a human readable description of the error code.
   *
   * \return Message describing the error; "Unknown error" for a value
   *         outside the Error enumeration.
   */
  char const *message() const
  {
    // A switch is used instead of a designated-initializer array
    // ([Bad_address] = "...") because designated array initializers are a
    // GNU extension in C++, and the array was rebuilt on every call.
    switch (error)
      {
      case Bad_address: return "Descriptor address cannot be translated";
      case Bad_rights:  return "Insufficient memory access rights";
      case Bad_flags:   return "Invalid descriptor flags";
      case Bad_next:    return "The descriptor's `next` index is invalid";
      case Bad_size:    return "Invalid size of the memory block";
      default:          return "Unknown error";
      }
  }
};
377 
378 
379 /**
380  * \brief Encapsulate the state for processing a VIRTIO request.
381  *
382  * A VIRTIO request is a possibly chained list of descriptors retrieved from
383  * the available ring of a virtqueue, using Virtqueue::next_avail().
384  *
385  * The descriptor processing depends on helper (DESC_MAN) for interpreting the
386  * descriptors in the context of the device implementation.
387  *
388  * DESC_MAN has to provide the functionality to safely dereference a
389  * descriptor from a descriptor list.
390  *
391  * The following methods must be provided by DESC_MAN:
392  * * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
393  * Request_processor const *proc,
394  * Virtqueue::Desc const **table) \endcode
395  * This function is used to dereference \a desc as an indirect descriptor
396  * table, and must return a pointer to an indirect descriptor table.
397  * * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
398  * Request_processor const *proc, ...) \endcode
399  * This function is used to dereference a descriptor as a normal data
400  * buffer, and '...' are the arguments that are passed to start() and next().
401  */
/**
 * \brief Encapsulate the state for processing a VIRTIO request.
 *
 * A VIRTIO request is a possibly chained list of descriptors retrieved from
 * the available ring of a virtqueue, using Virtqueue::next_avail().
 *
 * The descriptor processing depends on a helper (DESC_MAN) for interpreting
 * the descriptors in the context of the device implementation.
 *
 * DESC_MAN has to provide the functionality to safely dereference a
 * descriptor from a descriptor list.
 *
 * The following methods must be provided by DESC_MAN:
 * * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
 *           Request_processor const *proc,
 *           Virtqueue::Desc const **table) \endcode
 *   This function is used to dereference \a desc as an indirect descriptor
 *   table, and must return a pointer to an indirect descriptor table.
 * * \code DESC_MAN::load_desc(Virtqueue::Desc const &desc,
 *           Request_processor const *proc, ...) \endcode
 *   This function is used to dereference a descriptor as a normal data
 *   buffer, and '...' are the arguments that are passed to start() and next().
 */
class Request_processor
{
private:
  /// pointer to descriptor table (may point to an indirect table)
  Virtqueue::Desc const *_table;

  /// currently processed descriptor (copied out of the table via access_once)
  Virtqueue::Desc _current;

  /// number of entries in the current descriptor table (_table)
  l4_uint16_t _num;

public:
  /**
   * Start processing a new request.
   *
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm         Descriptor manager that is used to translate VIRTIO
   *                   descriptor addresses.
   * \param ring       VIRTIO ring of the request.
   * \param request    VIRTIO request from Virtqueue::next_avail()
   * \param args       Extra arguments passed to dm->load_desc()
   *
   * \pre The given request must be valid.
   *
   * \throws Bad_descriptor  The descriptor has an invalid size or load_desc()
   *                         has thrown an exception by itself.
   */
  template<typename DESC_MAN, typename ...ARGS>
  void start(DESC_MAN *dm, Virtqueue *ring, Virtqueue::Head_desc const &request, ARGS... args)
  {
    // Snapshot the head descriptor; access_once prevents the compiler from
    // re-reading the (driver-shared) memory later.
    _current = cxx::access_once(request.desc());

    if (_current.flags.indirect())
      {
        // Indirect request: the head descriptor points to a separate
        // descriptor table; dm must translate and validate it.
        dm->load_desc(_current, this, &_table);
        _num = _current.len / sizeof(Virtqueue::Desc);
        // An indirect table with zero entries is malformed.
        if (L4_UNLIKELY(!_num))
          throw Bad_descriptor(this, Bad_descriptor::Bad_size);

        // Continue with the first descriptor of the indirect table.
        _current = cxx::access_once(_table);
      }
    else
      {
        // Direct request: walk the queue's own descriptor table.
        _table = ring->desc(0);
        _num = ring->num();
      }

    // Dereference the first data descriptor; must throw on a bad descriptor.
    dm->load_desc(_current, this, cxx::forward<ARGS>(args)...);
  }

  /**
   * \brief Start processing a new request.
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm         Descriptor manager that is used to translate VIRTIO
   *                   descriptor addresses.
   * \param request    VIRTIO request from Virtqueue::next_avail(); its queue
   *                   is taken from request.ring.
   * \param args       Extra arguments passed to dm->load_desc()
   * \return \a request, unchanged, for convenient chaining.
   * \pre The given request must be valid.
   */
  template<typename DESC_MAN, typename ...ARGS>
  Virtqueue::Request const &start(DESC_MAN *dm, Virtqueue::Request const &request, ARGS... args)
  {
    start(dm, request.ring, request, cxx::forward<ARGS>(args)...);
    return request;
  }

  /**
   * \brief Get the flags of the currently processed descriptor.
   * \return The flags of the currently processed descriptor.
   */
  Virtqueue::Desc::Flags current_flags() const
  { return _current.flags; }

  /**
   * \brief Are there more chained descriptors?
   * \return true if there are more chained descriptors in the current request.
   */
  bool has_more() const
  { return _current.flags.next(); }

  /**
   * Switch to the next descriptor in a descriptor chain.
   *
   * \tparam DESC_MAN  Type of descriptor manager (implicit).
   * \param dm         Descriptor manager that is used to translate VIRTIO
   *                   descriptor addresses.
   * \param args       Extra arguments passed to dm->load_desc()
   *
   * \retval true   A next descriptor is available.
   * \retval false  No descriptor available.
   *
   * \throws Bad_descriptor  The `next` index of this descriptor is invalid.
   */
  template<typename DESC_MAN, typename ...ARGS>
  bool next(DESC_MAN *dm, ARGS... args)
  {
    if (!_current.flags.next())
      return false;

    // Bounds-check the chain link against the current table size.
    if (L4_UNLIKELY(_current.next >= _num))
      throw Bad_descriptor(this, Bad_descriptor::Bad_next);

    _current = cxx::access_once(_table + _current.next);

    // Deliberately disabled check: an indirect flag inside a chain would be
    // invalid, but the test is skipped here for performance reasons.
    if (0) // we ignore this for performance reasons
      if (L4_UNLIKELY(_current.flags.indirect()))
        throw Bad_descriptor(this, Bad_descriptor::Bad_flags);

    // must throw an exception in case of a bad descriptor
    dm->load_desc(_current, this, cxx::forward<ARGS>(args)...);
    return true;
  }
};
515 
516 }
517 }
518