ReactOS  0.4.15-dev-1054-gd029a62
VirtIORing-Packed.c
/*
 * Packed virtio ring manipulation routines
 *
 * Copyright 2019 Red Hat, Inc.
 *
 * Authors:
 * Yuri Benditovich <ybendito@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"

#include <pshpack1.h>

struct vring_packed_desc_event {
    /* Descriptor Ring Change Event Offset/Wrap Counter. */
    __le16 off_wrap;
    /* Descriptor Ring Change Event Flags. */
    __le16 flags;
};

struct vring_packed_desc {
    /* Buffer Address. */
    __virtio64 addr;
    /* Buffer Length. */
    __le32 len;
    /* Buffer ID. */
    __le16 id;
    /* The flags depending on descriptor type. */
    __le16 flags;
};

#include <poppack.h>

#define BUG_ON(condition) { if (condition) { KeBugCheck(0xE0E1E2E3); }}
#define BAD_RING(vq, fmt, ...) DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, vq->vq.index, __VA_ARGS__); BUG_ON(true)

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4

/*
 * Mark a descriptor as available or used in packed ring.
 * Notice: they are defined as shifts instead of shifted values.
 */
#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED 15

/* Enable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
/* Disable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1

/*
 * Enable events for a specific descriptor in packed ring.
 * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC 0x2
/*
 * Wrap counter bit shift in event suppression structure
 * of packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR 15

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event?
 */
static inline bool vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new_idx respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
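
/*
 * Editorial sketch (not part of the original file): a hypothetical helper that
 * exercises vring_need_event() across a 16-bit index wrap, to show why the
 * comparison is done with unsigned 16-bit subtraction.
 */
static inline void vring_need_event_example(void)
{
    __u16 event_idx = 0xFFFE;        /* the other side asked to be notified at index 0xFFFE */
    __u16 old = 0xFFFD, new_idx = 1; /* we advanced the index from 0xFFFD across the wrap to 1 */

    /* (u16)(1 - 0xFFFE - 1) == 2 and (u16)(1 - 0xFFFD) == 4, so 2 < 4: notify. */
    BUG_ON(!vring_need_event(event_idx, new_idx, old));

    /* Index 3 was not crossed by this update, so no notification is needed. */
    BUG_ON(vring_need_event(3, new_idx, old));
}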

struct vring_desc_state_packed {
    void *data;  /* Data for callback. */
    u16 num;     /* Descriptor list length. */
    u16 next;    /* The next desc state in a list. */
    u16 last;    /* The last desc state in a list. */
};

struct virtqueue_packed {
    struct virtqueue vq;
    /* Number we've added since last sync. */
    unsigned int num_added;
    /* Head of free buffer list. */
    unsigned int free_head;
    /* Number of free descriptors */
    unsigned int num_free;
    /* Last used index we've seen. */
    u16 last_used_idx;
    /* Avail used flags. */
    u16 avail_used_flags;
    struct
    {
        /* Driver ring wrap counter. */
        bool avail_wrap_counter;
        /* Device ring wrap counter. */
        bool used_wrap_counter;
        /* Index of the next avail descriptor. */
        u16 next_avail_idx;
        /*
         * Last written value to driver->flags in
         * guest byte order.
         */
        u16 event_flags_shadow;
        struct {
            unsigned int num;
            struct vring_packed_desc *desc;
            struct vring_packed_desc_event *driver;
            struct vring_packed_desc_event *device;
        } vring;
        /* Per-descriptor state. */
        struct vring_desc_state_packed *desc_state;
    } packed;
    struct vring_desc_state_packed desc_states[];
};

#define packedvq(vq) ((struct virtqueue_packed *)vq)

unsigned int vring_control_block_size_packed(u16 qsize)
{
    return sizeof(struct virtqueue_packed) + sizeof(struct vring_desc_state_packed) * qsize;
}

unsigned long vring_size_packed(unsigned int num, unsigned long align)
{
    /* array of descriptors */
    unsigned long res = num * sizeof(struct vring_packed_desc);
    /* driver and device event */
    res += 2 * sizeof(struct vring_packed_desc_event);
    return res;
}
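
/*
 * Editorial sketch (not part of the original file): the packed ring layout
 * behind the two size helpers above, worked out for a hypothetical 256-entry
 * queue. Each vring_packed_desc is 16 bytes and each vring_packed_desc_event
 * is 4 bytes (both are 1-byte packed), so the ring needs
 * 256 * 16 + 2 * 4 = 4104 bytes: the descriptor array followed by the driver
 * and device event suppression areas. Unlike the split ring, the align
 * argument does not change the result.
 */
static inline unsigned long vring_size_packed_example(void)
{
    return vring_size_packed(256, PAGE_SIZE); /* 4096 + 8 = 4104 bytes */
}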

static int virtqueue_add_buf_packed(
    struct virtqueue *_vq,    /* the queue */
    struct scatterlist sg[],  /* sg array of length out + in */
    unsigned int out,         /* number of driver->device buffer descriptors in sg */
    unsigned int in,          /* number of device->driver buffer descriptors in sg */
    void *opaque,             /* later returned from virtqueue_get_buf */
    void *va_indirect,        /* VA of the indirect page or NULL */
    ULONGLONG phys_indirect)  /* PA of the indirect page or 0 */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int descs_used;
    struct vring_packed_desc *desc;
    u16 head, id, i;

    descs_used = out + in;
    head = vq->packed.next_avail_idx;
    id = (u16)vq->free_head;

    BUG_ON(descs_used == 0);
    BUG_ON(id >= vq->packed.vring.num);

    if (va_indirect && vq->num_free > 0) {
        desc = va_indirect;
        for (i = 0; i < descs_used; i++) {
            desc[i].flags = i < out ? 0 : VRING_DESC_F_WRITE;
            desc[i].addr = sg[i].physAddr.QuadPart;
            desc[i].len = sg[i].length;
        }
        vq->packed.vring.desc[head].addr = phys_indirect;
        vq->packed.vring.desc[head].len = descs_used * sizeof(struct vring_packed_desc);
        vq->packed.vring.desc[head].id = id;

        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = VRING_DESC_F_INDIRECT | vq->avail_used_flags;

        DPrintf(5, "Added buffer head %i to Q%d\n", head, vq->vq.index);
        head++;
        if (head >= vq->packed.vring.num) {
            head = 0;
            vq->packed.avail_wrap_counter ^= 1;
            vq->avail_used_flags ^=
                1 << VRING_PACKED_DESC_F_AVAIL |
                1 << VRING_PACKED_DESC_F_USED;
        }
        vq->packed.next_avail_idx = head;
        /* We're using some buffers from the free list. */
        vq->num_free -= 1;
        vq->num_added += 1;

        vq->free_head = vq->packed.desc_state[id].next;

        /* Store token and indirect buffer state. */
        vq->packed.desc_state[id].num = 1;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = id;

    } else {
        unsigned int n;
        u16 curr, prev, head_flags;
        if (vq->num_free < descs_used) {
            DPrintf(6, "Can't add buffer to Q%d\n", vq->vq.index);
            return -ENOSPC;
        }
        desc = vq->packed.vring.desc;
        i = head;
        curr = id;
        for (n = 0; n < descs_used; n++) {
            u16 flags = vq->avail_used_flags;
            flags |= n < out ? 0 : VRING_DESC_F_WRITE;
            if (n != descs_used - 1) {
                flags |= VRING_DESC_F_NEXT;
            }
            desc[i].addr = sg[n].physAddr.QuadPart;
            desc[i].len = sg[n].length;
            desc[i].id = id;
            if (n == 0) {
                head_flags = flags;
            }
            else {
                desc[i].flags = flags;
            }

            prev = curr;
            curr = vq->packed.desc_state[curr].next;

            if (++i >= vq->packed.vring.num) {
                i = 0;
                vq->avail_used_flags ^=
                    1 << VRING_PACKED_DESC_F_AVAIL |
                    1 << VRING_PACKED_DESC_F_USED;
            }
        }

        if (i < head)
            vq->packed.avail_wrap_counter ^= 1;

        /* We're using some buffers from the free list. */
        vq->num_free -= descs_used;

        /* Update free pointer */
        vq->packed.next_avail_idx = i;
        vq->free_head = curr;

        /* Store token. */
        vq->packed.desc_state[id].num = (u16)descs_used;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = prev;

        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
         * the list are made available.
         */
        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = head_flags;
        vq->num_added += descs_used;

        DPrintf(5, "Added buffer head @%i+%d to Q%d\n", head, descs_used, vq->vq.index);
    }

    return 0;
}
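
/*
 * Editorial sketch (not part of the original file): a hypothetical caller of
 * virtqueue_add_buf_packed(). It assumes the scatterlist entries have already
 * been filled with the physical address and length of one driver->device
 * header and one device->driver status buffer, and that no indirect page is
 * used (va_indirect == NULL, phys_indirect == 0).
 */
static inline int virtqueue_add_buf_packed_example(struct virtqueue *vq,
                                                   struct scatterlist sg[2],
                                                   void *request_token)
{
    /* sg[0] is read by the device (out = 1), sg[1] is written by it (in = 1). */
    return virtqueue_add_buf_packed(vq, sg, 1, 1, request_token, NULL, 0);
}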

static void detach_buf_packed(struct virtqueue_packed *vq, unsigned int id)
{
    struct vring_desc_state_packed *state = &vq->packed.desc_state[id];

    /* Clear data ptr. */
    state->data = NULL;

    vq->packed.desc_state[state->last].next = (u16)vq->free_head;
    vq->free_head = id;
    vq->num_free += state->num;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int i;
    void *buf;

    for (i = 0; i < vq->packed.vring.num; i++) {
        if (!vq->packed.desc_state[i].data)
            continue;
        /* detach_buf clears data, so grab it now. */
        buf = vq->packed.desc_state[i].data;
        detach_buf_packed(vq, i);
        return buf;
    }
    /* That should have freed everything. */
    BUG_ON(vq->num_free != vq->packed.vring.num);

    return NULL;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);

    if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }
}

static inline bool is_used_desc_packed(const struct virtqueue_packed *vq,
    u16 idx, bool used_wrap_counter)
{
    bool avail, used;
    u16 flags;

    flags = vq->packed.vring.desc[idx].flags;
    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

    return avail == used && used == used_wrap_counter;
}
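
/*
 * Editorial sketch (not part of the original file): the flag states the test
 * above distinguishes during the first pass through the ring, while
 * used_wrap_counter is still 1. A descriptor the driver has just published
 * carries only the AVAIL bit, so avail != used and it is not yet used; once
 * the device hands it back it carries AVAIL and USED, so
 * avail == used == used_wrap_counter and is_used_desc_packed() returns true.
 */
static inline void is_used_desc_packed_example(struct virtqueue_packed *vq)
{
    vq->packed.vring.desc[0].flags = 1 << VRING_PACKED_DESC_F_AVAIL;  /* published */
    BUG_ON(is_used_desc_packed(vq, 0, true));

    vq->packed.vring.desc[0].flags |= 1 << VRING_PACKED_DESC_F_USED;  /* completed */
    BUG_ON(!is_used_desc_packed(vq, 0, true));
}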

static inline bool virtqueue_poll_packed(struct virtqueue_packed *vq, u16 off_wrap)
{
    bool wrap_counter;
    u16 used_idx;
    KeMemoryBarrier();

    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

    return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue_packed *vq)
{
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */

    if (event_suppression_enabled) {
        vq->packed.vring.driver->off_wrap =
            vq->last_used_idx |
            (vq->packed.used_wrap_counter <<
             VRING_PACKED_EVENT_F_WRAP_CTR);
        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
        VRING_PACKED_EVENT_F_WRAP_CTR);
}

static bool virtqueue_enable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned last_used_idx = virtqueue_enable_cb_prepare_packed(vq);

    return !virtqueue_poll_packed(vq, (u16)last_used_idx);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    u16 used_idx, wrap_counter;
    u16 bufs;

    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */

    if (event_suppression_enabled) {
        /* TODO: tune this threshold */
        bufs = (vq->packed.vring.num - vq->num_free) * 3 / 4;
        wrap_counter = vq->packed.used_wrap_counter;

        used_idx = vq->last_used_idx + bufs;
        if (used_idx >= vq->packed.vring.num) {
            used_idx -= (u16)vq->packed.vring.num;
            wrap_counter ^= 1;
        }

        vq->packed.vring.driver->off_wrap = used_idx |
            (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);

        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    /*
     * We need to update event suppression structure first
     * before re-checking for more used buffers.
     */
    KeMemoryBarrier();

    if (is_used_desc_packed(vq,
        vq->last_used_idx,
        vq->packed.used_wrap_counter)) {
        return false;
    }

    return true;
}

static BOOLEAN virtqueue_is_interrupt_enabled_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return vq->packed.event_flags_shadow & VRING_PACKED_EVENT_FLAG_DISABLE;
}

static void virtqueue_shutdown_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int num = vq->packed.vring.num;
    void *pages = vq->packed.vring.desc;
    unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;

    RtlZeroMemory(pages, vring_size_packed(num, vring_align));
    vring_new_virtqueue_packed(
        _vq->index,
        num,
        vring_align,
        _vq->vdev,
        pages,
        _vq->notification_cb,
        _vq);
}

static inline bool more_used_packed(const struct virtqueue_packed *vq)
{
    return is_used_desc_packed(vq, vq->last_used_idx,
        vq->packed.used_wrap_counter);
}

static void *virtqueue_get_buf_packed(
    struct virtqueue *_vq,  /* the queue */
    unsigned int *len)      /* number of bytes returned by the device */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 last_used, id;
    void *ret;

    if (!more_used_packed(vq)) {
        DPrintf(6, "%s: No more buffers in queue\n", __FUNCTION__);
        return NULL;
    }

    /* Only get used elements after they have been exposed by host. */
    KeMemoryBarrier();

    last_used = vq->last_used_idx;
    id = vq->packed.vring.desc[last_used].id;
    *len = vq->packed.vring.desc[last_used].len;

    if (id >= vq->packed.vring.num) {
        BAD_RING(vq, "id %u out of range\n", id);
        return NULL;
    }
    if (!vq->packed.desc_state[id].data) {
        BAD_RING(vq, "id %u is not a head!\n", id);
        return NULL;
    }

    /* detach_buf_packed clears data, so grab it now. */
    ret = vq->packed.desc_state[id].data;
    detach_buf_packed(vq, id);

    vq->last_used_idx += vq->packed.desc_state[id].num;
    if (vq->last_used_idx >= vq->packed.vring.num) {
        vq->last_used_idx -= (u16)vq->packed.vring.num;
        vq->packed.used_wrap_counter ^= 1;
    }

    /*
     * If we expect an interrupt for the next entry, tell host
     * by writing event index and flush out the write before
     * the read in the next get_buf call.
     */
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) {
        vq->packed.vring.driver->off_wrap = vq->last_used_idx |
            ((u16)vq->packed.used_wrap_counter <<
             VRING_PACKED_EVENT_F_WRAP_CTR);
        KeMemoryBarrier();
    }

    return ret;
}

static BOOLEAN virtqueue_has_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return more_used_packed(vq);
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 new, old, off_wrap, flags, wrap_counter, event_idx;
    bool needs_kick;
    union {
        struct {
            __le16 off_wrap;
            __le16 flags;
        };
        u32 value32;
    } snapshot;

    /*
     * We need to expose the new flags value before checking notification
     * suppressions.
     */
    KeMemoryBarrier();

    old = vq->packed.next_avail_idx - vq->num_added;
    new = vq->packed.next_avail_idx;
    vq->num_added = 0;

    snapshot.value32 = *(u32 *)vq->packed.vring.device;
    flags = snapshot.flags;

    if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
        needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
        goto out;
    }

    off_wrap = snapshot.off_wrap;

    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    if (wrap_counter != vq->packed.avail_wrap_counter)
        event_idx -= (u16)vq->packed.vring.num;

    needs_kick = vring_need_event(event_idx, new, old);
out:
    return needs_kick;
}

static void virtqueue_kick_always_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    KeMemoryBarrier();
    vq->num_added = 0;
    virtqueue_notify(_vq);
}

/* Initializes a new virtqueue using already allocated memory */
struct virtqueue *vring_new_virtqueue_packed(
    unsigned int index,                 /* virtqueue index */
    unsigned int num,                   /* virtqueue size (always a power of 2) */
    unsigned int vring_align,           /* vring alignment requirement */
    VirtIODevice *vdev,                 /* the virtio device owning the queue */
    void *pages,                        /* vring memory */
    void(*notify)(struct virtqueue *),  /* notification callback */
    void *control)                      /* virtqueue memory */
{
    struct virtqueue_packed *vq = packedvq(control);
    unsigned int i;

    vq->vq.vdev = vdev;
    vq->vq.notification_cb = notify;
    vq->vq.index = index;

    vq->vq.avail_va = (u8 *)pages + num * sizeof(struct vring_packed_desc);
    vq->vq.used_va = (u8 *)vq->vq.avail_va + sizeof(struct vring_packed_desc_event);

    /* initialize the ring */
    vq->packed.vring.num = num;
    vq->packed.vring.desc = pages;
    vq->packed.vring.driver = vq->vq.avail_va;
    vq->packed.vring.device = vq->vq.used_va;

    vq->num_free = num;
    vq->free_head = 0;
    vq->num_added = 0;
    vq->packed.avail_wrap_counter = 1;
    vq->packed.used_wrap_counter = 1;
    vq->last_used_idx = 0;
    vq->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
    vq->packed.next_avail_idx = 0;
    vq->packed.event_flags_shadow = 0;
    vq->packed.desc_state = vq->desc_states;

    RtlZeroMemory(vq->packed.desc_state, num * sizeof(*vq->packed.desc_state));
    for (i = 0; i < num - 1; i++) {
        vq->packed.desc_state[i].next = i + 1;
    }

    vq->vq.add_buf = virtqueue_add_buf_packed;
    vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_packed;
    vq->vq.disable_cb = virtqueue_disable_cb_packed;
    vq->vq.enable_cb = virtqueue_enable_cb_packed;
    vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_packed;
    vq->vq.get_buf = virtqueue_get_buf_packed;
    vq->vq.has_buf = virtqueue_has_buf_packed;
    vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_packed;
    vq->vq.kick_always = virtqueue_kick_always_packed;
    vq->vq.kick_prepare = virtqueue_kick_prepare_packed;
    vq->vq.shutdown = virtqueue_shutdown_packed;
    return &vq->vq;
}
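
/*
 * Editorial sketch (not part of the original file): how a caller is expected
 * to wire these routines together. Everything marked "hypothetical" below
 * (the pre-allocated memory blocks, the notify callback and the scatterlist
 * setup) stands in for the generic VirtIO device code, which sizes the two
 * blocks with vring_size_packed() and vring_control_block_size_packed() and
 * then drives the queue through the struct virtqueue function pointers
 * installed above.
 */
static void vring_packed_usage_example(
    VirtIODevice *vdev,
    void *pages,                         /* hypothetical: vring_size_packed(256, PAGE_SIZE) bytes, zeroed */
    void *control,                       /* hypothetical: vring_control_block_size_packed(256) bytes */
    void (*notify)(struct virtqueue *),  /* hypothetical notification callback */
    struct scatterlist sg[2],            /* hypothetical request: 1 out + 1 in buffer */
    void *request_token)
{
    unsigned int num = 256;              /* hypothetical queue size */
    unsigned int len;
    void *done;
    struct virtqueue *vq = vring_new_virtqueue_packed(0, num, PAGE_SIZE, vdev,
                                                      pages, notify, control);

    /* Submit the request, then notify the device only if it wants a kick. */
    if (vq->add_buf(vq, sg, 1, 1, request_token, NULL, 0) == 0 &&
        vq->kick_prepare(vq)) {
        virtqueue_notify(vq);
    }

    /* Later (normally from the interrupt path), reap completed tokens. */
    while ((done = vq->get_buf(vq, &len)) != NULL) {
        /* hand 'done' and 'len' back to the upper layer */
    }
}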