ReactOS  0.4.15-dev-1197-g8081ba9
VirtIORing.c
Go to the documentation of this file.
1 /*
2  * Virtio ring manipulation routines
3  *
4  * Copyright 2017 Red Hat, Inc.
5  *
6  * Authors:
7  * Ladi Prosek <lprosek@redhat.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met :
12  * 1. Redistributions of source code must retain the above copyright
13  * notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  * notice, this list of conditions and the following disclaimer in the
16  * documentation and / or other materials provided with the distribution.
17  * 3. Neither the names of the copyright holders nor the names of their contributors
18  * may be used to endorse or promote products derived from this software
19  * without specific prior written permission.
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 #include "osdep.h"
33 #include "virtio_pci.h"
34 #include "VirtIO.h"
35 #include "kdebugprint.h"
36 #include "virtio_ring.h"
38 
/* Maps a free-running ring index onto a slot; valid because num is a power of 2. */
#define DESC_INDEX(num, i) ((i) & ((num) - 1))

/* This marks a buffer as continuing via the next field. */
#define VIRTQ_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VIRTQ_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VIRTQ_DESC_F_INDIRECT 4

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers. */
#define VIRTQ_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an
 * optimization. */
#define VIRTQ_AVAIL_F_NO_INTERRUPT 1

/* C4200: nonstandard extension used - zero-sized array in struct (flexible array members below) */
#pragma warning (push)
#pragma warning (disable:4200)
59 
60 #include <pshpack1.h>
61 
62 /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
63 struct vring_desc {
64  /* Address (guest-physical). */
66  /* Length. */
68  /* The flags as indicated above. */
70  /* We chain unused descriptors via this, too */
72 };
73 
74 struct vring_avail {
78 };
79 
80 /* u32 is used here for ids for padding reasons. */
82  /* Index of start of used descriptor chain. */
84  /* Total length of the descriptor chain which was used (written to) */
86 };
87 
88 struct vring_used {
92 };
93 
94 #include <poppack.h>
95 
/* Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16
102 
/* The standard layout for the ring is a continuous chunk of memory which looks
 * like this. We assume num is a power of 2.
 *
 * struct vring
 * {
 *     // The actual descriptors (16 bytes each)
 *     struct vring_desc desc[num];
 *
 *     // A ring of available descriptor heads with free-running index.
 *     __virtio16 avail_flags;
 *     __virtio16 avail_idx;
 *     __virtio16 available[num];
 *     __virtio16 used_event_idx;
 *
 *     // Padding to the next align boundary.
 *     char pad[];
 *
 *     // A ring of used descriptor heads with free-running index.
 *     __virtio16 used_flags;
 *     __virtio16 used_idx;
 *     struct vring_used_elem used[num];
 *     __virtio16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
129 
/* Host-visible split-ring layout: descriptor table plus avail/used rings,
 * all placed in one contiguous allocation by vring_init(). */
struct vring {
    unsigned int num;

    struct vring_desc *desc;

    struct vring_avail *avail;

    struct vring_used *used;
};
139 
/* With VIRTIO_RING_F_EVENT_IDX, each side's event index lives at the tail of
 * the opposite ring (see layout comment above). */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
142 
143 static inline void vring_init(struct vring *vr, unsigned int num, void *p,
144  unsigned long align)
145 {
146  vr->num = num;
147  vr->desc = (struct vring_desc *)p;
148  vr->avail = (struct vring_avail *)((__u8 *)p + num * sizeof(struct vring_desc));
149  vr->used = (struct vring_used *)(((ULONG_PTR)&vr->avail->ring[num] + sizeof(__virtio16)
150  + align - 1) & ~((ULONG_PTR)align - 1));
151 }
152 
153 static inline unsigned vring_size_split(unsigned int num, unsigned long align)
154 {
155 #pragma warning (push)
156 #pragma warning (disable:4319)
157  return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
158  + align - 1) & ~(align - 1))
159  + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
160 #pragma warning(pop)
161 }
162 
163 /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
164 /* Assuming a given event_idx value from the other side, if
165 * we have just incremented index from old to new_idx,
166 * should we trigger an event? */
167 static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
168 {
169  /* Note: Xen has similar logic for notification hold-off
170  * in include/xen/interface/io/ring.h with req_event and req_prod
171  * corresponding to event_idx + 1 and new_idx respectively.
172  * Note also that req_event and req_prod in Xen start at 1,
173  * event indexes in virtio start at 0. */
174  return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
175 }
176 
178  struct virtqueue vq;
179  struct vring vring;
180  struct {
184  unsigned int num_unused;
185  unsigned int num_added_since_kick;
188  void *opaque[];
189 };
190 
/* Downcast from the generic virtqueue embedded as the first member. */
#define splitvq(vq) ((struct virtqueue_split *)vq)

#pragma warning (pop)
194 
195  /* Returns the index of the first unused descriptor */
196 static inline u16 get_unused_desc(struct virtqueue_split *vq)
197 {
198  u16 idx = vq->first_unused;
199  ASSERT(vq->num_unused > 0);
200 
201  vq->first_unused = vq->vring.desc[idx].next;
202  vq->num_unused--;
203  return idx;
204 }
205 
206 /* Marks the descriptor chain starting at index idx as unused */
207 static inline void put_unused_desc_chain(struct virtqueue_split *vq, u16 idx)
208 {
209  u16 start = idx;
210 
211  vq->opaque[idx] = NULL;
212  while (vq->vring.desc[idx].flags & VIRTQ_DESC_F_NEXT) {
213  idx = vq->vring.desc[idx].next;
214  vq->num_unused++;
215  }
216 
217  vq->vring.desc[idx].flags = VIRTQ_DESC_F_NEXT;
218  vq->vring.desc[idx].next = vq->first_unused;
219  vq->num_unused++;
220 
221  vq->first_unused = start;
222 }
223 
224 /* Adds a buffer to a virtqueue, returns 0 on success, negative number on error */
226  struct virtqueue *_vq, /* the queue */
227  struct scatterlist sg[], /* sg array of length out + in */
228  unsigned int out, /* number of driver->device buffer descriptors in sg */
229  unsigned int in, /* number of device->driver buffer descriptors in sg */
230  void *opaque, /* later returned from virtqueue_get_buf */
231  void *va_indirect, /* VA of the indirect page or NULL */
232  ULONGLONG phys_indirect) /* PA of the indirect page or 0 */
233 {
234  struct virtqueue_split *vq = splitvq(_vq);
235  struct vring *vring = &vq->vring;
236  unsigned int i;
237  u16 idx;
238 
239  if (va_indirect && (out + in) > 1 && vq->num_unused > 0) {
240  /* Use one indirect descriptor */
241  struct vring_desc *desc = (struct vring_desc *)va_indirect;
242 
243  for (i = 0; i < out + in; i++) {
244  desc[i].flags = (i < out ? 0 : VIRTQ_DESC_F_WRITE);
245  desc[i].flags |= VIRTQ_DESC_F_NEXT;
246  desc[i].addr = sg[i].physAddr.QuadPart;
247  desc[i].len = sg[i].length;
248  desc[i].next = (u16)i + 1;
249  }
250  desc[i - 1].flags &= ~VIRTQ_DESC_F_NEXT;
251 
252  idx = get_unused_desc(vq);
253  vq->vring.desc[idx].flags = VIRTQ_DESC_F_INDIRECT;
254  vq->vring.desc[idx].addr = phys_indirect;
255  vq->vring.desc[idx].len = i * sizeof(struct vring_desc);
256 
257  vq->opaque[idx] = opaque;
258  } else {
259  u16 last_idx;
260 
261  /* Use out + in regular descriptors */
262  if (out + in > vq->num_unused) {
263  return -ENOSPC;
264  }
265 
266  /* First descriptor */
267  idx = last_idx = get_unused_desc(vq);
268  vq->opaque[idx] = opaque;
269 
270  vring->desc[idx].addr = sg[0].physAddr.QuadPart;
271  vring->desc[idx].len = sg[0].length;
272  vring->desc[idx].flags = VIRTQ_DESC_F_NEXT;
273  if (out == 0) {
274  vring->desc[idx].flags |= VIRTQ_DESC_F_WRITE;
275  }
276  vring->desc[idx].next = vq->first_unused;
277 
278  /* The rest of descriptors */
279  for (i = 1; i < out + in; i++) {
280  last_idx = get_unused_desc(vq);
281 
282  vring->desc[last_idx].addr = sg[i].physAddr.QuadPart;
283  vring->desc[last_idx].len = sg[i].length;
284  vring->desc[last_idx].flags = VIRTQ_DESC_F_NEXT;
285  if (i >= out) {
286  vring->desc[last_idx].flags |= VIRTQ_DESC_F_WRITE;
287  }
288  vring->desc[last_idx].next = vq->first_unused;
289  }
290  vring->desc[last_idx].flags &= ~VIRTQ_DESC_F_NEXT;
291  }
292 
293  /* Write the first descriptor into the available ring */
294  vring->avail->ring[DESC_INDEX(vring->num, vq->master_vring_avail.idx)] = idx;
295  KeMemoryBarrier();
296  vring->avail->idx = ++vq->master_vring_avail.idx;
297  vq->num_added_since_kick++;
298 
299  return 0;
300 }
301 
302 /* Gets the opaque pointer associated with a returned buffer, or NULL if no buffer is available */
304  struct virtqueue *_vq, /* the queue */
305  unsigned int *len) /* number of bytes returned by the device */
306 {
307  struct virtqueue_split *vq = splitvq(_vq);
308  void *opaque;
309  u16 idx;
310 
311  if (vq->last_used == (int)vq->vring.used->idx) {
312  /* No descriptor index in the used ring */
313  return NULL;
314  }
315  KeMemoryBarrier();
316 
317  idx = DESC_INDEX(vq->vring.num, vq->last_used);
318  *len = vq->vring.used->ring[idx].len;
319 
320  /* Get the first used descriptor */
321  idx = (u16)vq->vring.used->ring[idx].id;
322  opaque = vq->opaque[idx];
323 
324  /* Put all descriptors back to the free list */
326 
327  vq->last_used++;
329  vring_used_event(&vq->vring) = vq->last_used;
330  KeMemoryBarrier();
331  }
332 
333  ASSERT(opaque != NULL);
334  return opaque;
335 }
336 
337 /* Returns true if at least one returned buffer is available, false otherwise */
339 {
340  struct virtqueue_split *vq = splitvq(_vq);
341  return (vq->last_used != vq->vring.used->idx);
342 }
343 
344 /* Returns true if the device should be notified, false otherwise */
345 static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
346 {
347  struct virtqueue_split *vq = splitvq(_vq);
348  bool wrap_around;
349  u16 old, new;
350  KeMemoryBarrier();
351 
352  wrap_around = (vq->num_added_since_kick >= (1 << 16));
353 
354  old = (u16)(vq->master_vring_avail.idx - vq->num_added_since_kick);
355  new = vq->master_vring_avail.idx;
356  vq->num_added_since_kick = 0;
357 
358  if (_vq->vdev->event_suppression_enabled) {
359  return wrap_around || (bool)vring_need_event(vring_avail_event(&vq->vring), new, old);
360  } else {
361  return !(vq->vring.used->flags & VIRTQ_USED_F_NO_NOTIFY);
362  }
363 }
364 
365 /* Notifies the device even if it's not necessary according to the event suppression logic */
366 static void virtqueue_kick_always_split(struct virtqueue *_vq)
367 {
368  struct virtqueue_split *vq = splitvq(_vq);
369  KeMemoryBarrier();
370  vq->num_added_since_kick = 0;
371  virtqueue_notify(_vq);
372 }
373 
374 /* Enables interrupts on a virtqueue and returns false if the queue has at least one returned
375  * buffer available to be fetched by virtqueue_get_buf, true otherwise */
376 static bool virtqueue_enable_cb_split(struct virtqueue *_vq)
377 {
378  struct virtqueue_split *vq = splitvq(_vq);
379  if (!virtqueue_is_interrupt_enabled(_vq)) {
380  vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT;
381  if (!_vq->vdev->event_suppression_enabled)
382  {
383  vq->vring.avail->flags = vq->master_vring_avail.flags;
384  }
385  }
386 
387  vring_used_event(&vq->vring) = vq->last_used;
388  KeMemoryBarrier();
389  return (vq->last_used == vq->vring.used->idx);
390 }
391 
392 /* Enables interrupts on a virtqueue after ~3/4 of the currently pushed buffers have been
393  * returned, returns false if this condition currently holds, false otherwise */
395 {
396  struct virtqueue_split *vq = splitvq(_vq);
397  u16 bufs;
398 
399  if (!virtqueue_is_interrupt_enabled(_vq)) {
400  vq->master_vring_avail.flags &= ~VIRTQ_AVAIL_F_NO_INTERRUPT;
401  if (!_vq->vdev->event_suppression_enabled)
402  {
403  vq->vring.avail->flags = vq->master_vring_avail.flags;
404  }
405  }
406 
407  /* Note that 3/4 is an arbitrary threshold */
408  bufs = (u16)(vq->master_vring_avail.idx - vq->last_used) * 3 / 4;
409  vring_used_event(&vq->vring) = vq->last_used + bufs;
410  KeMemoryBarrier();
411  return ((vq->vring.used->idx - vq->last_used) <= bufs);
412 }
413 
414 /* Disables interrupts on a virtqueue */
415 static void virtqueue_disable_cb_split(struct virtqueue *_vq)
416 {
417  struct virtqueue_split *vq = splitvq(_vq);
419  vq->master_vring_avail.flags |= VIRTQ_AVAIL_F_NO_INTERRUPT;
420  if (!_vq->vdev->event_suppression_enabled)
421  {
422  vq->vring.avail->flags = vq->master_vring_avail.flags;
423  }
424  }
425 }
426 
427 /* Returns true if interrupts are enabled on a virtqueue, false otherwise */
429 {
430  struct virtqueue_split *vq = splitvq(_vq);
431  return !(vq->master_vring_avail.flags & VIRTQ_AVAIL_F_NO_INTERRUPT);
432 }
433 
434 /* Re-initializes an already initialized virtqueue */
435 static void virtqueue_shutdown_split(struct virtqueue *_vq)
436 {
437  struct virtqueue_split *vq = splitvq(_vq);
438  unsigned int num = vq->vring.num;
439  void *pages = vq->vring.desc;
440  unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;
441 
442  RtlZeroMemory(pages, vring_size_split(num, vring_align));
444  _vq->index,
445  vq->vring.num,
446  vring_align,
447  _vq->vdev,
448  pages,
449  _vq->notification_cb,
450  vq);
451 }
452 
453 /* Gets the opaque pointer associated with a not-yet-returned buffer, or NULL if no buffer is available
454  * to aid drivers with cleaning up all data on virtqueue shutdown */
456 {
457  struct virtqueue_split *vq = splitvq(_vq);
458  u16 idx;
459  void *opaque = NULL;
460 
461  for (idx = 0; idx < (u16)vq->vring.num; idx++) {
462  opaque = vq->opaque[idx];
463  if (opaque) {
465  vq->vring.avail->idx = --vq->master_vring_avail.idx;
466  break;
467  }
468  }
469  return opaque;
470 }
471 
472 /* Returns the size of the virtqueue structure including
473  * additional size for per-descriptor data */
474 unsigned int vring_control_block_size(u16 qsize, bool packed)
475 {
476  unsigned int res;
477  if (packed) {
478  return vring_control_block_size_packed(qsize);
479  }
480  res = sizeof(struct virtqueue_split);
481  res += sizeof(void *) * qsize;
482  return res;
483 }
484 
485 /* Initializes a new virtqueue using already allocated memory */
487  unsigned int index, /* virtqueue index */
488  unsigned int num, /* virtqueue size (always a power of 2) */
489  unsigned int vring_align, /* vring alignment requirement */
490  VirtIODevice *vdev, /* the virtio device owning the queue */
491  void *pages, /* vring memory */
492  void(*notify)(struct virtqueue *), /* notification callback */
493  void *control) /* virtqueue memory */
494 {
495  struct virtqueue_split *vq = splitvq(control);
496  u16 i;
497 
498  if (DESC_INDEX(num, num) != 0) {
499  DPrintf(0, "Virtqueue length %u is not a power of 2\n", num);
500  return NULL;
501  }
502 
503  RtlZeroMemory(vq, sizeof(*vq) + num * sizeof(void *));
504 
505  vring_init(&vq->vring, num, pages, vring_align);
506  vq->vq.vdev = vdev;
507  vq->vq.notification_cb = notify;
508  vq->vq.index = index;
509 
510  /* Build a linked list of unused descriptors */
511  vq->num_unused = num;
512  vq->first_unused = 0;
513  for (i = 0; i < num - 1; i++) {
514  vq->vring.desc[i].flags = VIRTQ_DESC_F_NEXT;
515  vq->vring.desc[i].next = i + 1;
516  }
517  vq->vq.avail_va = vq->vring.avail;
518  vq->vq.used_va = vq->vring.used;
519  vq->vq.add_buf = virtqueue_add_buf_split;
520  vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_split;
521  vq->vq.disable_cb = virtqueue_disable_cb_split;
522  vq->vq.enable_cb = virtqueue_enable_cb_split;
523  vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_split;
524  vq->vq.get_buf = virtqueue_get_buf_split;
525  vq->vq.has_buf = virtqueue_has_buf_split;
526  vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_split;
527  vq->vq.kick_always = virtqueue_kick_always_split;
528  vq->vq.kick_prepare = virtqueue_kick_prepare_split;
529  vq->vq.shutdown = virtqueue_shutdown_split;
530  return &vq->vq;
531 }
532 
533 /* Negotiates virtio transport features */
535  VirtIODevice *vdev,
536  u64 *features) /* points to device features on entry and driver accepted features on return */
537 {
538  unsigned int i;
539 
543  i != VIRTIO_F_VERSION_1) {
544  virtio_feature_disable(*features, i);
545  }
546  }
547 }
548 
549 /* Returns the max number of scatter-gather elements that fit in an indirect pages */
551 {
552  return PAGE_SIZE / sizeof(struct vring_desc);
553 }
554 
555 unsigned long vring_size(unsigned int num, unsigned long align, bool packed)
556 {
557  if (packed) {
558  return vring_size_packed(num, align);
559  } else {
560  return vring_size_split(num, align);
561  }
562 }
static unsigned vring_size_split(unsigned int num, unsigned long align)
Definition: VirtIORing.c:153
#define virtio_feature_disable(FeaturesList, Feature)
Definition: virtio_pci.h:313
unsigned char __u8
Definition: compat.h:88
unsigned long vring_size_packed(unsigned int num, unsigned long align)
unsigned long vring_size(unsigned int num, unsigned long align, bool packed)
Definition: VirtIORing.c:555
static u16 get_unused_desc(struct virtqueue_split *vq)
Definition: VirtIORing.c:196
__virtio16 idx
Definition: VirtIORing.c:90
struct png_info_def **typedef void(__cdecl typeof(png_destroy_read_struct))(struct png_struct_def **
Definition: typeof.h:49
void(* notification_cb)(struct virtqueue *vq)
Definition: VirtIO.h:46
static BOOLEAN virtqueue_is_interrupt_enabled_split(struct virtqueue *_vq)
Definition: VirtIORing.c:428
__virtio16 next
Definition: VirtIORing.c:71
__virtio16 flags
Definition: VirtIORing.c:89
const GLenum * bufs
Definition: glext.h:6026
GLuint GLenum GLsizei GLsizei GLint GLint GLboolean packed
Definition: glext.h:9271
void * opaque[]
Definition: VirtIORing.c:188
static void vring_init(struct vring *vr, unsigned int num, void *p, unsigned long align)
Definition: VirtIORing.c:143
struct vring_avail * avail
Definition: VirtIORing.c:135
struct vring_used_elem ring[]
Definition: VirtIORing.c:91
unsigned int vring_control_block_size(u16 qsize, bool packed)
Definition: VirtIORing.c:474
int notify
Definition: msacm.c:1365
#define VIRTIO_TRANSPORT_F_END
Definition: virtio_config.h:53
#define SMP_CACHE_BYTES
Definition: osdep.h:39
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
Definition: VirtIORing.c:345
struct virtqueue_split::@1000 master_vring_avail
#define splitvq(vq)
Definition: VirtIORing.c:191
static void * virtqueue_get_buf_split(struct virtqueue *_vq, unsigned int *len)
Definition: VirtIORing.c:303
int align(int length, int align)
Definition: dsound8.c:36
__virtio32 len
Definition: VirtIORing.c:85
u32 virtio_get_indirect_page_capacity()
Definition: VirtIORing.c:550
unsigned int index
Definition: VirtIO.h:45
#define DESC_INDEX(num, i)
Definition: VirtIORing.c:39
ULONG32 u32
Definition: btrfs.h:14
static void virtqueue_kick_always_split(struct virtqueue *_vq)
Definition: VirtIORing.c:366
struct virtqueue vq
Definition: VirtIORing.c:178
uint32_t ULONG_PTR
Definition: typedefs.h:65
VRET vr
Definition: symbols.c:94
GLsizei GLenum const GLvoid GLsizei GLenum GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLint GLint GLint GLshort GLshort GLshort GLubyte GLubyte GLubyte GLuint GLuint GLuint GLushort GLushort GLushort GLbyte GLbyte GLbyte GLbyte GLdouble GLdouble GLdouble GLdouble GLfloat GLfloat GLfloat GLfloat GLint GLint GLint GLint GLshort GLshort GLshort GLshort GLubyte GLubyte GLubyte GLubyte GLuint GLuint GLuint GLuint GLushort GLushort GLushort GLushort GLboolean const GLdouble const GLfloat const GLint const GLshort const GLbyte const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLdouble const GLfloat const GLfloat const GLint const GLint const GLshort const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort const GLdouble const GLfloat const GLint const GLshort GLenum GLenum GLenum GLfloat GLenum GLint GLenum GLenum GLenum GLfloat GLenum GLenum GLint GLenum GLfloat GLenum GLint GLint GLushort GLenum GLenum GLfloat GLenum GLenum GLint GLfloat const GLubyte GLenum GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLint GLint GLsizei GLsizei GLint GLenum GLenum const GLvoid GLenum GLenum const GLfloat GLenum GLenum const GLint GLenum GLenum const GLdouble GLenum GLenum const GLfloat GLenum GLenum const GLint GLsizei GLuint GLfloat GLuint GLbitfield GLfloat GLint GLuint GLboolean GLenum GLfloat GLenum GLbitfield GLenum GLfloat GLfloat GLint GLint const GLfloat GLenum GLfloat GLfloat GLint GLint GLfloat GLfloat GLint GLint const GLfloat GLint GLfloat GLfloat GLint 
GLfloat GLfloat GLint GLfloat GLfloat const GLdouble const GLfloat const GLdouble const GLfloat GLint i
Definition: glfuncs.h:248
#define VIRTQ_AVAIL_F_NO_INTERRUPT
Definition: VirtIORing.c:55
static int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
Definition: VirtIORing.c:167
static void put_unused_desc_chain(struct virtqueue_split *vq, u16 idx)
Definition: VirtIORing.c:207
static const WCHAR desc[]
Definition: protectdata.c:36
static BOOLEAN virtqueue_is_interrupt_enabled(struct virtqueue *vq)
Definition: VirtIO.h:110
unsigned int num_added_since_kick
Definition: VirtIORing.c:185
unsigned int idx
Definition: utils.c:41
#define DPrintf(Level, Fmt)
Definition: kdebugprint.h:61
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
Definition: VirtIORing.c:415
Definition: arc.h:49
unsigned char BOOLEAN
smooth NULL
Definition: ftsmooth.c:416
#define VIRTQ_USED_F_NO_NOTIFY
Definition: VirtIORing.c:51
GLuint index
Definition: glext.h:6031
#define VIRTQ_DESC_F_WRITE
Definition: VirtIORing.c:44
__u16 __bitwise__ __virtio16
Definition: virtio_types.h:43
__u32 __bitwise__ __virtio32
Definition: virtio_types.h:44
void vring_transport_features(VirtIODevice *vdev, u64 *features)
Definition: VirtIORing.c:534
typedef bool(CARDLIBPROC *pCanDragProc)(CardRegion &stackobj
#define VIRTIO_RING_F_EVENT_IDX
Definition: virtio_ring.h:45
static void virtqueue_shutdown_split(struct virtqueue *_vq)
Definition: VirtIORing.c:435
void virtqueue_notify(struct virtqueue *vq)
__virtio32 len
Definition: VirtIORing.c:67
unsigned short __u16
Definition: compat.h:89
uint64_t ULONGLONG
Definition: typedefs.h:67
static FILE * out
Definition: regtests2xml.c:44
GLuint GLuint num
Definition: glext.h:9618
#define u16
Definition: types.h:8
#define vring_avail_event(vr)
Definition: VirtIORing.c:141
unsigned int num_unused
Definition: VirtIORing.c:184
ASSERT((InvokeOnSuccess||InvokeOnError||InvokeOnCancel) ?(CompletionRoutine !=NULL) :TRUE)
ULONG64 u64
Definition: btrfs.h:15
#define index(s, c)
Definition: various.h:29
static BOOLEAN virtqueue_has_buf_split(struct virtqueue *_vq)
Definition: VirtIORing.c:338
GLenum GLsizei len
Definition: glext.h:6722
unsigned int num
Definition: VirtIORing.c:131
#define PAGE_SIZE
Definition: env_spec_w32.h:49
__virtio16 idx
Definition: VirtIORing.c:76
__virtio16 flags
Definition: VirtIORing.c:75
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
Definition: VirtIORing.c:394
#define VIRTQ_DESC_F_NEXT
Definition: VirtIORing.c:42
#define VIRTQ_DESC_F_INDIRECT
Definition: VirtIORing.c:46
#define vring_used_event(vr)
Definition: VirtIORing.c:140
static void * virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
Definition: VirtIORing.c:455
struct vring_desc * desc
Definition: VirtIORing.c:133
GLuint in
Definition: glext.h:9616
GLuint start
Definition: gl.h:1545
__virtio64 addr
Definition: VirtIORing.c:65
struct vring vring
Definition: VirtIORing.c:179
#define scatterlist
Definition: VirtIO.h:6
bool event_suppression_enabled
Definition: virtio_pci.h:245
GLuint res
Definition: glext.h:9613
VirtIODevice * vdev
Definition: VirtIO.h:44
#define VIRTIO_TRANSPORT_F_START
Definition: virtio_config.h:52
__virtio32 id
Definition: VirtIORing.c:83
#define RtlZeroMemory(Destination, Length)
Definition: typedefs.h:262
#define ULONG_PTR
Definition: config.h:101
static bool virtqueue_enable_cb_split(struct virtqueue *_vq)
Definition: VirtIORing.c:376
FORCEINLINE VOID KeMemoryBarrier(VOID)
Definition: ke.h:58
static int virtqueue_add_buf_split(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, unsigned int in, void *opaque, void *va_indirect, ULONGLONG phys_indirect)
Definition: VirtIORing.c:225
GLfloat GLfloat p
Definition: glext.h:8902
struct vring_used * used
Definition: VirtIORing.c:137
#define VIRTIO_RING_F_INDIRECT_DESC
Definition: virtio_ring.h:39
#define VIRTIO_F_VERSION_1
Definition: virtio_config.h:63
ULONG_PTR addr
Definition: virtio_pci.h:239
USHORT u16
Definition: btrfs.h:13
__virtio16 ring[]
Definition: VirtIORing.c:77
__u64 __bitwise__ __virtio64
Definition: virtio_types.h:45
struct virtqueue * vring_new_virtqueue_split(unsigned int index, unsigned int num, unsigned int vring_align, VirtIODevice *vdev, void *pages, void(*notify)(struct virtqueue *), void *control)
Definition: VirtIORing.c:486
__virtio16 flags
Definition: VirtIORing.c:69
unsigned int vring_control_block_size_packed(u16 qsize)