ReactOS 0.4.15-dev-7842-g558ab78
VirtIORing-Packed.c
/*
 * Packed virtio ring manipulation routines
 *
 * Copyright 2019 Red Hat, Inc.
 *
 * Authors:
 *  Yuri Benditovich <ybendito@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "windows/virtio_ring_allocation.h"

#include <pshpack1.h>

struct vring_packed_desc_event {
    /* Descriptor Ring Change Event Offset/Wrap Counter. */
    __le16 off_wrap;
    /* Descriptor Ring Change Event Flags. */
    __le16 flags;
};

struct vring_packed_desc {
    /* Buffer Address. */
    __virtio64 addr;
    /* Buffer Length. */
    __le32 len;
    /* Buffer ID. */
    __le16 id;
    /* The flags depending on descriptor type. */
    __le16 flags;
};

#include <poppack.h>

#define BUG_ON(condition) { if (condition) { KeBugCheck(0xE0E1E2E3); }}
#define BAD_RING(vq, fmt, ...) DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, vq->vq.index, __VA_ARGS__); BUG_ON(true)

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4

/*
 * Mark a descriptor as available or used in packed ring.
 * Notice: they are defined as shifts instead of shifted values.
 */
#define VRING_PACKED_DESC_F_AVAIL 7
#define VRING_PACKED_DESC_F_USED 15
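
/*
 * Ownership protocol example: with the driver's wrap counter at 1 (the
 * initial value), a descriptor is made available with AVAIL=1, USED=0;
 * the device marks it used by setting USED equal to AVAIL. Once the
 * driver wraps past the end of the ring, the counter flips to 0 and
 * "available" becomes AVAIL=0, USED=1, so entries left over from the
 * previous lap are never mistaken for fresh ones.
 */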

/* Enable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
/* Disable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1

/*
 * Enable events for a specific descriptor in packed ring.
 * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC 0x2
/*
 * Wrap counter bit shift in event suppression structure
 * of packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR 15

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event?
 */
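/* Worked example: if event_idx = 3, old = 2 and new_idx = 5, the other
 * side asked to be notified once entry 4 (event_idx + 1) was consumed;
 * (u16)(5 - 3 - 1) = 1 < (u16)(5 - 2) = 3, so an event is needed. The
 * unsigned 16-bit subtraction keeps the test valid across index
 * wrap-around.
 */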
static inline bool vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
    /* Note: Xen has similar logic for notification hold-off
     * in include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new_idx respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0. */
    return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}

struct vring_desc_state_packed {
    void *data;  /* Data for callback. */
    u16 num;     /* Descriptor list length. */
    u16 next;    /* The next desc state in a list. */
    u16 last;    /* The last desc state in a list. */
};

struct virtqueue_packed {
    struct virtqueue vq;
    /* Number we've added since last sync. */
    unsigned int num_added;
    /* Head of free buffer list. */
    unsigned int free_head;
    /* Number of free descriptors */
    unsigned int num_free;
    /* Last used index we've seen. */
    u16 last_used_idx;
    /* Avail used flags. */
    u16 avail_used_flags;
    struct
    {
        /* Driver ring wrap counter. */
        bool avail_wrap_counter;
        /* Device ring wrap counter. */
        bool used_wrap_counter;
        /* Index of the next avail descriptor. */
        u16 next_avail_idx;
        /*
         * Last written value to driver->flags in
         * guest byte order.
         */
        u16 event_flags_shadow;
        struct {
            unsigned int num;
            struct vring_packed_desc *desc;
            struct vring_packed_desc_event *driver;
            struct vring_packed_desc_event *device;
        } vring;
        /* Per-descriptor state. */
        struct vring_desc_state_packed *desc_state;
    } packed;
    struct vring_desc_state_packed desc_states[];
};

#define packedvq(vq) ((struct virtqueue_packed *)vq)

unsigned int vring_control_block_size_packed(u16 qsize)
{
    return sizeof(struct virtqueue_packed) + sizeof(struct vring_desc_state_packed) * qsize;
}
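
/*
 * The control block is the virtqueue_packed header immediately followed
 * by one vring_desc_state_packed entry per descriptor, exposed through
 * the desc_states[] flexible array member; vring_new_virtqueue_packed()
 * points packed.desc_state at that trailing array.
 */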

unsigned long vring_size_packed(unsigned int num, unsigned long align)
{
    /* array of descriptors */
    unsigned long res = num * sizeof(struct vring_packed_desc);
    /* driver and device event */
    res += 2 * sizeof(struct vring_packed_desc_event);
    return res;
}
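
/*
 * Unlike the split ring, the packed layout needs no padding between its
 * parts: the descriptor array is immediately followed by the driver and
 * device event structures, which is why the align argument does not
 * enter the calculation.
 */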

static int virtqueue_add_buf_packed(
    struct virtqueue *_vq,   /* the queue */
    struct scatterlist sg[], /* sg array of length out + in */
    unsigned int out,        /* number of driver->device buffer descriptors in sg */
    unsigned int in,         /* number of device->driver buffer descriptors in sg */
    void *opaque,            /* later returned from virtqueue_get_buf */
    void *va_indirect,       /* VA of the indirect page or NULL */
    ULONGLONG phys_indirect) /* PA of the indirect page or 0 */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int descs_used;
    struct vring_packed_desc *desc;
    u16 head, id, i;

    descs_used = out + in;
    head = vq->packed.next_avail_idx;
    id = (u16)vq->free_head;

    BUG_ON(descs_used == 0);
    BUG_ON(id >= vq->packed.vring.num);

    if (va_indirect && vq->num_free > 0) {
        desc = va_indirect;
        for (i = 0; i < descs_used; i++) {
            desc[i].flags = i < out ? 0 : VRING_DESC_F_WRITE;
            desc[i].addr = sg[i].physAddr.QuadPart;
            desc[i].len = sg[i].length;
        }
        vq->packed.vring.desc[head].addr = phys_indirect;
        vq->packed.vring.desc[head].len = descs_used * sizeof(struct vring_packed_desc);
        vq->packed.vring.desc[head].id = id;

        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = VRING_DESC_F_INDIRECT | vq->avail_used_flags;

        DPrintf(5, "Added buffer head %i to Q%d\n", head, vq->vq.index);
        head++;
        if (head >= vq->packed.vring.num) {
            head = 0;
            vq->packed.avail_wrap_counter ^= 1;
            vq->avail_used_flags ^=
                1 << VRING_PACKED_DESC_F_AVAIL |
                1 << VRING_PACKED_DESC_F_USED;
        }
        vq->packed.next_avail_idx = head;
        /* We're using some buffers from the free list. */
        vq->num_free -= 1;
        vq->num_added += 1;

        vq->free_head = vq->packed.desc_state[id].next;

        /* Store token and indirect buffer state. */
        vq->packed.desc_state[id].num = 1;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = id;

    } else {
        unsigned int n;
        u16 curr, prev, head_flags;
        if (vq->num_free < descs_used) {
            DPrintf(6, "Can't add buffer to Q%d\n", vq->vq.index);
            return -ENOSPC;
        }
        desc = vq->packed.vring.desc;
        i = head;
        curr = id;
        for (n = 0; n < descs_used; n++) {
            u16 flags = vq->avail_used_flags;
            flags |= n < out ? 0 : VRING_DESC_F_WRITE;
            if (n != descs_used - 1) {
                flags |= VRING_DESC_F_NEXT;
            }
            desc[i].addr = sg[n].physAddr.QuadPart;
            desc[i].len = sg[n].length;
            desc[i].id = id;
            if (n == 0) {
                head_flags = flags;
            }
            else {
                desc[i].flags = flags;
            }

            prev = curr;
            curr = vq->packed.desc_state[curr].next;

            if (++i >= vq->packed.vring.num) {
                i = 0;
                vq->avail_used_flags ^=
                    1 << VRING_PACKED_DESC_F_AVAIL |
                    1 << VRING_PACKED_DESC_F_USED;
            }
        }

        if (i < head)
            vq->packed.avail_wrap_counter ^= 1;

        /* We're using some buffers from the free list. */
        vq->num_free -= descs_used;

        /* Update free pointer */
        vq->packed.next_avail_idx = i;
        vq->free_head = curr;

        /* Store token. */
        vq->packed.desc_state[id].num = (u16)descs_used;
        vq->packed.desc_state[id].data = opaque;
        vq->packed.desc_state[id].last = prev;

        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
         * the list are made available.
         */
        KeMemoryBarrier();
        vq->packed.vring.desc[head].flags = head_flags;
        vq->num_added += descs_used;

        DPrintf(5, "Added buffer head @%i+%d to Q%d\n", head, descs_used, vq->vq.index);
    }

    return 0;
}
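
/*
 * Illustrative submission sequence (hypothetical driver code, using the
 * function pointers that vring_new_virtqueue_packed() installs below):
 *
 *     if (vq->add_buf(vq, sg, out, in, ctx, NULL, 0) == 0 &&
 *         vq->kick_prepare(vq)) {
 *         virtqueue_notify(vq);   // kick only when the device wants it
 *     }
 */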

static void detach_buf_packed(struct virtqueue_packed *vq, unsigned int id)
{
    struct vring_desc_state_packed *state = &vq->packed.desc_state[id];

    /* Clear data ptr. */
    state->data = NULL;

    vq->packed.desc_state[state->last].next = (u16)vq->free_head;
    vq->free_head = id;
    vq->num_free += state->num;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int i;
    void *buf;

    for (i = 0; i < vq->packed.vring.num; i++) {
        if (!vq->packed.desc_state[i].data)
            continue;
        /* detach_buf clears data, so grab it now. */
        buf = vq->packed.desc_state[i].data;
        detach_buf_packed(vq, i);
        return buf;
    }
    /* That should have freed everything. */
    BUG_ON(vq->num_free != vq->packed.vring.num);

    return NULL;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);

    if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }
}

static inline bool is_used_desc_packed(const struct virtqueue_packed *vq,
    u16 idx, bool used_wrap_counter)
{
    bool avail, used;
    u16 flags;

    flags = vq->packed.vring.desc[idx].flags;
    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

    return avail == used && used == used_wrap_counter;
}
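
/*
 * AVAIL == USED means the device has written the entry back (it sets
 * USED equal to AVAIL when publishing a used descriptor); comparing the
 * pair against the wrap counter we expect distinguishes a fresh
 * completion from one left over from the previous lap around the ring.
 */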

static inline bool virtqueue_poll_packed(struct virtqueue_packed *vq, u16 off_wrap)
{
    bool wrap_counter;
    u16 used_idx;

    KeMemoryBarrier();
    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

    return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue_packed *vq)
{
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */

    if (event_suppression_enabled) {
        vq->packed.vring.driver->off_wrap =
            vq->last_used_idx |
            (vq->packed.used_wrap_counter <<
                VRING_PACKED_EVENT_F_WRAP_CTR);
        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
        VRING_PACKED_EVENT_F_WRAP_CTR);
}

static bool virtqueue_enable_cb_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned last_used_idx = virtqueue_enable_cb_prepare_packed(vq);

    return !virtqueue_poll_packed(vq, (u16)last_used_idx);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
    u16 used_idx, wrap_counter;
    u16 bufs;

    /*
     * We optimistically turn back on interrupts, then check if there was
     * more to do.
     */

    if (event_suppression_enabled) {
        /* TODO: tune this threshold */
        bufs = (vq->packed.vring.num - vq->num_free) * 3 / 4;
        wrap_counter = vq->packed.used_wrap_counter;

        used_idx = vq->last_used_idx + bufs;
        if (used_idx >= vq->packed.vring.num) {
            used_idx -= (u16)vq->packed.vring.num;
            wrap_counter ^= 1;
        }

        vq->packed.vring.driver->off_wrap = used_idx |
            (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);

        /*
         * We need to update event offset and event wrap
         * counter first before updating event flags.
         */
        KeMemoryBarrier();
    }

    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
        vq->packed.event_flags_shadow = event_suppression_enabled ?
            VRING_PACKED_EVENT_FLAG_DESC :
            VRING_PACKED_EVENT_FLAG_ENABLE;
        vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
    }

    /*
     * We need to update event suppression structure first
     * before re-checking for more used buffers.
     */
    KeMemoryBarrier();

    if (is_used_desc_packed(vq,
            vq->last_used_idx,
            vq->packed.used_wrap_counter)) {
        return false;
    }

    return true;
}
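
/*
 * The 3/4 threshold above asks the device to defer the interrupt until
 * most of the buffers currently in flight have been used, trading
 * interrupt rate for completion latency. A false return means a used
 * buffer is already pending, so the caller should poll the queue again
 * instead of waiting for the interrupt.
 */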

static BOOLEAN virtqueue_is_interrupt_enabled_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return vq->packed.event_flags_shadow & VRING_PACKED_EVENT_FLAG_DISABLE;
}

static void virtqueue_shutdown_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    unsigned int num = vq->packed.vring.num;
    void *pages = vq->packed.vring.desc;
    unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;

    RtlZeroMemory(pages, vring_size_packed(num, vring_align));
    vring_new_virtqueue_packed(
        _vq->index,
        num,
        vring_align,
        _vq->vdev,
        pages,
        _vq->notification_cb,
        _vq);
}

static inline bool more_used_packed(const struct virtqueue_packed *vq)
{
    return is_used_desc_packed(vq, vq->last_used_idx,
        vq->packed.used_wrap_counter);
}

static void *virtqueue_get_buf_packed(
    struct virtqueue *_vq, /* the queue */
    unsigned int *len)     /* number of bytes returned by the device */
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 last_used, id;
    void *ret;

    if (!more_used_packed(vq)) {
        DPrintf(6, "%s: No more buffers in queue\n", __FUNCTION__);
        return NULL;
    }

    /* Only get used elements after they have been exposed by host. */
    KeMemoryBarrier();

    last_used = vq->last_used_idx;
    id = vq->packed.vring.desc[last_used].id;
    *len = vq->packed.vring.desc[last_used].len;

    if (id >= vq->packed.vring.num) {
        BAD_RING(vq, "id %u out of range\n", id);
        return NULL;
    }
    if (!vq->packed.desc_state[id].data) {
        BAD_RING(vq, "id %u is not a head!\n", id);
        return NULL;
    }

    /* detach_buf_packed clears data, so grab it now. */
    ret = vq->packed.desc_state[id].data;
    detach_buf_packed(vq, id);

    vq->last_used_idx += vq->packed.desc_state[id].num;
    if (vq->last_used_idx >= vq->packed.vring.num) {
        vq->last_used_idx -= (u16)vq->packed.vring.num;
        vq->packed.used_wrap_counter ^= 1;
    }

    /*
     * If we expect an interrupt for the next entry, tell host
     * by writing event index and flush out the write before
     * the read in the next get_buf call.
     */
    if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) {
        vq->packed.vring.driver->off_wrap = vq->last_used_idx |
            ((u16)vq->packed.used_wrap_counter <<
                VRING_PACKED_EVENT_F_WRAP_CTR);
        KeMemoryBarrier();
    }

    return ret;
}
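
/*
 * Completions are matched by buffer ID rather than ring position, so
 * the device may return buffers in a different order than they were
 * made available; last_used_idx advances by the number of descriptors
 * the completed list occupied, as recorded in desc_state[id].num.
 */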

static BOOLEAN virtqueue_has_buf_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    return more_used_packed(vq);
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    u16 new, old, off_wrap, flags, wrap_counter, event_idx;
    bool needs_kick;
    union {
        struct {
            __le16 off_wrap;
            __le16 flags;
        };
        u32 value32;
    } snapshot;

    /*
     * We need to expose the new flags value before checking notification
     * suppressions.
     */
    KeMemoryBarrier();

    old = vq->packed.next_avail_idx - vq->num_added;
    new = vq->packed.next_avail_idx;
    vq->num_added = 0;

    snapshot.value32 = *(u32 *)vq->packed.vring.device;
    flags = snapshot.flags;

    if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
        needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
        goto out;
    }

    off_wrap = snapshot.off_wrap;

    wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
    event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    if (wrap_counter != vq->packed.avail_wrap_counter)
        event_idx -= (u16)vq->packed.vring.num;

    needs_kick = vring_need_event(event_idx, new, old);
out:
    return needs_kick;
}
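
/*
 * The snapshot union above reads off_wrap and flags from the device
 * event structure in a single aligned 32-bit load, so the two 16-bit
 * fields are captured consistently without locking against a concurrent
 * device update.
 */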

static void virtqueue_kick_always_packed(struct virtqueue *_vq)
{
    struct virtqueue_packed *vq = packedvq(_vq);
    KeMemoryBarrier();
    vq->num_added = 0;
    virtqueue_notify(_vq);
}

/* Initializes a new virtqueue using already allocated memory */
struct virtqueue *vring_new_virtqueue_packed(
    unsigned int index,                /* virtqueue index */
    unsigned int num,                  /* virtqueue size (always a power of 2) */
    unsigned int vring_align,          /* vring alignment requirement */
    VirtIODevice *vdev,                /* the virtio device owning the queue */
    void *pages,                       /* vring memory */
    void(*notify)(struct virtqueue *), /* notification callback */
    void *control)                     /* virtqueue memory */
{
    struct virtqueue_packed *vq = packedvq(control);
    unsigned int i;

    vq->vq.vdev = vdev;
    vq->vq.notification_cb = notify;
    vq->vq.index = index;

    vq->vq.avail_va = (u8 *)pages + num * sizeof(struct vring_packed_desc);
    vq->vq.used_va = (u8 *)vq->vq.avail_va + sizeof(struct vring_packed_desc_event);

    /* initialize the ring */
    vq->packed.vring.num = num;
    vq->packed.vring.desc = pages;
    vq->packed.vring.driver = vq->vq.avail_va;
    vq->packed.vring.device = vq->vq.used_va;

    vq->num_free = num;
    vq->free_head = 0;
    vq->num_added = 0;
    vq->packed.avail_wrap_counter = 1;
    vq->packed.used_wrap_counter = 1;
    vq->last_used_idx = 0;
    vq->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
    vq->packed.next_avail_idx = 0;
    vq->packed.event_flags_shadow = 0;
    vq->packed.desc_state = vq->desc_states;

    RtlZeroMemory(vq->packed.desc_state, num * sizeof(*vq->packed.desc_state));
    for (i = 0; i < num - 1; i++) {
        vq->packed.desc_state[i].next = i + 1;
    }

    vq->vq.add_buf = virtqueue_add_buf_packed;
    vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_packed;
    vq->vq.disable_cb = virtqueue_disable_cb_packed;
    vq->vq.enable_cb = virtqueue_enable_cb_packed;
    vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_packed;
    vq->vq.get_buf = virtqueue_get_buf_packed;
    vq->vq.has_buf = virtqueue_has_buf_packed;
    vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_packed;
    vq->vq.kick_always = virtqueue_kick_always_packed;
    vq->vq.kick_prepare = virtqueue_kick_prepare_packed;
    vq->vq.shutdown = virtqueue_shutdown_packed;
    return &vq->vq;
}
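
/*
 * Memory layout recap: pages holds the num descriptors followed by the
 * driver and then the device event structures, exactly as sized by
 * vring_size_packed(); control holds the virtqueue_packed block sized
 * by vring_control_block_size_packed(), whose trailing desc_state
 * entries are chained into the initial free list 0 -> 1 -> ... -> num-1.
 */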