ReactOS 0.4.16-dev-226-g79f2289
VirtIORing-Packed.c File Reference
#include "osdep.h"
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "windows/virtio_ring_allocation.h"
#include <pshpack1.h>
#include <poppack.h>
Include dependency graph for VirtIORing-Packed.c:

Go to the source code of this file.

Classes

struct  vring_packed_desc_event
 
struct  vring_packed_desc
 
struct  vring_desc_state_packed
 
struct  virtqueue_packed
 

Macros

#define BUG_ON(condition)   { if (condition) { KeBugCheck(0xE0E1E2E3); }}
 
#define BAD_RING(vq, fmt, ...)   DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, vq->vq.index, __VA_ARGS__); BUG_ON(true)
 
#define VRING_DESC_F_NEXT   1
 
#define VRING_DESC_F_WRITE   2
 
#define VRING_DESC_F_INDIRECT   4
 
#define VRING_PACKED_DESC_F_AVAIL   7
 
#define VRING_PACKED_DESC_F_USED   15
 
#define VRING_PACKED_EVENT_FLAG_ENABLE   0x0
 
#define VRING_PACKED_EVENT_FLAG_DISABLE   0x1
 
#define VRING_PACKED_EVENT_FLAG_DESC   0x2
 
#define VRING_PACKED_EVENT_F_WRAP_CTR   15
 
#define packedvq(vq)   ((struct virtqueue_packed *)vq)
 

Functions

static bool vring_need_event (__u16 event_idx, __u16 new_idx, __u16 old)
 
unsigned int vring_control_block_size_packed (u16 qsize)
 
unsigned long vring_size_packed (unsigned int num, unsigned long align)
 
static int virtqueue_add_buf_packed (struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, unsigned int in, void *opaque, void *va_indirect, ULONGLONG phys_indirect)
 
static void detach_buf_packed (struct virtqueue_packed *vq, unsigned int id)
 
static void * virtqueue_detach_unused_buf_packed (struct virtqueue *_vq)
 
static void virtqueue_disable_cb_packed (struct virtqueue *_vq)
 
static bool is_used_desc_packed (const struct virtqueue_packed *vq, u16 idx, bool used_wrap_counter)
 
static bool virtqueue_poll_packed (struct virtqueue_packed *vq, u16 off_wrap)
 
static unsigned virtqueue_enable_cb_prepare_packed (struct virtqueue_packed *vq)
 
static bool virtqueue_enable_cb_packed (struct virtqueue *_vq)
 
static bool virtqueue_enable_cb_delayed_packed (struct virtqueue *_vq)
 
static BOOLEAN virtqueue_is_interrupt_enabled_packed (struct virtqueue *_vq)
 
static void virtqueue_shutdown_packed (struct virtqueue *_vq)
 
static bool more_used_packed (const struct virtqueue_packed *vq)
 
static void * virtqueue_get_buf_packed (struct virtqueue *_vq, unsigned int *len)
 
static BOOLEAN virtqueue_has_buf_packed (struct virtqueue *_vq)
 
static bool virtqueue_kick_prepare_packed (struct virtqueue *_vq)
 
static void virtqueue_kick_always_packed (struct virtqueue *_vq)
 
struct virtqueue * vring_new_virtqueue_packed (unsigned int index, unsigned int num, unsigned int vring_align, VirtIODevice *vdev, void *pages, void(*notify)(struct virtqueue *), void *control)
 

Macro Definition Documentation

◆ BAD_RING

#define BAD_RING (   vq,
  fmt,
  ... 
)    DPrintf(0, "%s: queue %d: " fmt, __FUNCTION__, vq->vq.index, __VA_ARGS__); BUG_ON(true)

Definition at line 63 of file VirtIORing-Packed.c.

◆ BUG_ON

#define BUG_ON (   condition)    { if (condition) { KeBugCheck(0xE0E1E2E3); }}

Definition at line 62 of file VirtIORing-Packed.c.

◆ packedvq

#define packedvq (   vq)    ((struct virtqueue_packed *)vq)

Definition at line 155 of file VirtIORing-Packed.c.

◆ VRING_DESC_F_INDIRECT

#define VRING_DESC_F_INDIRECT   4

Definition at line 70 of file VirtIORing-Packed.c.

◆ VRING_DESC_F_NEXT

#define VRING_DESC_F_NEXT   1

Definition at line 66 of file VirtIORing-Packed.c.

◆ VRING_DESC_F_WRITE

#define VRING_DESC_F_WRITE   2

Definition at line 68 of file VirtIORing-Packed.c.

◆ VRING_PACKED_DESC_F_AVAIL

#define VRING_PACKED_DESC_F_AVAIL   7

Definition at line 76 of file VirtIORing-Packed.c.

◆ VRING_PACKED_DESC_F_USED

#define VRING_PACKED_DESC_F_USED   15

Definition at line 77 of file VirtIORing-Packed.c.

◆ VRING_PACKED_EVENT_F_WRAP_CTR

#define VRING_PACKED_EVENT_F_WRAP_CTR   15

Definition at line 94 of file VirtIORing-Packed.c.

◆ VRING_PACKED_EVENT_FLAG_DESC

#define VRING_PACKED_EVENT_FLAG_DESC   0x2

Definition at line 89 of file VirtIORing-Packed.c.

◆ VRING_PACKED_EVENT_FLAG_DISABLE

#define VRING_PACKED_EVENT_FLAG_DISABLE   0x1

Definition at line 82 of file VirtIORing-Packed.c.

◆ VRING_PACKED_EVENT_FLAG_ENABLE

#define VRING_PACKED_EVENT_FLAG_ENABLE   0x0

Definition at line 80 of file VirtIORing-Packed.c.
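
Note on usage: the VRING_PACKED_DESC_F_* and VRING_PACKED_EVENT_F_WRAP_CTR constants are bit positions that the code always uses behind a shift, whereas the VRING_PACKED_EVENT_FLAG_* constants are literal values written to the driver event suppression structure. A minimal illustrative sketch, not taken from the file (desc and driver_event are hypothetical pointers):

u16 flags = desc->flags;
bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));  /* bit 7: descriptor made available by the driver */
bool used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));   /* bit 15: descriptor consumed by the device */
driver_event->flags = VRING_PACKED_EVENT_FLAG_ENABLE;       /* stored as a plain value, not shifted */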

Function Documentation

◆ detach_buf_packed()

static void detach_buf_packed ( struct virtqueue_packed *vq,
unsigned int  id 
)
static

Definition at line 294 of file VirtIORing-Packed.c.

295{
296 struct vring_desc_state_packed *state = &vq->packed.desc_state[id];
297
298 /* Clear data ptr. */
299 state->data = NULL;
300
301 vq->packed.desc_state[state->last].next = (u16)vq->free_head;
302 vq->free_head = id;
303 vq->num_free += state->num;
304}

Referenced by virtqueue_detach_unused_buf_packed(), and virtqueue_get_buf_packed().

◆ is_used_desc_packed()

static bool is_used_desc_packed ( const struct virtqueue_packed *vq,
u16  idx,
bool  used_wrap_counter 
)
inline static

Definition at line 336 of file VirtIORing-Packed.c.

338{
339 bool avail, used;
340 u16 flags;
341
342 flags = vq->packed.vring.desc[idx].flags;
343 avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
344 used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
345
346 return avail == used && used == used_wrap_counter;
347}

Referenced by more_used_packed(), virtqueue_enable_cb_delayed_packed(), and virtqueue_poll_packed().

◆ more_used_packed()

static bool more_used_packed ( const struct virtqueue_packed *vq)
inline static

Definition at line 480 of file VirtIORing-Packed.c.

481{
482 return is_used_desc_packed(vq, vq->last_used_idx,
483 vq->packed.used_wrap_counter);
484}

Referenced by virtqueue_get_buf_packed(), and virtqueue_has_buf_packed().

◆ virtqueue_add_buf_packed()

static int virtqueue_add_buf_packed ( struct virtqueue *_vq,
struct scatterlist  sg[],
unsigned int  out,
unsigned int  in,
void *opaque,
void *va_indirect,
ULONGLONG  phys_indirect 
)
static

Definition at line 171 of file VirtIORing-Packed.c.

179{
180 struct virtqueue_packed *vq = packedvq(_vq);
181 unsigned int descs_used;
182 struct vring_packed_desc *desc;
183 u16 head, id, i;
184
185 descs_used = out + in;
186 head = vq->packed.next_avail_idx;
187 id = (u16)vq->free_head;
188
189 BUG_ON(descs_used == 0);
190 BUG_ON(id >= vq->packed.vring.num);
191
192 if (va_indirect && vq->num_free > 0) {
193 desc = va_indirect;
194 for (i = 0; i < descs_used; i++) {
195 desc[i].flags = i < out ? 0 : VRING_DESC_F_WRITE;
196 desc[i].addr = sg[i].physAddr.QuadPart;
197 desc[i].len = sg[i].length;
198 }
199 vq->packed.vring.desc[head].addr = phys_indirect;
200 vq->packed.vring.desc[head].len = descs_used * sizeof(struct vring_packed_desc);
201 vq->packed.vring.desc[head].id = id;
202
203 KeMemoryBarrier();
204 vq->packed.vring.desc[head].flags = VRING_DESC_F_INDIRECT | vq->avail_used_flags;
205
206 DPrintf(5, "Added buffer head %i to Q%d\n", head, vq->vq.index);
207 head++;
208 if (head >= vq->packed.vring.num) {
209 head = 0;
210 vq->packed.avail_wrap_counter ^= 1;
211 vq->avail_used_flags ^=
212 1 << VRING_PACKED_DESC_F_AVAIL |
213 1 << VRING_PACKED_DESC_F_USED;
214 }
215 vq->packed.next_avail_idx = head;
216 /* We're using some buffers from the free list. */
217 vq->num_free -= 1;
218 vq->num_added += 1;
219
220 vq->free_head = vq->packed.desc_state[id].next;
221
222 /* Store token and indirect buffer state. */
223 vq->packed.desc_state[id].num = 1;
224 vq->packed.desc_state[id].data = opaque;
225 vq->packed.desc_state[id].last = id;
226
227 } else {
228 unsigned int n;
229 u16 curr, prev, head_flags;
230 if (vq->num_free < descs_used) {
231 DPrintf(6, "Can't add buffer to Q%d\n", vq->vq.index);
232 return -ENOSPC;
233 }
234 desc = vq->packed.vring.desc;
235 i = head;
236 curr = id;
237 for (n = 0; n < descs_used; n++) {
238 u16 flags = vq->avail_used_flags;
239 flags |= n < out ? 0 : VRING_DESC_F_WRITE;
240 if (n != descs_used - 1) {
241 flags |= VRING_DESC_F_NEXT;
242 }
243 desc[i].addr = sg[n].physAddr.QuadPart;
244 desc[i].len = sg[n].length;
245 desc[i].id = id;
246 if (n == 0) {
247 head_flags = flags;
248 }
249 else {
250 desc[i].flags = flags;
251 }
252
253 prev = curr;
254 curr = vq->packed.desc_state[curr].next;
255
256 if (++i >= vq->packed.vring.num) {
257 i = 0;
258 vq->avail_used_flags ^=
259 1 << VRING_PACKED_DESC_F_AVAIL |
260 1 << VRING_PACKED_DESC_F_USED;
261 }
262 }
263
264 if (i < head)
265 vq->packed.avail_wrap_counter ^= 1;
266
267 /* We're using some buffers from the free list. */
268 vq->num_free -= descs_used;
269
270 /* Update free pointer */
271 vq->packed.next_avail_idx = i;
272 vq->free_head = curr;
273
274 /* Store token. */
275 vq->packed.desc_state[id].num = (u16)descs_used;
276 vq->packed.desc_state[id].data = opaque;
277 vq->packed.desc_state[id].last = prev;
278
279 /*
280 * A driver MUST NOT make the first descriptor in the list
281 * available before all subsequent descriptors comprising
282 * the list are made available.
283 */
284 KeMemoryBarrier();
285 vq->packed.vring.desc[head].flags = head_flags;
286 vq->num_added += descs_used;
287
288 DPrintf(5, "Added buffer head @%i+%d to Q%d\n", head, descs_used, vq->vq.index);
289 }
290
291 return 0;
292}

Referenced by vring_new_virtqueue_packed().
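
As a rough illustration (not from the file), a driver that has already obtained a struct virtqueue * from vring_new_virtqueue_packed() could queue one device-writable buffer through the add_buf callback installed by that function; buffer_pa, buffer_len and request_context are hypothetical placeholders:

struct scatterlist sg[1];
sg[0].physAddr.QuadPart = buffer_pa.QuadPart;  /* physical address of the data buffer */
sg[0].length = buffer_len;

/* out = 0 device-readable entries, in = 1 device-writable entry, no indirect table */
if (vq->add_buf(vq, sg, 0, 1, request_context, NULL, 0) < 0) {
    /* -ENOSPC: not enough free descriptors; retry after reaping completions */
}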

◆ virtqueue_detach_unused_buf_packed()

static void * virtqueue_detach_unused_buf_packed ( struct virtqueue *_vq)
static

Definition at line 306 of file VirtIORing-Packed.c.

307{
308 struct virtqueue_packed *vq = packedvq(_vq);
309 unsigned int i;
310 void *buf;
311
312 for (i = 0; i < vq->packed.vring.num; i++) {
313 if (!vq->packed.desc_state[i].data)
314 continue;
315 /* detach_buf clears data, so grab it now. */
316 buf = vq->packed.desc_state[i].data;
317 detach_buf_packed(vq, i);
318 return buf;
319 }
320 /* That should have freed everything. */
321 BUG_ON(vq->num_free != vq->packed.vring.num);
322
323 return NULL;
324}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_disable_cb_packed()

static void virtqueue_disable_cb_packed ( struct virtqueue *_vq)
static

Definition at line 326 of file VirtIORing-Packed.c.

327{
328 struct virtqueue_packed *vq = packedvq(_vq);
329
330 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
331 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
332 vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
333 }
334}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_enable_cb_delayed_packed()

static bool virtqueue_enable_cb_delayed_packed ( struct virtqueue *_vq)
static

Definition at line 401 of file VirtIORing-Packed.c.

402{
403 struct virtqueue_packed *vq = packedvq(_vq);
404 bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
405 u16 used_idx, wrap_counter;
406 u16 bufs;
407
408 /*
409 * We optimistically turn back on interrupts, then check if there was
410 * more to do.
411 */
412
413 if (event_suppression_enabled) {
414 /* TODO: tune this threshold */
415 bufs = (vq->packed.vring.num - vq->num_free) * 3 / 4;
416 wrap_counter = vq->packed.used_wrap_counter;
417
418 used_idx = vq->last_used_idx + bufs;
419 if (used_idx >= vq->packed.vring.num) {
420 used_idx -= (u16)vq->packed.vring.num;
421 wrap_counter ^= 1;
422 }
423
424 vq->packed.vring.driver->off_wrap = used_idx |
425 (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
426
427 /*
428 * We need to update event offset and event wrap
429 * counter first before updating event flags.
430 */
431 KeMemoryBarrier();
432 }
433
434 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
435 vq->packed.event_flags_shadow = event_suppression_enabled ?
436 VRING_PACKED_EVENT_FLAG_DESC :
437 VRING_PACKED_EVENT_FLAG_ENABLE;
438 vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
439 }
440
441 /*
442 * We need to update event suppression structure first
443 * before re-checking for more used buffers.
444 */
445 KeMemoryBarrier();
446
447 if (is_used_desc_packed(vq,
448 vq->last_used_idx,
449 vq->packed.used_wrap_counter)) {
450 return false;
451 }
452
453 return true;
454}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_enable_cb_packed()

static bool virtqueue_enable_cb_packed ( struct virtqueue *_vq)
static

Definition at line 393 of file VirtIORing-Packed.c.

394{
395 struct virtqueue_packed *vq = packedvq(_vq);
396 unsigned last_used_idx = virtqueue_enable_cb_prepare_packed(vq);
397
398 return !virtqueue_poll_packed(vq, (u16)last_used_idx);
399}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_enable_cb_prepare_packed()

static unsigned virtqueue_enable_cb_prepare_packed ( struct virtqueue_packed *vq)
inline static

Definition at line 362 of file VirtIORing-Packed.c.

363{
364 bool event_suppression_enabled = vq->vq.vdev->event_suppression_enabled;
365 /*
366 * We optimistically turn back on interrupts, then check if there was
367 * more to do.
368 */
369
370 if (event_suppression_enabled) {
371 vq->packed.vring.driver->off_wrap =
372 vq->last_used_idx |
373 (vq->packed.used_wrap_counter <<
374 VRING_PACKED_EVENT_F_WRAP_CTR);
375 /*
376 * We need to update event offset and event wrap
377 * counter first before updating event flags.
378 */
379 KeMemoryBarrier();
380 }
381
382 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
383 vq->packed.event_flags_shadow = event_suppression_enabled ?
384 VRING_PACKED_EVENT_FLAG_DESC :
385 VRING_PACKED_EVENT_FLAG_ENABLE;
386 vq->packed.vring.driver->flags = vq->packed.event_flags_shadow;
387 }
388
389 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
390 VRING_PACKED_EVENT_F_WRAP_CTR);
391}

Referenced by virtqueue_enable_cb_packed().

◆ virtqueue_get_buf_packed()

static void * virtqueue_get_buf_packed ( struct virtqueue *_vq,
unsigned int *len 
)
static

Definition at line 486 of file VirtIORing-Packed.c.

489{
490 struct virtqueue_packed *vq = packedvq(_vq);
491 u16 last_used, id;
492 void *ret;
493
494 if (!more_used_packed(vq)) {
495 DPrintf(6, "%s: No more buffers in queue\n", __FUNCTION__);
496 return NULL;
497 }
498
499 /* Only get used elements after they have been exposed by host. */
500 KeMemoryBarrier();
501
502 last_used = vq->last_used_idx;
503 id = vq->packed.vring.desc[last_used].id;
504 *len = vq->packed.vring.desc[last_used].len;
505
506 if (id >= vq->packed.vring.num) {
507 BAD_RING(vq, "id %u out of range\n", id);
508 return NULL;
509 }
510 if (!vq->packed.desc_state[id].data) {
511 BAD_RING(vq, "id %u is not a head!\n", id);
512 return NULL;
513 }
514
515 /* detach_buf_packed clears data, so grab it now. */
516 ret = vq->packed.desc_state[id].data;
517 detach_buf_packed(vq, id);
518
519 vq->last_used_idx += vq->packed.desc_state[id].num;
520 if (vq->last_used_idx >= vq->packed.vring.num) {
521 vq->last_used_idx -= (u16)vq->packed.vring.num;
522 vq->packed.used_wrap_counter ^= 1;
523 }
524
525 /*
526 * If we expect an interrupt for the next entry, tell host
527 * by writing event index and flush out the write before
528 * the read in the next get_buf call.
529 */
530 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) {
531 vq->packed.vring.driver->off_wrap = vq->last_used_idx |
532 ((u16)vq->packed.used_wrap_counter <<
533 VRING_PACKED_EVENT_F_WRAP_CTR);
534 KeMemoryBarrier();
535 }
536
537 return ret;
538}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_has_buf_packed()

static BOOLEAN virtqueue_has_buf_packed ( struct virtqueue *_vq)
static

Definition at line 540 of file VirtIORing-Packed.c.

541{
542 struct virtqueue_packed *vq = packedvq(_vq);
543 return more_used_packed(vq);
544}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_is_interrupt_enabled_packed()

static BOOLEAN virtqueue_is_interrupt_enabled_packed ( struct virtqueue *_vq)
static

Definition at line 456 of file VirtIORing-Packed.c.

457{
458 struct virtqueue_packed *vq = packedvq(_vq);
459 return vq->packed.event_flags_shadow & VRING_PACKED_EVENT_FLAG_DISABLE;
460}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_kick_always_packed()

static void virtqueue_kick_always_packed ( struct virtqueue *_vq)
static

Definition at line 589 of file VirtIORing-Packed.c.

590{
591 struct virtqueue_packed *vq = packedvq(_vq);
592 KeMemoryBarrier();
593 vq->num_added = 0;
594 virtqueue_notify(_vq);
595}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_kick_prepare_packed()

static bool virtqueue_kick_prepare_packed ( struct virtqueue *_vq)
static

Definition at line 546 of file VirtIORing-Packed.c.

547{
548 struct virtqueue_packed *vq = packedvq(_vq);
549 u16 new, old, off_wrap, flags, wrap_counter, event_idx;
550 bool needs_kick;
551 union {
552 struct {
553 __le16 off_wrap;
554 __le16 flags;
555 };
556 u32 value32;
557 } snapshot;
558
559 /*
560 * We need to expose the new flags value before checking notification
561 * suppressions.
562 */
563 KeMemoryBarrier();
564
565 old = vq->packed.next_avail_idx - vq->num_added;
566 new = vq->packed.next_avail_idx;
567 vq->num_added = 0;
568
569 snapshot.value32 = *(u32 *)vq->packed.vring.device;
570 flags = snapshot.flags;
571
572 if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
573 needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
574 goto out;
575 }
576
577 off_wrap = snapshot.off_wrap;
578
579 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
580 event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
581 if (wrap_counter != vq->packed.avail_wrap_counter)
582 event_idx -= (u16)vq->packed.vring.num;
583
584 needs_kick = vring_need_event(event_idx, new, old);
585out:
586 return needs_kick;
587}

Referenced by vring_new_virtqueue_packed().

◆ virtqueue_poll_packed()

static bool virtqueue_poll_packed ( struct virtqueue_packed *vq,
u16  off_wrap 
)
inline static

Definition at line 349 of file VirtIORing-Packed.c.

350{
351 bool wrap_counter;
352 u16 used_idx;
353 KeMemoryBarrier();
354
355 wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
356 used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
357
358 return is_used_desc_packed(vq, used_idx, wrap_counter);
359
360}

Referenced by virtqueue_enable_cb_packed().

◆ virtqueue_shutdown_packed()

static void virtqueue_shutdown_packed ( struct virtqueue *_vq)
static

Definition at line 462 of file VirtIORing-Packed.c.

463{
464 struct virtqueue_packed *vq = packedvq(_vq);
465 unsigned int num = vq->packed.vring.num;
466 void *pages = vq->packed.vring.desc;
467 unsigned int vring_align = _vq->vdev->addr ? PAGE_SIZE : SMP_CACHE_BYTES;
468
469 RtlZeroMemory(pages, vring_size_packed(num, vring_align));
470 vring_new_virtqueue_packed(
471 _vq->index,
472 num,
473 vring_align,
474 _vq->vdev,
475 pages,
476 _vq->notification_cb,
477 _vq);
478}

Referenced by vring_new_virtqueue_packed().

◆ vring_control_block_size_packed()

unsigned int vring_control_block_size_packed ( u16  qsize)

Definition at line 157 of file VirtIORing-Packed.c.

158{
159 return sizeof(struct virtqueue_packed) + sizeof(struct vring_desc_state_packed) * qsize;
160}

Referenced by vring_control_block_size().

◆ vring_need_event()

static bool vring_need_event ( __u16  event_idx,
__u16  new_idx,
__u16  old 
)
inline static

Definition at line 101 of file VirtIORing-Packed.c.

102{
103 /* Note: Xen has similar logic for notification hold-off
104 * in include/xen/interface/io/ring.h with req_event and req_prod
105 * corresponding to event_idx + 1 and new_idx respectively.
106 * Note also that req_event and req_prod in Xen start at 1,
107 * event indexes in virtio start at 0. */
108 return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
109}

Referenced by virtqueue_kick_prepare_packed().
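
As a worked example with illustrative numbers: if the driver last notified the device at old = 10, has now produced up to new_idx = 14, and the device asked to be notified once index event_idx = 12 is passed, then (u16)(14 - 12 - 1) = 1 is less than (u16)(14 - 10) = 4, so a notification is needed. Had the device asked for event_idx = 20 instead, (u16)(14 - 20 - 1) wraps to 65529, which is not less than 4, and the kick is suppressed.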

◆ vring_new_virtqueue_packed()

struct virtqueue * vring_new_virtqueue_packed ( unsigned int  index,
unsigned int  num,
unsigned int  vring_align,
VirtIODevice *vdev,
void *pages,
void(*)(struct virtqueue *)  notify,
void *control 
)

Definition at line 598 of file VirtIORing-Packed.c.

606{
607 struct virtqueue_packed *vq = packedvq(control);
608 unsigned int i;
609
610 vq->vq.vdev = vdev;
611 vq->vq.notification_cb = notify;
612 vq->vq.index = index;
613
614 vq->vq.avail_va = (u8 *)pages + num * sizeof(struct vring_packed_desc);
615 vq->vq.used_va = (u8 *)vq->vq.avail_va + sizeof(struct vring_packed_desc_event);
616
617 /* initialize the ring */
618 vq->packed.vring.num = num;
619 vq->packed.vring.desc = pages;
620 vq->packed.vring.driver = vq->vq.avail_va;
621 vq->packed.vring.device = vq->vq.used_va;
622
623 vq->num_free = num;
624 vq->free_head = 0;
625 vq->num_added = 0;
626 vq->packed.avail_wrap_counter = 1;
627 vq->packed.used_wrap_counter = 1;
628 vq->last_used_idx = 0;
629 vq->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
630 vq->packed.next_avail_idx = 0;
631 vq->packed.event_flags_shadow = 0;
632 vq->packed.desc_state = vq->desc_states;
633
634 RtlZeroMemory(vq->packed.desc_state, num * sizeof(*vq->packed.desc_state));
635 for (i = 0; i < num - 1; i++) {
636 vq->packed.desc_state[i].next = i + 1;
637 }
638
639 vq->vq.add_buf = virtqueue_add_buf_packed;
640 vq->vq.detach_unused_buf = virtqueue_detach_unused_buf_packed;
641 vq->vq.disable_cb = virtqueue_disable_cb_packed;
642 vq->vq.enable_cb = virtqueue_enable_cb_packed;
643 vq->vq.enable_cb_delayed = virtqueue_enable_cb_delayed_packed;
644 vq->vq.get_buf = virtqueue_get_buf_packed;
645 vq->vq.has_buf = virtqueue_has_buf_packed;
646 vq->vq.is_interrupt_enabled = virtqueue_is_interrupt_enabled_packed;
647 vq->vq.kick_always = virtqueue_kick_always_packed;
648 vq->vq.kick_prepare = virtqueue_kick_prepare_packed;
649 vq->vq.shutdown = virtqueue_shutdown_packed;
650 return &vq->vq;
651}

Referenced by vio_modern_setup_vq(), and virtqueue_shutdown_packed().
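
For orientation, a hedged end-to-end sketch of how the returned queue is typically driven; the allocation of ring_pages (vring_size_packed() bytes, zero-initialized), control (vring_control_block_size_packed() bytes) and the notify_cb routine are driver-specific placeholders, not part of this file:

struct virtqueue *vq = vring_new_virtqueue_packed(0, qsize, PAGE_SIZE, vdev,
                                                  ring_pages, notify_cb, control);

vq->add_buf(vq, sg, out, in, request, NULL, 0);   /* post a request */
if (vq->kick_prepare(vq)) {                       /* honor device event suppression */
    virtqueue_notify(vq);                         /* ring the doorbell */
}

unsigned int len;
void *done;
while ((done = vq->get_buf(vq, &len)) != NULL) {  /* reap completions in the ISR/DPC */
    /* 'done' is the opaque token passed to add_buf; 'len' bytes were written by the device */
}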

◆ vring_size_packed()

unsigned long vring_size_packed ( unsigned int  num,
unsigned long  align 
)

Definition at line 162 of file VirtIORing-Packed.c.

163{
164 /* array of descriptors */
165 unsigned long res = num * sizeof(struct vring_packed_desc);
166 /* driver and device event */
167 res += 2 * sizeof(struct vring_packed_desc_event);
168 return res;
169}

Referenced by virtqueue_shutdown_packed(), and vring_size().
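
As a worked example, assuming the standard packed-ring layouts of 16 bytes per struct vring_packed_desc and 4 bytes per struct vring_packed_desc_event, a 256-entry queue needs vring_size_packed(256, align) = 256 * 16 + 2 * 4 = 4104 bytes of ring memory; the align argument is accepted for interface symmetry with the split ring but does not affect the result.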