ReactOS  0.4.15-dev-1070-ge1a01de
VirtIOPCIModern.c
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "osdep.h"
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci.h"
#include "VirtIO.h"
#include "kdebugprint.h"
#include "virtio_ring.h"
#include "virtio_pci_common.h"
#include <stddef.h>

#ifdef WPP_EVENT_TRACING
#include "VirtIOPCIModern.tmh"
#endif

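/* Map the BAR region described by the virtio PCI capability found at
 * cap_offset. The mapping starts 'start' bytes into the capability's range,
 * must cover at least 'minlen' bytes, is clamped to at most 'size' bytes, and
 * must satisfy 'alignment'. On success the mapped length is optionally
 * returned through 'len'.
 */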
static void *vio_modern_map_capability(VirtIODevice *vdev, int cap_offset,
                                       size_t minlen, u32 alignment,
                                       u32 start, u32 size, size_t *len)
{
    u8 bar;
    u32 bar_offset, bar_length;
    void *addr;

    pci_read_config_byte(vdev, cap_offset + offsetof(struct virtio_pci_cap, bar), &bar);
    pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, offset), &bar_offset);
    pci_read_config_dword(vdev, cap_offset + offsetof(struct virtio_pci_cap, length), &bar_length);

    if (start + minlen > bar_length) {
        DPrintf(0, "bar %i cap is not large enough to map %zu bytes at offset %u\n", bar, minlen, start);
        return NULL;
    }

    bar_length -= start;
    bar_offset += start;

    if (bar_offset & (alignment - 1)) {
        DPrintf(0, "bar %i offset %u not aligned to %u\n", bar, bar_offset, alignment);
        return NULL;
    }

    if (bar_length > size) {
        bar_length = size;
    }

    if (len) {
        *len = bar_length;
    }

    if (bar_offset + minlen > pci_get_resource_len(vdev, bar)) {
        DPrintf(0, "bar %i is not large enough to map %zu bytes at offset %u\n", bar, minlen, bar_offset);
        return NULL;
    }

    addr = pci_map_address_range(vdev, bar, bar_offset, bar_length);
    if (!addr) {
        DPrintf(0, "unable to map %u bytes at bar %i offset %u\n", bar_length, bar, bar_offset);
    }
    return addr;
}

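/* Convenience wrapper around vio_modern_map_capability for fixed-size
 * structures: maps exactly 'length' bytes from the start of the capability.
 */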
static void *vio_modern_map_simple_capability(VirtIODevice *vdev, int cap_offset, size_t length, u32 alignment)
{
    return vio_modern_map_capability(
        vdev,
        cap_offset,
        length,      // minlen
        alignment,
        0,           // offset
        (u32)length, // size is equal to minlen
        NULL);       // not interested in the full length
}

static void vio_modern_get_config(VirtIODevice *vdev, unsigned offset,
                                  void *buf, unsigned len)
{
    if (!vdev->config) {
        ASSERT(!"Device has no config to read");
        return;
    }
    if (offset + len > vdev->config_len) {
        ASSERT(!"Can't read beyond the config length");
        return;
    }

    switch (len) {
    case 1:
        *(u8 *)buf = ioread8(vdev, vdev->config + offset);
        break;
    case 2:
        *(u16 *)buf = ioread16(vdev, vdev->config + offset);
        break;
    case 4:
        *(u32 *)buf = ioread32(vdev, vdev->config + offset);
        break;
    default:
        ASSERT(!"Only 1, 2, 4 byte config reads are supported");
    }
}

static void vio_modern_set_config(VirtIODevice *vdev, unsigned offset,
                                  const void *buf, unsigned len)
{
    if (!vdev->config) {
        ASSERT(!"Device has no config to write");
        return;
    }
    if (offset + len > vdev->config_len) {
        ASSERT(!"Can't write beyond the config length");
        return;
    }

    switch (len) {
    case 1:
        iowrite8(vdev, *(u8 *)buf, vdev->config + offset);
        break;
    case 2:
        iowrite16(vdev, *(u16 *)buf, vdev->config + offset);
        break;
    case 4:
        iowrite32(vdev, *(u32 *)buf, vdev->config + offset);
        break;
    default:
        ASSERT(!"Only 1, 2, 4 byte config writes are supported");
    }
}

static u32 vio_modern_get_generation(VirtIODevice *vdev)
{
    return ioread8(vdev, &vdev->common->config_generation);
}

static u8 vio_modern_get_status(VirtIODevice *vdev)
{
    return ioread8(vdev, &vdev->common->device_status);
}

static void vio_modern_set_status(VirtIODevice *vdev, u8 status)
{
    /* We should never be setting status to 0. */
    ASSERT(status != 0);
    iowrite8(vdev, status, &vdev->common->device_status);
}

static void vio_modern_reset(VirtIODevice *vdev)
{
    /* 0 status means a reset. */
    iowrite8(vdev, 0, &vdev->common->device_status);
    /* After writing 0 to device_status, the driver MUST wait for a read of
     * device_status to return 0 before reinitializing the device.
     * This will flush out the status write, and flush in device writes,
     * including MSI-X interrupts, if any.
     */
    while (ioread8(vdev, &vdev->common->device_status)) {
        vdev_sleep(vdev, 1);
    }
}

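/* The device exposes its 64-bit feature set through a 32-bit window chosen by
 * device_feature_select; read both halves and combine them.
 */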
static u64 vio_modern_get_features(VirtIODevice *vdev)
{
    u64 features;

    iowrite32(vdev, 0, &vdev->common->device_feature_select);
    features = ioread32(vdev, &vdev->common->device_feature);
    iowrite32(vdev, 1, &vdev->common->device_feature_select);
    features |= ((u64)ioread32(vdev, &vdev->common->device_feature) << 32);

    return features;
}

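/* Write the negotiated driver features back to the device, again in two
 * 32-bit halves via guest_feature_select. A modern device must offer
 * VIRTIO_F_VERSION_1; otherwise the negotiation fails.
 */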
static NTSTATUS vio_modern_set_features(VirtIODevice *vdev, u64 features)
{
    /* Give virtio_ring a chance to accept features. */
    vring_transport_features(vdev, &features);

    if (!virtio_is_feature_enabled(features, VIRTIO_F_VERSION_1)) {
        DPrintf(0, "virtio: device uses modern interface but does not have VIRTIO_F_VERSION_1\n", 0);
        return STATUS_INVALID_PARAMETER;
    }

    iowrite32(vdev, 0, &vdev->common->guest_feature_select);
    iowrite32(vdev, (u32)features, &vdev->common->guest_feature);
    iowrite32(vdev, 1, &vdev->common->guest_feature_select);
    iowrite32(vdev, features >> 32, &vdev->common->guest_feature);

    return STATUS_SUCCESS;
}

static u16 vio_modern_set_config_vector(VirtIODevice *vdev, u16 vector)
{
    /* Setup the vector used for configuration events */
    iowrite16(vdev, vector, &vdev->common->msix_config);
    /* Verify we had enough resources to assign the vector */
    /* Will also flush the write out to device */
    return ioread16(vdev, &vdev->common->msix_config);
}

static u16 vio_modern_set_queue_vector(struct virtqueue *vq, u16 vector)
{
    VirtIODevice *vdev = vq->vdev;
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;

    iowrite16(vdev, (u16)vq->index, &cfg->queue_select);
    iowrite16(vdev, vector, &cfg->queue_msix_vector);
    return ioread16(vdev, &cfg->queue_msix_vector);
}

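/* Size, in bytes, of the contiguous buffer needed to hold the vring of a
 * queue with 'num' entries, rounded up to whole pages.
 */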
static size_t vring_pci_size(u16 num, bool packed)
{
    /* We only need a cacheline separation. */
    return (size_t)ROUND_TO_PAGES(vring_size(num, SMP_CACHE_BYTES, packed));
}

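/* Query the allocation parameters of queue 'index': number of entries, size
 * of the contiguous ring buffer, and size of the nonpaged heap block needed
 * for the virtqueue control structures.
 */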
static NTSTATUS vio_modern_query_vq_alloc(VirtIODevice *vdev,
                                          unsigned index,
                                          unsigned short *pNumEntries,
                                          unsigned long *pRingSize,
                                          unsigned long *pHeapSize)
{
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;
    u16 num;

    if (index >= ioread16(vdev, &cfg->num_queues)) {
        return STATUS_NOT_FOUND;
    }

    /* Select the queue we're interested in */
    iowrite16(vdev, (u16)index, &cfg->queue_select);

    /* Check if queue is either not available or already active. */
    num = ioread16(vdev, &cfg->queue_size);
    /* QEMU has a bug where queues don't revert to inactive on device
     * reset. Skip checking the queue_enable field until it is fixed.
     */
    if (!num /*|| ioread16(vdev, &cfg->queue_enable)*/) {
        return STATUS_NOT_FOUND;
    }

    if (num & (num - 1)) {
        DPrintf(0, "%p: bad queue size %u", vdev, num);
        return STATUS_INVALID_PARAMETER;
    }

    *pNumEntries = num;
    *pRingSize = (unsigned long)vring_pci_size(num, vdev->packed_ring);
    *pHeapSize = vring_control_block_size(num, vdev->packed_ring);

    return STATUS_SUCCESS;
}

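/* Set up virtqueue 'index': query its size, allocate the ring and control
 * block, create the vring, program the ring addresses into the common config,
 * resolve the queue's notification address, optionally bind an MSI-X vector,
 * and finally enable the queue.
 */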
static NTSTATUS vio_modern_setup_vq(struct virtqueue **queue,
                                    VirtIODevice *vdev,
                                    VirtIOQueueInfo *info,
                                    unsigned index,
                                    u16 msix_vec)
{
    volatile struct virtio_pci_common_cfg *cfg = vdev->common;
    struct virtqueue *vq;
    void *vq_addr;
    u16 off;
    unsigned long ring_size, heap_size;
    NTSTATUS status;

    /* select the queue and query allocation parameters */
    status = vio_modern_query_vq_alloc(vdev, index, &info->num, &ring_size, &heap_size);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* get offset of notification word for this vq */
    off = ioread16(vdev, &cfg->queue_notify_off);

    /* try to allocate contiguous pages, scale down on failure */
    while (!(info->queue = mem_alloc_contiguous_pages(vdev, vring_pci_size(info->num, vdev->packed_ring)))) {
        if (info->num > 0) {
            info->num /= 2;
        } else {
            return STATUS_INSUFFICIENT_RESOURCES;
        }
    }

    vq_addr = mem_alloc_nonpaged_block(vdev, heap_size);
    if (vq_addr == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* create the vring */
    if (vdev->packed_ring) {
        vq = vring_new_virtqueue_packed(index, info->num,
            SMP_CACHE_BYTES, vdev,
            info->queue, vp_notify, vq_addr);
    } else {
        vq = vring_new_virtqueue_split(index, info->num,
            SMP_CACHE_BYTES, vdev,
            info->queue, vp_notify, vq_addr);
    }

    if (!vq) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_new_queue;
    }

    /* activate the queue */
    iowrite16(vdev, info->num, &cfg->queue_size);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, info->queue),
        &cfg->queue_desc_lo, &cfg->queue_desc_hi);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->avail_va),
        &cfg->queue_avail_lo, &cfg->queue_avail_hi);
    iowrite64_twopart(vdev, mem_get_physical_address(vdev, vq->used_va),
        &cfg->queue_used_lo, &cfg->queue_used_hi);

    if (vdev->notify_base) {
        /* offset should not wrap */
        if ((u64)off * vdev->notify_offset_multiplier + 2
            > vdev->notify_len) {
            DPrintf(0,
                "%p: bad notification offset %u (x %u) "
                "for queue %u > %zd",
                vdev,
                off, vdev->notify_offset_multiplier,
                index, vdev->notify_len);
            status = STATUS_INVALID_PARAMETER;
            goto err_map_notify;
        }
        vq->notification_addr = (void *)(vdev->notify_base +
            off * vdev->notify_offset_multiplier);
    } else {
        vq->notification_addr = vio_modern_map_capability(vdev,
            vdev->notify_map_cap, 2, 2,
            off * vdev->notify_offset_multiplier, 2,
            NULL);
    }

    if (!vq->notification_addr) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto err_map_notify;
    }

    if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
        msix_vec = vdev->device->set_queue_vector(vq, msix_vec);
        if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
            status = STATUS_DEVICE_BUSY;
            goto err_assign_vector;
        }
    }

    /* enable the queue */
    iowrite16(vdev, 1, &vdev->common->queue_enable);

    *queue = vq;
    return STATUS_SUCCESS;

err_assign_vector:
err_map_notify:
    virtqueue_shutdown(vq);
err_new_queue:
    mem_free_nonpaged_block(vdev, vq_addr);
    mem_free_contiguous_pages(vdev, info->queue);
    return status;
}

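/* Tear down a virtqueue: detach its MSI-X vector if one was in use, shut the
 * queue down, and release the control block and the ring pages.
 */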
static void vio_modern_del_vq(VirtIOQueueInfo *info)
{
    struct virtqueue *vq = info->vq;
    VirtIODevice *vdev = vq->vdev;

    iowrite16(vdev, (u16)vq->index, &vdev->common->queue_select);

    if (vdev->msix_used) {
        iowrite16(vdev, VIRTIO_MSI_NO_VECTOR, &vdev->common->queue_msix_vector);
        /* Flush the write out to device */
        ioread16(vdev, &vdev->common->queue_msix_vector);
    }

    virtqueue_shutdown(vq);

    mem_free_nonpaged_block(vdev, vq);
    mem_free_contiguous_pages(vdev, info->queue);
}

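/* Dispatch table wiring the modern (virtio 1.0) transport callbacks into the
 * generic virtio device layer.
 */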
static const struct virtio_device_ops virtio_pci_device_ops = {
    /* .get_config = */ vio_modern_get_config,
    /* .set_config = */ vio_modern_set_config,
    /* .get_config_generation = */ vio_modern_get_generation,
    /* .get_status = */ vio_modern_get_status,
    /* .set_status = */ vio_modern_set_status,
    /* .reset = */ vio_modern_reset,
    /* .get_features = */ vio_modern_get_features,
    /* .set_features = */ vio_modern_set_features,
    /* .set_config_vector = */ vio_modern_set_config_vector,
    /* .set_queue_vector = */ vio_modern_set_queue_vector,
    /* .query_queue_alloc = */ vio_modern_query_vq_alloc,
    /* .setup_queue = */ vio_modern_setup_vq,
    /* .delete_queue = */ vio_modern_del_vq,
};

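/* Walk the PCI capability list starting at the capability pointer stored at
 * 'offset' and return the config-space offset of the next vendor-specific
 * capability, or 0 if none is found. The iteration limit guards against
 * malformed, looping capability lists.
 */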
static u8 find_next_pci_vendor_capability(VirtIODevice *vdev, u8 offset)
{
    u8 id = 0;
    int iterations = 48;

    if (pci_read_config_byte(vdev, offset, &offset) != 0) {
        return 0;
    }

    while (iterations-- && offset >= 0x40) {
        offset &= ~3;
        if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER,
            CapabilityID), &id) != 0) {
            break;
        }
        if (id == 0xFF) {
            break;
        }
        if (id == PCI_CAPABILITY_ID_VENDOR_SPECIFIC) {
            return offset;
        }
        if (pci_read_config_byte(vdev, offset + offsetof(PCI_CAPABILITIES_HEADER,
            Next), &offset) != 0) {
            break;
        }
    }
    return 0;
}

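/* Locate the first vendor-specific capability in the device's PCI config
 * space, taking the header type into account to find the right capabilities
 * pointer. Returns 0 if the device has no capability list.
 */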
static u8 find_first_pci_vendor_capability(VirtIODevice *vdev)
{
    u8 hdr_type, offset;
    u16 status;

    if (pci_read_config_byte(vdev, offsetof(PCI_COMMON_HEADER, HeaderType), &hdr_type) != 0) {
        return 0;
    }
    if (pci_read_config_word(vdev, offsetof(PCI_COMMON_HEADER, Status), &status) != 0) {
        return 0;
    }
    if ((status & PCI_STATUS_CAPABILITIES_LIST) == 0) {
        return 0;
    }

    switch (hdr_type & ~PCI_MULTIFUNCTION) {
    case PCI_BRIDGE_TYPE:
        offset = offsetof(PCI_COMMON_HEADER, u.type1.CapabilitiesPtr);
        break;
    case PCI_CARDBUS_BRIDGE_TYPE:
        offset = offsetof(PCI_COMMON_HEADER, u.type2.CapabilitiesPtr);
        break;
    default:
        offset = offsetof(PCI_COMMON_HEADER, u.type0.CapabilitiesPtr);
        break;
    }

    if (offset != 0) {
        offset = find_next_pci_vendor_capability(vdev, offset);
    }
    return offset;
}

/* Populate Offsets with virtio vendor capability offsets within the PCI config space */
static void find_pci_vendor_capabilities(VirtIODevice *vdev, int *Offsets, size_t nOffsets)
{
    u8 offset = find_first_pci_vendor_capability(vdev);
    while (offset > 0) {
        u8 cfg_type, bar;
        pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, cfg_type), &cfg_type);
        pci_read_config_byte(vdev, offset + offsetof(struct virtio_pci_cap, bar), &bar);

        if (bar < PCI_TYPE0_ADDRESSES &&
            cfg_type < nOffsets &&
            pci_get_resource_len(vdev, bar) > 0) {
            Offsets[cfg_type] = offset;
        }

        offset = find_next_pci_vendor_capability(vdev, offset);
    }
}

/* Modern device initialization */
NTSTATUS vio_modern_initialize(VirtIODevice *vdev)
{
    int capabilities[VIRTIO_PCI_CAP_PCI_CFG];

    u32 notify_length;
    u32 notify_offset;

    RtlZeroMemory(capabilities, sizeof(capabilities));
    find_pci_vendor_capabilities(vdev, capabilities, VIRTIO_PCI_CAP_PCI_CFG);

    /* Check for a common config, if not found use legacy mode */
    if (!capabilities[VIRTIO_PCI_CAP_COMMON_CFG]) {
        DPrintf(0, "%s(%p): device not found\n", __FUNCTION__, vdev);
        return STATUS_DEVICE_NOT_CONNECTED;
    }

    /* Check isr and notify caps, if not found fail */
    if (!capabilities[VIRTIO_PCI_CAP_ISR_CFG] || !capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]) {
        DPrintf(0, "%s(%p): missing capabilities %i/%i/%i\n",
            __FUNCTION__, vdev,
            capabilities[VIRTIO_PCI_CAP_COMMON_CFG],
            capabilities[VIRTIO_PCI_CAP_ISR_CFG],
            capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG]);
        return STATUS_INVALID_PARAMETER;
    }

    /* Map bars according to the capabilities */
    vdev->common = vio_modern_map_simple_capability(vdev,
        capabilities[VIRTIO_PCI_CAP_COMMON_CFG],
        sizeof(struct virtio_pci_common_cfg), 4);
    if (!vdev->common) {
        return STATUS_INVALID_PARAMETER;
    }

    vdev->isr = vio_modern_map_simple_capability(vdev,
        capabilities[VIRTIO_PCI_CAP_ISR_CFG],
        sizeof(u8), 1);
    if (!vdev->isr) {
        return STATUS_INVALID_PARAMETER;
    }

    /* Read notify_off_multiplier from config space. */
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
            notify_off_multiplier),
        &vdev->notify_offset_multiplier);

    /* Read notify length and offset from config space. */
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
            cap.length),
        &notify_length);
    pci_read_config_dword(vdev,
        capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG] + offsetof(struct virtio_pci_notify_cap,
            cap.offset),
        &notify_offset);

    /* Map the notify capability if it's small enough.
     * Otherwise, map each VQ individually later.
     */
    if (notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
        vdev->notify_base = vio_modern_map_capability(vdev,
            capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG], 2, 2,
            0, notify_length,
            &vdev->notify_len);
        if (!vdev->notify_base) {
            return STATUS_INVALID_PARAMETER;
        }
    } else {
        vdev->notify_map_cap = capabilities[VIRTIO_PCI_CAP_NOTIFY_CFG];
    }

    /* Map the device config capability, the PAGE_SIZE size is a guess */
    if (capabilities[VIRTIO_PCI_CAP_DEVICE_CFG]) {
        vdev->config = vio_modern_map_capability(vdev,
            capabilities[VIRTIO_PCI_CAP_DEVICE_CFG], 0, 4,
            0, PAGE_SIZE,
            &vdev->config_len);
        if (!vdev->config) {
            return STATUS_INVALID_PARAMETER;
        }
    }

    vdev->device = &virtio_pci_device_ops;

    return STATUS_SUCCESS;
}
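
/*
 * Illustrative call sequence (a sketch, not part of this driver): once the
 * PCI resources of a virtio device have been discovered, a front-end driver
 * would roughly proceed as follows, using the ops installed above:
 *
 *     NTSTATUS status = vio_modern_initialize(vdev);
 *     if (NT_SUCCESS(status)) {
 *         u64 features = vdev->device->get_features(vdev);
 *         // ... mask to the features the driver actually supports ...
 *         status = vdev->device->set_features(vdev, features);
 *         // ... then query_queue_alloc / setup_queue for each virtqueue ...
 *     }
 */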