ReactOS 0.4.15-dev-6068-g8061a6f
context.c
1/*
2 * Context and render target management in wined3d
3 *
4 * Copyright 2002-2004 Jason Edmeades
5 * Copyright 2002-2004 Raphael Junqueira
6 * Copyright 2004 Christian Costa
7 * Copyright 2005 Oliver Stieber
8 * Copyright 2006, 2008 Henri Verbeet
9 * Copyright 2007-2011, 2013 Stefan Dösinger for CodeWeavers
10 * Copyright 2009-2011 Henri Verbeet for CodeWeavers
11 *
12 * This library is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public
14 * License as published by the Free Software Foundation; either
15 * version 2.1 of the License, or (at your option) any later version.
16 *
17 * This library is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with this library; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
25 */
26
27#include "config.h"
28#include "wine/port.h"
29
30#include <stdio.h>
31#ifdef HAVE_FLOAT_H
32# include <float.h>
33#endif
34
35#include "wined3d_private.h"
36
37#ifdef __REACTOS__
38#include <reactos/undocuser.h>
39#endif
40
41WINE_DEFAULT_DEBUG_CHANNEL(d3d);
42WINE_DECLARE_DEBUG_CHANNEL(d3d_perf);
43WINE_DECLARE_DEBUG_CHANNEL(d3d_synchronous);
44
45#define WINED3D_MAX_FBO_ENTRIES 64
46#define WINED3D_ALL_LAYERS (~0u)
47
48static DWORD wined3d_context_tls_idx;
49
50/* FBO helper functions */
51
52/* Context activation is done by the caller. */
53static void context_bind_fbo(struct wined3d_context *context, GLenum target, GLuint fbo)
54{
55 const struct wined3d_gl_info *gl_info = context->gl_info;
56
57 switch (target)
58 {
59 case GL_READ_FRAMEBUFFER:
60 if (context->fbo_read_binding == fbo) return;
61 context->fbo_read_binding = fbo;
62 break;
63
64 case GL_DRAW_FRAMEBUFFER:
65 if (context->fbo_draw_binding == fbo) return;
66 context->fbo_draw_binding = fbo;
67 break;
68
69 case GL_FRAMEBUFFER:
70 if (context->fbo_read_binding == fbo
71 && context->fbo_draw_binding == fbo) return;
72 context->fbo_read_binding = fbo;
73 context->fbo_draw_binding = fbo;
74 break;
75
76 default:
77 FIXME("Unhandled target %#x.\n", target);
78 break;
79 }
80
81 gl_info->fbo_ops.glBindFramebuffer(target, fbo);
82 checkGLcall("glBindFramebuffer()");
83}
84
85/* Context activation is done by the caller. */
86static void context_clean_fbo_attachments(const struct wined3d_gl_info *gl_info, GLenum target)
87{
88 unsigned int i;
89
90 for (i = 0; i < gl_info->limits.buffers; ++i)
91 {
92 gl_info->fbo_ops.glFramebufferTexture2D(target, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, 0, 0);
93 checkGLcall("glFramebufferTexture2D()");
94 }
95 gl_info->fbo_ops.glFramebufferTexture2D(target, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
96 checkGLcall("glFramebufferTexture2D()");
97
98 gl_info->fbo_ops.glFramebufferTexture2D(target, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
99 checkGLcall("glFramebufferTexture2D()");
100}
101
102/* Context activation is done by the caller. */
103static void context_destroy_fbo(struct wined3d_context *context, GLuint fbo)
104{
105 const struct wined3d_gl_info *gl_info = context->gl_info;
106
107 context_bind_fbo(context, GL_FRAMEBUFFER, fbo);
108 context_clean_fbo_attachments(gl_info, GL_FRAMEBUFFER);
109 context_bind_fbo(context, GL_FRAMEBUFFER, 0);
110
111 gl_info->fbo_ops.glDeleteFramebuffers(1, &fbo);
112 checkGLcall("glDeleteFramebuffers()");
113}
114
115static void context_attach_depth_stencil_rb(const struct wined3d_gl_info *gl_info,
116 GLenum fbo_target, DWORD flags, GLuint rb)
117{
118 if (flags & WINED3D_FBO_ENTRY_FLAG_DEPTH)
119 {
120 gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rb);
121 checkGLcall("glFramebufferRenderbuffer()");
122 }
123
124 if (flags & WINED3D_FBO_ENTRY_FLAG_STENCIL)
125 {
126 gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rb);
127 checkGLcall("glFramebufferRenderbuffer()");
128 }
129}
130
131static void context_attach_gl_texture_fbo(struct wined3d_context *context,
132 GLenum fbo_target, GLenum attachment, const struct wined3d_fbo_resource *resource)
133{
134 const struct wined3d_gl_info *gl_info = context->gl_info;
135
136 if (!resource)
137 {
138 gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, attachment, GL_TEXTURE_2D, 0, 0);
139 }
140 else if (resource->layer == WINED3D_ALL_LAYERS)
141 {
142 if (!gl_info->fbo_ops.glFramebufferTexture)
143 {
144 FIXME("OpenGL implementation doesn't support glFramebufferTexture().\n");
145 return;
146 }
147
148 gl_info->fbo_ops.glFramebufferTexture(fbo_target, attachment,
149 resource->object, resource->level);
150 }
151 else if (resource->target == GL_TEXTURE_1D_ARRAY || resource->target == GL_TEXTURE_2D_ARRAY ||
152 resource->target == GL_TEXTURE_3D)
153 {
154 if (!gl_info->fbo_ops.glFramebufferTextureLayer)
155 {
156 FIXME("OpenGL implementation doesn't support glFramebufferTextureLayer().\n");
157 return;
158 }
159
160 gl_info->fbo_ops.glFramebufferTextureLayer(fbo_target, attachment,
161 resource->object, resource->level, resource->layer);
162 }
163 else if (resource->target == GL_TEXTURE_1D)
164 {
165 gl_info->fbo_ops.glFramebufferTexture1D(fbo_target, attachment,
166 resource->target, resource->object, resource->level);
167 checkGLcall("glFramebufferTexture1D()");
168 }
169 else
170 {
171 gl_info->fbo_ops.glFramebufferTexture2D(fbo_target, attachment,
172 resource->target, resource->object, resource->level);
173 }
174 checkGLcall("attach texture to fbo");
175}
176
177/* Context activation is done by the caller. */
178static void context_attach_depth_stencil_fbo(struct wined3d_context *context,
179 GLenum fbo_target, const struct wined3d_fbo_resource *resource, BOOL rb_namespace,
180 DWORD flags)
181{
182 const struct wined3d_gl_info *gl_info = context->gl_info;
183
184 if (resource->object)
185 {
186 TRACE("Attach depth stencil %u.\n", resource->object);
187
188 if (rb_namespace)
189 {
190 context_attach_depth_stencil_rb(gl_info, fbo_target,
191 flags, resource->object);
192 }
193 else
194 {
197
200 }
201
204
207 }
208 else
209 {
210 TRACE("Attach depth stencil 0.\n");
211
214 }
215}
216
217/* Context activation is done by the caller. */
218static void context_attach_surface_fbo(struct wined3d_context *context,
219 GLenum fbo_target, DWORD idx, const struct wined3d_fbo_resource *resource, BOOL rb_namespace)
220{
221 const struct wined3d_gl_info *gl_info = context->gl_info;
222
223 TRACE("Attach GL object %u to %u.\n", resource->object, idx);
224
225 if (resource->object)
226 {
227 if (rb_namespace)
228 {
229 gl_info->fbo_ops.glFramebufferRenderbuffer(fbo_target, GL_COLOR_ATTACHMENT0 + idx,
230 GL_RENDERBUFFER, resource->object);
231 checkGLcall("glFramebufferRenderbuffer()");
232 }
233 else
234 {
235 context_attach_gl_texture_fbo(context, fbo_target, GL_COLOR_ATTACHMENT0 + idx, resource);
236 }
237 }
238 else
239 {
240 context_attach_gl_texture_fbo(context, fbo_target, GL_COLOR_ATTACHMENT0 + idx, NULL);
241 }
242}
243
244static void context_dump_fbo_attachment(const struct wined3d_gl_info *gl_info, GLenum target,
245 GLenum attachment)
246{
247 static const struct
248 {
249 GLenum target;
250 GLenum binding;
251 const char *str;
252 enum wined3d_gl_extension extension;
253 }
254 texture_type[] =
255 {
256 {GL_TEXTURE_2D, GL_TEXTURE_BINDING_2D, "2d", WINED3D_GL_EXT_NONE},
257 {GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_BINDING_RECTANGLE_ARB, "rectangle", ARB_TEXTURE_RECTANGLE},
258 {GL_TEXTURE_2D_ARRAY, GL_TEXTURE_BINDING_2D_ARRAY, "2d-array", EXT_TEXTURE_ARRAY},
259 {GL_TEXTURE_CUBE_MAP, GL_TEXTURE_BINDING_CUBE_MAP, "cube", ARB_TEXTURE_CUBE_MAP},
260 {GL_TEXTURE_2D_MULTISAMPLE, GL_TEXTURE_BINDING_2D_MULTISAMPLE, "2d-ms", ARB_TEXTURE_MULTISAMPLE},
261 {GL_TEXTURE_2D_MULTISAMPLE_ARRAY, GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY, "2d-array-ms", ARB_TEXTURE_MULTISAMPLE},
262 };
263
264 GLint type, name, samples, width, height, old_texture, level, face, fmt, tex_target;
265 const char *tex_type_str;
266 unsigned int i;
267
268 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
269 GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE, &type);
270 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
271 GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME, &name);
272
273 if (type == GL_RENDERBUFFER)
274 {
275 gl_info->fbo_ops.glBindRenderbuffer(GL_RENDERBUFFER, name);
276 gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &width);
277 gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &height);
278 if (gl_info->limits.samples > 1)
279 gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_SAMPLES, &samples);
280 else
281 samples = 1;
282 gl_info->fbo_ops.glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_INTERNAL_FORMAT, &fmt);
283 FIXME(" %s: renderbuffer %d, %dx%d, %d samples, format %#x.\n",
284 debug_fboattachment(attachment), name, width, height, samples, fmt);
285 }
286 else if (type == GL_TEXTURE)
287 {
288 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
289 GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL, &level);
290 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv(target, attachment,
291 GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE, &face);
292
293 if (gl_info->gl_ops.ext.p_glGetTextureParameteriv)
294 {
295 GL_EXTCALL(glGetTextureParameteriv(name, GL_TEXTURE_TARGET, &tex_target));
296
297 for (i = 0; i < ARRAY_SIZE(texture_type); ++i)
298 {
299 if (texture_type[i].target == tex_target)
300 {
301 tex_type_str = texture_type[i].str;
302 break;
303 }
304 }
305 if (i == ARRAY_SIZE(texture_type))
306 tex_type_str = wine_dbg_sprintf("%#x", tex_target);
307 }
308 else if (face)
309 {
310 gl_info->gl_ops.gl.p_glGetIntegerv(GL_TEXTURE_BINDING_CUBE_MAP, &old_texture);
311 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, name);
312
313 tex_target = GL_TEXTURE_CUBE_MAP;
314 tex_type_str = "cube";
315 }
316 else
317 {
318 tex_type_str = NULL;
319
320 for (i = 0; i < ARRAY_SIZE(texture_type); ++i)
321 {
322 if (!gl_info->supported[texture_type[i].extension])
323 continue;
324
325 gl_info->gl_ops.gl.p_glGetIntegerv(texture_type[i].binding, &old_texture);
326 while (gl_info->gl_ops.gl.p_glGetError());
327
328 gl_info->gl_ops.gl.p_glBindTexture(texture_type[i].target, name);
329 if (!gl_info->gl_ops.gl.p_glGetError())
330 {
331 tex_target = texture_type[i].target;
332 tex_type_str = texture_type[i].str;
333 break;
334 }
335 gl_info->gl_ops.gl.p_glBindTexture(texture_type[i].target, old_texture);
336 }
337
338 if (!tex_type_str)
339 {
340 FIXME("Cannot find type of texture %d.\n", name);
341 return;
342 }
343 }
344
345 if (gl_info->gl_ops.ext.p_glGetTextureParameteriv)
346 {
347 GL_EXTCALL(glGetTextureLevelParameteriv(name, level, GL_TEXTURE_INTERNAL_FORMAT, &fmt));
348 GL_EXTCALL(glGetTextureLevelParameteriv(name, level, GL_TEXTURE_WIDTH, &width));
349 GL_EXTCALL(glGetTextureLevelParameteriv(name, level, GL_TEXTURE_HEIGHT, &height));
350 GL_EXTCALL(glGetTextureLevelParameteriv(name, level, GL_TEXTURE_SAMPLES, &samples));
351 }
352 else
353 {
354 gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(tex_target, level, GL_TEXTURE_INTERNAL_FORMAT, &fmt);
355 gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(tex_target, level, GL_TEXTURE_WIDTH, &width);
356 gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(tex_target, level, GL_TEXTURE_HEIGHT, &height);
357 if (gl_info->supported[ARB_TEXTURE_MULTISAMPLE])
358 gl_info->gl_ops.gl.p_glGetTexLevelParameteriv(tex_target, level, GL_TEXTURE_SAMPLES, &samples);
359 else
360 samples = 1;
361
362 gl_info->gl_ops.gl.p_glBindTexture(tex_target, old_texture);
363 }
364
365 FIXME(" %s: %s texture %d, %dx%d, %d samples, format %#x.\n",
366 debug_fboattachment(attachment), tex_type_str, name, width, height, samples, fmt);
367 }
368 else if (type == GL_NONE)
369 {
370 FIXME(" %s: NONE.\n", debug_fboattachment(attachment));
371 }
372 else
373 {
374 ERR(" %s: Unknown attachment %#x.\n", debug_fboattachment(attachment), type);
375 }
376
377 checkGLcall("dump FBO attachment");
378}
379
380/* Context activation is done by the caller. */
381void context_check_fbo_status(const struct wined3d_context *context, GLenum target)
382{
383 const struct wined3d_gl_info *gl_info = context->gl_info;
384 GLenum status;
385
386 if (!FIXME_ON(d3d))
387 return;
388
389 status = gl_info->fbo_ops.glCheckFramebufferStatus(target);
390 if (status == GL_FRAMEBUFFER_COMPLETE)
391 {
392 TRACE("FBO complete.\n");
393 }
394 else
395 {
396 unsigned int i;
397
398 FIXME("FBO status %s (%#x).\n", debug_fbostatus(status), status);
399
400 if (!context->current_fbo)
401 {
402 ERR("FBO 0 is incomplete, driver bug?\n");
403 return;
404 }
405
408
409 for (i = 0; i < gl_info->limits.buffers; ++i)
410 context_dump_fbo_attachment(gl_info, target, GL_COLOR_ATTACHMENT0 + i);
411 }
412}
413
414static DWORD context_generate_rt_mask(GLenum buffer)
415{
416 /* Should take care of all the GL_FRONT/GL_BACK/GL_AUXi/GL_NONE... cases */
417 return buffer ? (1u << 31) | buffer : 0;
418}
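/* Illustrative note (an assumption from the surrounding code, not original source):
 * context_generate_rt_mask() tags window-system buffers by setting bit 31, so e.g.
 * GL_BACK (0x0405) becomes 0x80000405, while a zero buffer yields an empty mask.
 * Render-target masks for FBO attachments used elsewhere in wined3d appear to use
 * plain (1u << attachment_index) bits, which keeps the two namespaces from colliding. */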
419
420static DWORD context_generate_rt_mask_from_resource(struct wined3d_resource *resource)
421{
422 if (resource->type == WINED3D_RTYPE_BUFFER)
423 {
424 FIXME("Not implemented for %s resources.\n", debug_d3dresourcetype(resource->type));
425 return 0;
426 }
427
428 return (1u << 31) | wined3d_texture_get_gl_buffer(texture_from_resource(resource));
429}
430
431static void context_set_fbo_key_for_render_target(const struct wined3d_context *context,
432 struct wined3d_fbo_entry_key *key, unsigned int idx, const struct wined3d_rendertarget_info *render_target,
433 DWORD location)
434{
435 unsigned int sub_resource_idx = render_target->sub_resource_idx;
436 struct wined3d_resource *resource = render_target->resource;
437 struct wined3d_texture *texture;
438
439 if (!resource || resource->format->id == WINED3DFMT_NULL || resource->type == WINED3D_RTYPE_BUFFER)
440 {
441 if (resource && resource->type == WINED3D_RTYPE_BUFFER)
442 FIXME("Not implemented for %s resources.\n", debug_d3dresourcetype(resource->type));
443 key->objects[idx].object = 0;
444 key->objects[idx].target = 0;
445 key->objects[idx].level = key->objects[idx].layer = 0;
446 return;
447 }
448
449 if (render_target->gl_view.name)
450 {
451 key->objects[idx].object = render_target->gl_view.name;
452 key->objects[idx].target = render_target->gl_view.target;
453 key->objects[idx].level = 0;
454 key->objects[idx].layer = WINED3D_ALL_LAYERS;
455 return;
456 }
457
458 texture = wined3d_texture_from_resource(resource);
459 if (resource->type == WINED3D_RTYPE_TEXTURE_2D)
460 {
461 struct wined3d_surface *surface = texture->sub_resources[sub_resource_idx].u.surface;
462
463 if (surface->current_renderbuffer)
464 {
465 key->objects[idx].object = surface->current_renderbuffer->id;
466 key->objects[idx].target = 0;
467 key->objects[idx].level = key->objects[idx].layer = 0;
468 key->rb_namespace |= 1 << idx;
469 return;
470 }
471 }
472 key->objects[idx].target = wined3d_texture_get_sub_resource_target(texture, sub_resource_idx);
473 key->objects[idx].level = sub_resource_idx % texture->level_count;
474 key->objects[idx].layer = sub_resource_idx / texture->level_count;
475
476 if (render_target->layer_count != 1)
477 key->objects[idx].layer = WINED3D_ALL_LAYERS;
478
479 switch (location)
480 {
483 break;
484
487 break;
488
489 case WINED3D_LOCATION_RB_MULTISAMPLE:
490 key->objects[idx].object = texture->rb_multisample;
491 key->objects[idx].target = 0;
492 key->objects[idx].level = key->objects[idx].layer = 0;
493 key->rb_namespace |= 1 << idx;
494 break;
495
496 case WINED3D_LOCATION_RB_RESOLVED:
497 key->objects[idx].object = texture->rb_resolved;
498 key->objects[idx].target = 0;
499 key->objects[idx].level = key->objects[idx].layer = 0;
500 key->rb_namespace |= 1 << idx;
501 break;
502 }
503}
504
505static void context_generate_fbo_key(const struct wined3d_context *context,
506 struct wined3d_fbo_entry_key *key, const struct wined3d_rendertarget_info *render_targets,
507 const struct wined3d_rendertarget_info *depth_stencil, DWORD color_location, DWORD ds_location)
508{
509 unsigned int buffers = context->gl_info->limits.buffers;
510 unsigned int i;
511
512 key->rb_namespace = 0;
513 context_set_fbo_key_for_render_target(context, key, 0, depth_stencil, ds_location);
514
515 for (i = 0; i < buffers; ++i)
516 context_set_fbo_key_for_render_target(context, key, i + 1, &render_targets[i], color_location);
517
518 memset(&key->objects[buffers + 1], 0, (ARRAY_SIZE(key->objects) - buffers - 1) * sizeof(*key->objects));
519}
520
521static struct fbo_entry *context_create_fbo_entry(const struct wined3d_context *context,
522 const struct wined3d_rendertarget_info *render_targets, const struct wined3d_rendertarget_info *depth_stencil,
523 DWORD color_location, DWORD ds_location)
524{
525 const struct wined3d_gl_info *gl_info = context->gl_info;
526 struct fbo_entry *entry;
527
528 entry = heap_alloc(sizeof(*entry));
529 context_generate_fbo_key(context, &entry->key, render_targets, depth_stencil, color_location, ds_location);
530 entry->flags = 0;
531 if (depth_stencil->resource)
532 {
533 if (depth_stencil->resource->format_flags & WINED3DFMT_FLAG_DEPTH)
534 entry->flags |= WINED3D_FBO_ENTRY_FLAG_DEPTH;
535 if (depth_stencil->resource->format_flags & WINED3DFMT_FLAG_STENCIL)
536 entry->flags |= WINED3D_FBO_ENTRY_FLAG_STENCIL;
537 }
539 gl_info->fbo_ops.glGenFramebuffers(1, &entry->id);
540 checkGLcall("glGenFramebuffers()");
541 TRACE("Created FBO %u.\n", entry->id);
542
543 return entry;
544}
545
546/* Context activation is done by the caller. */
547static void context_reuse_fbo_entry(struct wined3d_context *context, GLenum target,
548 const struct wined3d_rendertarget_info *render_targets, const struct wined3d_rendertarget_info *depth_stencil,
549 DWORD color_location, DWORD ds_location, struct fbo_entry *entry)
550{
551 const struct wined3d_gl_info *gl_info = context->gl_info;
552
555
556 context_generate_fbo_key(context, &entry->key, render_targets, depth_stencil, color_location, ds_location);
557 entry->flags = 0;
558 if (depth_stencil->resource)
559 {
560 if (depth_stencil->resource->format_flags & WINED3DFMT_FLAG_DEPTH)
561 entry->flags |= WINED3D_FBO_ENTRY_FLAG_DEPTH;
562 if (depth_stencil->resource->format_flags & WINED3DFMT_FLAG_STENCIL)
563 entry->flags |= WINED3D_FBO_ENTRY_FLAG_STENCIL;
564 }
565}
566
567/* Context activation is done by the caller. */
568static void context_destroy_fbo_entry(struct wined3d_context *context, struct fbo_entry *entry)
569{
570 if (entry->id)
571 {
572 TRACE("Destroy FBO %u.\n", entry->id);
573 context_destroy_fbo(context, entry->id);
574 }
575 --context->fbo_entry_count;
576 list_remove(&entry->entry);
577 heap_free(entry);
578}
579
580/* Context activation is done by the caller. */
581static struct fbo_entry *context_find_fbo_entry(struct wined3d_context *context, GLenum target,
582 const struct wined3d_rendertarget_info *render_targets, const struct wined3d_rendertarget_info *depth_stencil,
583 DWORD color_location, DWORD ds_location)
584{
585 static const struct wined3d_rendertarget_info ds_null = {{0}};
586 const struct wined3d_gl_info *gl_info = context->gl_info;
587 struct wined3d_texture *rt_texture, *ds_texture;
588 struct wined3d_fbo_entry_key fbo_key;
589 unsigned int i, ds_level, rt_level;
590 struct fbo_entry *entry;
591
592 if (depth_stencil->resource && depth_stencil->resource->type != WINED3D_RTYPE_BUFFER
593 && render_targets[0].resource && render_targets[0].resource->type != WINED3D_RTYPE_BUFFER)
594 {
595 rt_texture = wined3d_texture_from_resource(render_targets[0].resource);
596 rt_level = render_targets[0].sub_resource_idx % rt_texture->level_count;
597 ds_texture = wined3d_texture_from_resource(depth_stencil->resource);
598 ds_level = depth_stencil->sub_resource_idx % ds_texture->level_count;
599
600 if (wined3d_texture_get_level_width(ds_texture, ds_level)
601 < wined3d_texture_get_level_width(rt_texture, rt_level)
602 || wined3d_texture_get_level_height(ds_texture, ds_level)
603 < wined3d_texture_get_level_height(rt_texture, rt_level))
604 {
605 WARN("Depth stencil is smaller than the primary color buffer, disabling.\n");
606 depth_stencil = &ds_null;
607 }
608 else if (ds_texture->resource.multisample_type != rt_texture->resource.multisample_type
609 || ds_texture->resource.multisample_quality != rt_texture->resource.multisample_quality)
610 {
611 WARN("Color multisample type %u and quality %u, depth stencil has %u and %u, disabling ds buffer.\n",
612 rt_texture->resource.multisample_type, rt_texture->resource.multisample_quality,
613 ds_texture->resource.multisample_type, ds_texture->resource.multisample_quality);
614 depth_stencil = &ds_null;
615 }
616 else if (depth_stencil->resource->type == WINED3D_RTYPE_TEXTURE_2D)
617 {
618 struct wined3d_surface *surface;
619
620 surface = ds_texture->sub_resources[depth_stencil->sub_resource_idx].u.surface;
621 surface_set_compatible_renderbuffer(surface, &render_targets[0]);
622 }
623 }
624
625 context_generate_fbo_key(context, &fbo_key, render_targets, depth_stencil, color_location, ds_location);
626
627 if (TRACE_ON(d3d))
628 {
630 unsigned int width, height;
631 const char *resource_type;
632
633 TRACE("Dumping FBO attachments:\n");
634 for (i = 0; i < gl_info->limits.buffers; ++i)
635 {
636 if ((resource = render_targets[i].resource))
637 {
638 if (resource->type == WINED3D_RTYPE_BUFFER)
639 {
641 height = 1;
642 resource_type = "buffer";
643 }
644 else
645 {
647 rt_level = render_targets[i].sub_resource_idx % rt_texture->level_count;
648 width = wined3d_texture_get_level_pow2_width(rt_texture, rt_level);
649 height = wined3d_texture_get_level_pow2_height(rt_texture, rt_level);
650 resource_type = "texture";
651 }
652
653 TRACE(" Color attachment %u: %p, %u format %s, %s %u, %ux%u, %u samples.\n",
654 i, resource, render_targets[i].sub_resource_idx, debug_d3dformat(resource->format->id),
655 fbo_key.rb_namespace & (1 << (i + 1)) ? "renderbuffer" : resource_type,
656 fbo_key.objects[i + 1].object, width, height, resource->multisample_type);
657 }
658 }
659 if ((resource = depth_stencil->resource))
660 {
661 if (resource->type == WINED3D_RTYPE_BUFFER)
662 {
664 height = 1;
665 resource_type = "buffer";
666 }
667 else
668 {
670 ds_level = depth_stencil->sub_resource_idx % ds_texture->level_count;
671 width = wined3d_texture_get_level_pow2_width(ds_texture, ds_level);
672 height = wined3d_texture_get_level_pow2_height(ds_texture, ds_level);
673 resource_type = "texture";
674 }
675
676 TRACE(" Depth attachment: %p, %u format %s, %s %u, %ux%u, %u samples.\n",
677 resource, depth_stencil->sub_resource_idx, debug_d3dformat(resource->format->id),
678 fbo_key.rb_namespace & (1 << 0) ? "renderbuffer" : resource_type,
679 fbo_key.objects[0].object, width, height, resource->multisample_type);
680 }
681 }
682
683 LIST_FOR_EACH_ENTRY(entry, &context->fbo_list, struct fbo_entry, entry)
684 {
685 if (memcmp(&fbo_key, &entry->key, sizeof(fbo_key)))
686 continue;
687
688 list_remove(&entry->entry);
689 list_add_head(&context->fbo_list, &entry->entry);
690 return entry;
691 }
692
693 if (context->fbo_entry_count < WINED3D_MAX_FBO_ENTRIES)
694 {
695 entry = context_create_fbo_entry(context, render_targets, depth_stencil, color_location, ds_location);
696 list_add_head(&context->fbo_list, &entry->entry);
697 ++context->fbo_entry_count;
698 }
699 else
700 {
701 entry = LIST_ENTRY(list_tail(&context->fbo_list), struct fbo_entry, entry);
702 context_reuse_fbo_entry(context, target, render_targets, depth_stencil, color_location, ds_location, entry);
703 list_remove(&entry->entry);
704 list_add_head(&context->fbo_list, &entry->entry);
705 }
706
707 return entry;
708}
709
710/* Context activation is done by the caller. */
711static void context_apply_fbo_entry(struct wined3d_context *context, GLenum target, struct fbo_entry *entry)
712{
713 const struct wined3d_gl_info *gl_info = context->gl_info;
714 GLuint read_binding, draw_binding;
715 unsigned int i;
716
717 if (entry->flags & WINED3D_FBO_ENTRY_FLAG_ATTACHED)
718 {
719 context_bind_fbo(context, target, entry->id);
720 return;
721 }
722
723 read_binding = context->fbo_read_binding;
724 draw_binding = context->fbo_draw_binding;
726
727 if (gl_info->supported[ARB_FRAMEBUFFER_NO_ATTACHMENTS])
728 {
729 GL_EXTCALL(glFramebufferParameteri(GL_FRAMEBUFFER,
730 GL_FRAMEBUFFER_DEFAULT_WIDTH, gl_info->limits.framebuffer_width));
731 GL_EXTCALL(glFramebufferParameteri(GL_FRAMEBUFFER,
732 GL_FRAMEBUFFER_DEFAULT_HEIGHT, gl_info->limits.framebuffer_height));
733 GL_EXTCALL(glFramebufferParameteri(GL_FRAMEBUFFER, GL_FRAMEBUFFER_DEFAULT_LAYERS, 1));
734 GL_EXTCALL(glFramebufferParameteri(GL_FRAMEBUFFER, GL_FRAMEBUFFER_DEFAULT_SAMPLES, 1));
735 }
736
737 /* Apply render targets */
738 for (i = 0; i < gl_info->limits.buffers; ++i)
739 {
740 context_attach_surface_fbo(context, target, i, &entry->key.objects[i + 1],
741 entry->key.rb_namespace & (1 << (i + 1)));
742 }
743
744 context_attach_depth_stencil_fbo(context, target, &entry->key.objects[0],
745 entry->key.rb_namespace & 0x1, entry->flags);
746
747 /* Set valid read and draw buffer bindings to satisfy pedantic pre-ES2_compatibility
748 * GL contexts requirements. */
749 gl_info->gl_ops.gl.p_glReadBuffer(GL_NONE);
751 if (target != GL_FRAMEBUFFER)
752 {
755 else
757 }
758
759 entry->flags |= WINED3D_FBO_ENTRY_FLAG_ATTACHED;
760}
761
762/* Context activation is done by the caller. */
763static void context_apply_fbo_state(struct wined3d_context *context, GLenum target,
764 struct wined3d_rendertarget_info *render_targets, struct wined3d_surface *depth_stencil,
765 DWORD color_location, DWORD ds_location)
766{
767 struct fbo_entry *entry, *entry2;
768
769 LIST_FOR_EACH_ENTRY_SAFE(entry, entry2, &context->fbo_destroy_list, struct fbo_entry, entry)
770 {
771 context_destroy_fbo_entry(context, entry);
772 }
773
774 if (context->rebind_fbo)
775 {
776 context_bind_fbo(context, GL_FRAMEBUFFER, 0);
777 context->rebind_fbo = FALSE;
778 }
779
780 if (color_location == WINED3D_LOCATION_DRAWABLE)
781 {
782 context->current_fbo = NULL;
783 context_bind_fbo(context, target, 0);
784 }
785 else
786 {
787 struct wined3d_rendertarget_info ds = {{0}};
788
789 if (depth_stencil)
790 {
791 ds.resource = &depth_stencil->container->resource;
792 ds.sub_resource_idx = surface_get_sub_resource_idx(depth_stencil);
793 ds.layer_count = 1;
794 }
795 context->current_fbo = context_find_fbo_entry(context, target,
796 render_targets, &ds, color_location, ds_location);
797 context_apply_fbo_entry(context, target, context->current_fbo);
798 }
799}
800
801/* Context activation is done by the caller. */
802void context_apply_fbo_state_blit(struct wined3d_context *context, GLenum target,
803 struct wined3d_surface *render_target, struct wined3d_surface *depth_stencil, DWORD location)
804{
805 memset(context->blit_targets, 0, sizeof(context->blit_targets));
806 if (render_target)
807 {
808 context->blit_targets[0].resource = &render_target->container->resource;
809 context->blit_targets[0].sub_resource_idx = surface_get_sub_resource_idx(render_target);
810 context->blit_targets[0].layer_count = 1;
811 }
812 context_apply_fbo_state(context, target, context->blit_targets, depth_stencil, location, location);
813}
814
815/* Context activation is done by the caller. */
816void context_alloc_occlusion_query(struct wined3d_context *context, struct wined3d_occlusion_query *query)
817{
818 const struct wined3d_gl_info *gl_info = context->gl_info;
819
820 if (context->free_occlusion_query_count)
821 {
822 query->id = context->free_occlusion_queries[--context->free_occlusion_query_count];
823 }
824 else
825 {
826 if (gl_info->supported[ARB_OCCLUSION_QUERY])
827 {
828 GL_EXTCALL(glGenQueries(1, &query->id));
829 checkGLcall("glGenQueries");
830
831 TRACE("Allocated occlusion query %u in context %p.\n", query->id, context);
832 }
833 else
834 {
835 WARN("Occlusion queries not supported, not allocating query id.\n");
836 query->id = 0;
837 }
838 }
839
840 query->context = context;
841 list_add_head(&context->occlusion_queries, &query->entry);
842}
843
844void context_free_occlusion_query(struct wined3d_occlusion_query *query)
845{
846 struct wined3d_context *context = query->context;
847
848 list_remove(&query->entry);
849 query->context = NULL;
850
851 if (!wined3d_array_reserve((void **)&context->free_occlusion_queries,
852 &context->free_occlusion_query_size, context->free_occlusion_query_count + 1,
853 sizeof(*context->free_occlusion_queries)))
854 {
855 ERR("Failed to grow free list, leaking query %u in context %p.\n", query->id, context);
856 return;
857 }
858
859 context->free_occlusion_queries[context->free_occlusion_query_count++] = query->id;
860}
861
862/* Context activation is done by the caller. */
863void context_alloc_fence(struct wined3d_context *context, struct wined3d_fence *fence)
864{
865 const struct wined3d_gl_info *gl_info = context->gl_info;
866
867 if (context->free_fence_count)
868 {
869 fence->object = context->free_fences[--context->free_fence_count];
870 }
871 else
872 {
873 if (gl_info->supported[ARB_SYNC])
874 {
875 /* Using ARB_sync, not much to do here. */
876 fence->object.sync = NULL;
877 TRACE("Allocated sync object in context %p.\n", context);
878 }
879 else if (gl_info->supported[APPLE_FENCE])
880 {
881 GL_EXTCALL(glGenFencesAPPLE(1, &fence->object.id));
882 checkGLcall("glGenFencesAPPLE");
883
884 TRACE("Allocated fence %u in context %p.\n", fence->object.id, context);
885 }
886 else if(gl_info->supported[NV_FENCE])
887 {
888 GL_EXTCALL(glGenFencesNV(1, &fence->object.id));
889 checkGLcall("glGenFencesNV");
890
891 TRACE("Allocated fence %u in context %p.\n", fence->object.id, context);
892 }
893 else
894 {
895 WARN("Fences not supported, not allocating fence.\n");
896 fence->object.id = 0;
897 }
898 }
899
900 fence->context = context;
901 list_add_head(&context->fences, &fence->entry);
902}
903
904void context_free_fence(struct wined3d_fence *fence)
905{
906 struct wined3d_context *context = fence->context;
907
908 list_remove(&fence->entry);
909 fence->context = NULL;
910
911 if (!wined3d_array_reserve((void **)&context->free_fences,
912 &context->free_fence_size, context->free_fence_count + 1,
913 sizeof(*context->free_fences)))
914 {
915 ERR("Failed to grow free list, leaking fence %u in context %p.\n", fence->object.id, context);
916 return;
917 }
918
919 context->free_fences[context->free_fence_count++] = fence->object;
920}
921
922/* Context activation is done by the caller. */
923void context_alloc_timestamp_query(struct wined3d_context *context, struct wined3d_timestamp_query *query)
924{
925 const struct wined3d_gl_info *gl_info = context->gl_info;
926
927 if (context->free_timestamp_query_count)
928 {
929 query->id = context->free_timestamp_queries[--context->free_timestamp_query_count];
930 }
931 else
932 {
933 GL_EXTCALL(glGenQueries(1, &query->id));
934 checkGLcall("glGenQueries");
935
936 TRACE("Allocated timestamp query %u in context %p.\n", query->id, context);
937 }
938
939 query->context = context;
940 list_add_head(&context->timestamp_queries, &query->entry);
941}
942
943void context_free_timestamp_query(struct wined3d_timestamp_query *query)
944{
945 struct wined3d_context *context = query->context;
946
947 list_remove(&query->entry);
948 query->context = NULL;
949
950 if (!wined3d_array_reserve((void **)&context->free_timestamp_queries,
951 &context->free_timestamp_query_size, context->free_timestamp_query_count + 1,
952 sizeof(*context->free_timestamp_queries)))
953 {
954 ERR("Failed to grow free list, leaking query %u in context %p.\n", query->id, context);
955 return;
956 }
957
958 context->free_timestamp_queries[context->free_timestamp_query_count++] = query->id;
959}
960
961void context_alloc_so_statistics_query(struct wined3d_context *context,
962 struct wined3d_so_statistics_query *query)
963{
964 const struct wined3d_gl_info *gl_info = context->gl_info;
965
966 if (context->free_so_statistics_query_count)
967 {
968 query->u = context->free_so_statistics_queries[--context->free_so_statistics_query_count];
969 }
970 else
971 {
972 GL_EXTCALL(glGenQueries(ARRAY_SIZE(query->u.id), query->u.id));
973 checkGLcall("glGenQueries");
974
975 TRACE("Allocated SO statistics queries %u, %u in context %p.\n",
976 query->u.id[0], query->u.id[1], context);
977 }
978
979 query->context = context;
980 list_add_head(&context->so_statistics_queries, &query->entry);
981}
982
983void context_free_so_statistics_query(struct wined3d_so_statistics_query *query)
984{
985 struct wined3d_context *context = query->context;
986
987 list_remove(&query->entry);
988 query->context = NULL;
989
990 if (!wined3d_array_reserve((void **)&context->free_so_statistics_queries,
991 &context->free_so_statistics_query_size, context->free_so_statistics_query_count + 1,
992 sizeof(*context->free_so_statistics_queries)))
993 {
994 ERR("Failed to grow free list, leaking GL queries %u, %u in context %p.\n",
995 query->u.id[0], query->u.id[1], context);
996 return;
997 }
998
999 context->free_so_statistics_queries[context->free_so_statistics_query_count++] = query->u;
1000}
1001
1002void context_alloc_pipeline_statistics_query(struct wined3d_context *context,
1003 struct wined3d_pipeline_statistics_query *query)
1004{
1005 const struct wined3d_gl_info *gl_info = context->gl_info;
1006
1007 if (context->free_pipeline_statistics_query_count)
1008 {
1009 query->u = context->free_pipeline_statistics_queries[--context->free_pipeline_statistics_query_count];
1010 }
1011 else
1012 {
1013 GL_EXTCALL(glGenQueries(ARRAY_SIZE(query->u.id), query->u.id));
1014 checkGLcall("glGenQueries");
1015 }
1016
1017 query->context = context;
1018 list_add_head(&context->pipeline_statistics_queries, &query->entry);
1019}
1020
1021void context_free_pipeline_statistics_query(struct wined3d_pipeline_statistics_query *query)
1022{
1023 struct wined3d_context *context = query->context;
1024
1025 list_remove(&query->entry);
1026 query->context = NULL;
1027
1028 if (!wined3d_array_reserve((void **)&context->free_pipeline_statistics_queries,
1029 &context->free_pipeline_statistics_query_size, context->free_pipeline_statistics_query_count + 1,
1030 sizeof(*context->free_pipeline_statistics_queries)))
1031 {
1032 ERR("Failed to grow free list, leaking GL queries in context %p.\n", context);
1033 return;
1034 }
1035
1036 context->free_pipeline_statistics_queries[context->free_pipeline_statistics_query_count++] = query->u;
1037}
1038
1040
1041static void context_enum_fbo_entries(const struct wined3d_device *device,
1042 GLuint name, BOOL rb_namespace, void (*callback)(struct wined3d_context *, struct fbo_entry *))
1043{
1044 unsigned int i, j;
1045
1046 for (i = 0; i < device->context_count; ++i)
1047 {
1048 struct wined3d_context *context = device->contexts[i];
1049 const struct wined3d_gl_info *gl_info = context->gl_info;
1050 struct fbo_entry *entry, *entry2;
1051
1052 LIST_FOR_EACH_ENTRY_SAFE(entry, entry2, &context->fbo_list, struct fbo_entry, entry)
1053 {
1054 for (j = 0; j < gl_info->limits.buffers + 1; ++j)
1055 {
1056 if (entry->key.objects[j].object == name
1057 && !(entry->key.rb_namespace & (1 << j)) == !rb_namespace)
1058 {
1059 callback(context, entry);
1060 break;
1061 }
1062 }
1063 }
1064 }
1065}
1066
1067static void context_queue_fbo_entry_destruction(struct wined3d_context *context, struct fbo_entry *entry)
1068{
1069 list_remove(&entry->entry);
1070 list_add_head(&context->fbo_destroy_list, &entry->entry);
1071}
1072
1073void context_resource_released(const struct wined3d_device *device,
1074 struct wined3d_resource *resource, enum wined3d_resource_type type)
1075{
1076 struct wined3d_texture *texture;
1077 UINT i;
1078
1079 if (!device->d3d_initialized)
1080 return;
1081
1082 switch (type)
1083 {
1087
1088 for (i = 0; i < device->context_count; ++i)
1089 {
1090 struct wined3d_context *context = device->contexts[i];
1091 if (context->current_rt.texture == texture)
1092 {
1093 context->current_rt.texture = NULL;
1094 context->current_rt.sub_resource_idx = 0;
1095 }
1096 }
1097 break;
1098
1099 default:
1100 break;
1101 }
1102}
1103
1104void context_gl_resource_released(struct wined3d_device *device,
1105 GLuint name, BOOL rb_namespace)
1106{
1107 context_enum_fbo_entries(device, name, rb_namespace, context_queue_fbo_entry_destruction);
1108}
1109
1110void context_surface_update(struct wined3d_context *context, const struct wined3d_surface *surface)
1111{
1112 const struct wined3d_gl_info *gl_info = context->gl_info;
1113 struct fbo_entry *entry = context->current_fbo;
1114 unsigned int i;
1115
1116 if (!entry || context->rebind_fbo) return;
1117
1118 for (i = 0; i < gl_info->limits.buffers + 1; ++i)
1119 {
1120 if (surface->container->texture_rgb.name == entry->key.objects[i].object
1121 || surface->container->texture_srgb.name == entry->key.objects[i].object)
1122 {
1123 TRACE("Updated surface %p is bound as attachment %u to the current FBO.\n", surface, i);
1124 context->rebind_fbo = TRUE;
1125 return;
1126 }
1127 }
1128}
1129
1130static BOOL context_restore_pixel_format(struct wined3d_context *ctx)
1131{
1132 const struct wined3d_gl_info *gl_info = ctx->gl_info;
1133 BOOL ret = FALSE;
1134
1135 if (ctx->restore_pf && IsWindow(ctx->restore_pf_win))
1136 {
1137 if (ctx->gl_info->supported[WGL_WINE_PIXEL_FORMAT_PASSTHROUGH])
1138 {
1139 HDC dc = GetDCEx(ctx->restore_pf_win, 0, DCX_USESTYLE | DCX_CACHE);
1140 if (dc)
1141 {
1142 if (!(ret = GL_EXTCALL(wglSetPixelFormatWINE(dc, ctx->restore_pf))))
1143 {
1144 ERR("wglSetPixelFormatWINE failed to restore pixel format %d on window %p.\n",
1145 ctx->restore_pf, ctx->restore_pf_win);
1146 }
1147 ReleaseDC(ctx->restore_pf_win, dc);
1148 }
1149 }
1150 else
1151 {
1152 ERR("can't restore pixel format %d on window %p\n", ctx->restore_pf, ctx->restore_pf_win);
1153 }
1154 }
1155
1156 ctx->restore_pf = 0;
1157 ctx->restore_pf_win = NULL;
1158 return ret;
1159}
1160
1161static BOOL context_set_pixel_format(struct wined3d_context *context)
1162{
1163 const struct wined3d_gl_info *gl_info = context->gl_info;
1164 BOOL private = context->hdc_is_private;
1165 int format = context->pixel_format;
1166 HDC dc = context->hdc;
1167 int current;
1168
1169 if (private && context->hdc_has_format)
1170 return TRUE;
1171
1172 if (!private && WindowFromDC(dc) != context->win_handle)
1173 return FALSE;
1174
1175 current = gl_info->gl_ops.wgl.p_wglGetPixelFormat(dc);
1176 if (current == format) goto success;
1177
1178 if (!current)
1179 {
1180 if (!SetPixelFormat(dc, format, NULL))
1181 {
1182 /* This may also happen if the dc belongs to a destroyed window. */
1183 WARN("Failed to set pixel format %d on device context %p, last error %#x.\n",
1184 format, dc, GetLastError());
1185 return FALSE;
1186 }
1187
1188 context->restore_pf = 0;
1189 context->restore_pf_win = private ? NULL : WindowFromDC(dc);
1190 goto success;
1191 }
1192
1193 /* By default WGL doesn't allow pixel format adjustments but we need it
1194 * here. For this reason there's a Wine specific wglSetPixelFormat()
1195 * which allows us to set the pixel format multiple times. Only use it
1196 * when really needed. */
1197 if (gl_info->supported[WGL_WINE_PIXEL_FORMAT_PASSTHROUGH])
1198 {
1199 HWND win;
1200
1201 if (!GL_EXTCALL(wglSetPixelFormatWINE(dc, format)))
1202 {
1203 ERR("wglSetPixelFormatWINE failed to set pixel format %d on device context %p.\n",
1204 format, dc);
1205 return FALSE;
1206 }
1207
1208 win = private ? NULL : WindowFromDC(dc);
1209 if (win != context->restore_pf_win)
1210 {
1212
1213 context->restore_pf = private ? 0 : current;
1214 context->restore_pf_win = win;
1215 }
1216
1217 goto success;
1218 }
1219
1220 /* OpenGL doesn't allow pixel format adjustments. Print an error and
1221 * continue using the old format. There's a big chance that the old
1222 * format works although with a performance hit and perhaps rendering
1223 * errors. */
1224 ERR("Unable to set pixel format %d on device context %p. Already using format %d.\n",
1225 format, dc, current);
1226 return TRUE;
1227
1228success:
1229 if (private)
1230 context->hdc_has_format = TRUE;
1231 return TRUE;
1232}
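/* Summary note (inferred from the function above, not original source):
 * context_set_pixel_format() has three outcomes. A private DC that already
 * carries the format is accepted as-is, a DC with no format gets a regular
 * SetPixelFormat(), and a DC that already has a different format is only
 * overridden when the Wine-specific WGL_WINE_pixel_format_passthrough
 * extension is available, remembering the previous format in
 * restore_pf/restore_pf_win so context_restore_pixel_format() can put it back. */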
1233
1234static BOOL context_set_gl_context(struct wined3d_context *ctx)
1235{
1236 struct wined3d_swapchain *swapchain = ctx->swapchain;
1237 BOOL backup = FALSE;
1238
1239 if (!context_set_pixel_format(ctx))
1240 {
1241 WARN("Failed to set pixel format %d on device context %p.\n",
1242 ctx->pixel_format, ctx->hdc);
1243 backup = TRUE;
1244 }
1245
1246 if (backup || !wglMakeCurrent(ctx->hdc, ctx->glCtx))
1247 {
1248 WARN("Failed to make GL context %p current on device context %p, last error %#x.\n",
1249 ctx->glCtx, ctx->hdc, GetLastError());
1250 ctx->valid = 0;
1251 WARN("Trying fallback to the backup window.\n");
1252
1253 /* FIXME: If the context is destroyed it's no longer associated with
1254 * a swapchain, so we can't use the swapchain to get a backup dc. To
1255 * make this work windowless contexts would need to be handled by the
1256 * device. */
1257 if (ctx->destroyed || !swapchain)
1258 {
1259 FIXME("Unable to get backup dc for destroyed context %p.\n", ctx);
1261 return FALSE;
1262 }
1263
1264 if (!(ctx->hdc = swapchain_get_backup_dc(swapchain)))
1265 {
1267 return FALSE;
1268 }
1269
1270 ctx->hdc_is_private = TRUE;
1271 ctx->hdc_has_format = FALSE;
1272
1273 if (!context_set_pixel_format(ctx))
1274 {
1275 ERR("Failed to set pixel format %d on device context %p.\n",
1276 ctx->pixel_format, ctx->hdc);
1278 return FALSE;
1279 }
1280
1281 if (!wglMakeCurrent(ctx->hdc, ctx->glCtx))
1282 {
1283 ERR("Fallback to backup window (dc %p) failed too, last error %#x.\n",
1284 ctx->hdc, GetLastError());
1286 return FALSE;
1287 }
1288
1289 ctx->valid = 1;
1290 }
1291 ctx->needs_set = 0;
1292 return TRUE;
1293}
1294
1295static void context_restore_gl_context(const struct wined3d_gl_info *gl_info, HDC dc, HGLRC gl_ctx)
1296{
1297 if (!wglMakeCurrent(dc, gl_ctx))
1298 {
1299 ERR("Failed to restore GL context %p on device context %p, last error %#x.\n",
1300 gl_ctx, dc, GetLastError());
1302 }
1303}
1304
1305static void context_update_window(struct wined3d_context *context)
1306{
1307 if (!context->swapchain)
1308 return;
1309
1310 if (context->win_handle == context->swapchain->win_handle)
1311 return;
1312
1313 TRACE("Updating context %p window from %p to %p.\n",
1314 context, context->win_handle, context->swapchain->win_handle);
1315
1316 if (context->hdc)
1317 wined3d_release_dc(context->win_handle, context->hdc);
1318
1319 context->win_handle = context->swapchain->win_handle;
1320 context->hdc_is_private = FALSE;
1321 context->hdc_has_format = FALSE;
1322 context->needs_set = 1;
1323 context->valid = 1;
1324
1325 if (!(context->hdc = GetDCEx(context->win_handle, 0, DCX_USESTYLE | DCX_CACHE)))
1326 {
1327 ERR("Failed to get a device context for window %p.\n", context->win_handle);
1328 context->valid = 0;
1329 }
1330}
1331
1332static void context_destroy_gl_resources(struct wined3d_context *context)
1333{
1334 struct wined3d_pipeline_statistics_query *pipeline_statistics_query;
1335 const struct wined3d_gl_info *gl_info = context->gl_info;
1336 struct wined3d_so_statistics_query *so_statistics_query;
1337 struct wined3d_timestamp_query *timestamp_query;
1338 struct wined3d_occlusion_query *occlusion_query;
1339 struct fbo_entry *entry, *entry2;
1340 struct wined3d_fence *fence;
1341 HGLRC restore_ctx;
1343 unsigned int i;
1344
1345 restore_ctx = wglGetCurrentContext();
1347
1348 if (restore_ctx == context->glCtx)
1349 restore_ctx = NULL;
1350 else if (context->valid)
1352
1353 LIST_FOR_EACH_ENTRY(so_statistics_query, &context->so_statistics_queries,
1355 {
1356 if (context->valid)
1357 GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(so_statistics_query->u.id), so_statistics_query->u.id));
1358 so_statistics_query->context = NULL;
1359 }
1360
1361 LIST_FOR_EACH_ENTRY(pipeline_statistics_query, &context->pipeline_statistics_queries,
1363 {
1364 if (context->valid)
1365 GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(pipeline_statistics_query->u.id), pipeline_statistics_query->u.id));
1366 pipeline_statistics_query->context = NULL;
1367 }
1368
1369 LIST_FOR_EACH_ENTRY(timestamp_query, &context->timestamp_queries, struct wined3d_timestamp_query, entry)
1370 {
1371 if (context->valid)
1372 GL_EXTCALL(glDeleteQueries(1, &timestamp_query->id));
1373 timestamp_query->context = NULL;
1374 }
1375
1376 LIST_FOR_EACH_ENTRY(occlusion_query, &context->occlusion_queries, struct wined3d_occlusion_query, entry)
1377 {
1378 if (context->valid && gl_info->supported[ARB_OCCLUSION_QUERY])
1379 GL_EXTCALL(glDeleteQueries(1, &occlusion_query->id));
1380 occlusion_query->context = NULL;
1381 }
1382
1383 LIST_FOR_EACH_ENTRY(fence, &context->fences, struct wined3d_fence, entry)
1384 {
1385 if (context->valid)
1386 {
1387 if (gl_info->supported[ARB_SYNC])
1388 {
1389 if (fence->object.sync)
1390 GL_EXTCALL(glDeleteSync(fence->object.sync));
1391 }
1392 else if (gl_info->supported[APPLE_FENCE])
1393 {
1394 GL_EXTCALL(glDeleteFencesAPPLE(1, &fence->object.id));
1395 }
1396 else if (gl_info->supported[NV_FENCE])
1397 {
1398 GL_EXTCALL(glDeleteFencesNV(1, &fence->object.id));
1399 }
1400 }
1401 fence->context = NULL;
1402 }
1403
1404 LIST_FOR_EACH_ENTRY_SAFE(entry, entry2, &context->fbo_destroy_list, struct fbo_entry, entry)
1405 {
1406 if (!context->valid) entry->id = 0;
1408 }
1409
1410 LIST_FOR_EACH_ENTRY_SAFE(entry, entry2, &context->fbo_list, struct fbo_entry, entry)
1411 {
1412 if (!context->valid) entry->id = 0;
1414 }
1415
1416 if (context->valid)
1417 {
1418 if (context->dummy_arbfp_prog)
1419 {
1420 GL_EXTCALL(glDeleteProgramsARB(1, &context->dummy_arbfp_prog));
1421 }
1422
1424 {
1425 for (i = 0; i < context->free_so_statistics_query_count; ++i)
1426 {
1427 union wined3d_gl_so_statistics_query *q = &context->free_so_statistics_queries[i];
1428 GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(q->id), q->id));
1429 }
1430 }
1431
1433 {
1434 for (i = 0; i < context->free_pipeline_statistics_query_count; ++i)
1435 {
1436 union wined3d_gl_pipeline_statistics_query *q = &context->free_pipeline_statistics_queries[i];
1437 GL_EXTCALL(glDeleteQueries(ARRAY_SIZE(q->id), q->id));
1438 }
1439 }
1440
1441 if (gl_info->supported[ARB_TIMER_QUERY])
1442 GL_EXTCALL(glDeleteQueries(context->free_timestamp_query_count, context->free_timestamp_queries));
1443
1444 if (gl_info->supported[ARB_OCCLUSION_QUERY])
1445 GL_EXTCALL(glDeleteQueries(context->free_occlusion_query_count, context->free_occlusion_queries));
1446
1447 if (gl_info->supported[ARB_SYNC])
1448 {
1449 for (i = 0; i < context->free_fence_count; ++i)
1450 {
1451 GL_EXTCALL(glDeleteSync(context->free_fences[i].sync));
1452 }
1453 }
1454 else if (gl_info->supported[APPLE_FENCE])
1455 {
1456 for (i = 0; i < context->free_fence_count; ++i)
1457 {
1458 GL_EXTCALL(glDeleteFencesAPPLE(1, &context->free_fences[i].id));
1459 }
1460 }
1461 else if (gl_info->supported[NV_FENCE])
1462 {
1463 for (i = 0; i < context->free_fence_count; ++i)
1464 {
1465 GL_EXTCALL(glDeleteFencesNV(1, &context->free_fences[i].id));
1466 }
1467 }
1468
1469 checkGLcall("context cleanup");
1470 }
1471
1472 heap_free(context->free_so_statistics_queries);
1473 heap_free(context->free_pipeline_statistics_queries);
1474 heap_free(context->free_timestamp_queries);
1475 heap_free(context->free_occlusion_queries);
1476 heap_free(context->free_fences);
1477
1479 if (restore_ctx)
1480 {
1481 context_restore_gl_context(gl_info, restore_dc, restore_ctx);
1482 }
1484 {
1485 ERR("Failed to disable GL context.\n");
1486 }
1487
1488 wined3d_release_dc(context->win_handle, context->hdc);
1489
1490 if (!wglDeleteContext(context->glCtx))
1491 {
1493 ERR("wglDeleteContext(%p) failed, last error %#x.\n", context->glCtx, err);
1494 }
1495}
1496
1497DWORD context_get_tls_idx(void)
1498{
1499 return wined3d_context_tls_idx;
1500}
1501
1502void context_set_tls_idx(DWORD idx)
1503{
1504 wined3d_context_tls_idx = idx;
1505}
1506
1507struct wined3d_context *context_get_current(void)
1508{
1509 return TlsGetValue(wined3d_context_tls_idx);
1510}
1511
1512BOOL context_set_current(struct wined3d_context *ctx)
1513{
1514 struct wined3d_context *old = context_get_current();
1515
1516 if (old == ctx)
1517 {
1518 TRACE("Already using D3D context %p.\n", ctx);
1519 return TRUE;
1520 }
1521
1522 if (old)
1523 {
1524 if (old->destroyed)
1525 {
1526 TRACE("Switching away from destroyed context %p.\n", old);
1528 heap_free((void *)old->gl_info);
1529 heap_free(old);
1530 }
1531 else
1532 {
1534 {
1535 const struct wined3d_gl_info *gl_info = old->gl_info;
1536 TRACE("Flushing context %p before switching to %p.\n", old, ctx);
1537 gl_info->gl_ops.gl.p_glFlush();
1538 }
1539 old->current = 0;
1540 }
1541 }
1542
1543 if (ctx)
1544 {
1545 if (!ctx->valid)
1546 {
1547 ERR("Trying to make invalid context %p current\n", ctx);
1548 return FALSE;
1549 }
1550
1551 TRACE("Switching to D3D context %p, GL context %p, device context %p.\n", ctx, ctx->glCtx, ctx->hdc);
1553 return FALSE;
1554 ctx->current = 1;
1555 }
1556 else if (wglGetCurrentContext())
1557 {
1558 TRACE("Clearing current D3D context.\n");
1559 if (!wglMakeCurrent(NULL, NULL))
1560 {
1562 ERR("Failed to clear current GL context, last error %#x.\n", err);
1564 return FALSE;
1565 }
1566 }
1567
1569}
1570
1571void context_release(struct wined3d_context *context)
1572{
1573 TRACE("Releasing context %p, level %u.\n", context, context->level);
1574
1575 if (WARN_ON(d3d))
1576 {
1577 if (!context->level)
1578 WARN("Context %p is not active.\n", context);
1579 else if (context != context_get_current())
1580 WARN("Context %p is not the current context.\n", context);
1581 }
1582
1583 if (!--context->level)
1584 {
1586 context->needs_set = 1;
1587 if (context->restore_ctx)
1588 {
1589 TRACE("Restoring GL context %p on device context %p.\n", context->restore_ctx, context->restore_dc);
1590 context_restore_gl_context(context->gl_info, context->restore_dc, context->restore_ctx);
1591 context->restore_ctx = NULL;
1592 context->restore_dc = NULL;
1593 }
1594
1595 if (context->destroy_delayed)
1596 {
1597 TRACE("Destroying context %p.\n", context);
1599 }
1600 }
1601}
1602
1603/* This is used when a context for render target A is active, but a separate context is
1604 * needed to access the WGL framebuffer for render target B. Re-acquire a context for rt
1605 * A to avoid breaking caller code. */
1606void context_restore(struct wined3d_context *context, struct wined3d_surface *restore)
1607{
1608 if (context->current_rt.texture != restore->container
1609 || context->current_rt.sub_resource_idx != surface_get_sub_resource_idx(restore))
1610 {
1612 context = context_acquire(restore->container->resource.device,
1613 restore->container, surface_get_sub_resource_idx(restore));
1614 }
1615
1617}
1618
1619static void context_enter(struct wined3d_context *context)
1620{
1621 TRACE("Entering context %p, level %u.\n", context, context->level + 1);
1622
1623 if (!context->level++)
1624 {
1625 const struct wined3d_context *current_context = context_get_current();
1626 HGLRC current_gl = wglGetCurrentContext();
1627
1628 if (current_gl && (!current_context || current_context->glCtx != current_gl))
1629 {
1630 TRACE("Another GL context (%p on device context %p) is already current.\n",
1631 current_gl, wglGetCurrentDC());
1632 context->restore_ctx = current_gl;
1633 context->restore_dc = wglGetCurrentDC();
1634 context->needs_set = 1;
1635 }
1636 else if (!context->needs_set && !(context->hdc_is_private && context->hdc_has_format)
1637 && context->pixel_format != context->gl_info->gl_ops.wgl.p_wglGetPixelFormat(context->hdc))
1638 context->needs_set = 1;
1639 }
1640}
1641
1642void context_invalidate_compute_state(struct wined3d_context *context, DWORD state_id)
1643{
1644 DWORD representative = context->state_table[state_id].representative - STATE_COMPUTE_OFFSET;
1645 unsigned int index, shift;
1646
1647 index = representative / (sizeof(*context->dirty_compute_states) * CHAR_BIT);
1648 shift = representative & (sizeof(*context->dirty_compute_states) * CHAR_BIT - 1);
1649 context->dirty_compute_states[index] |= (1u << shift);
1650}
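/* Worked example (illustrative, assuming 32-bit DWORD bitmap entries; not
 * original source): a representative of STATE_COMPUTE_OFFSET + 40 gives
 * index = 40 / 32 = 1 and shift = 40 & 31 = 8, i.e. bit 8 of the second word
 * of dirty_compute_states is set. context_invalidate_state() below applies
 * the same index/shift scheme to the graphics state bitmap. */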
1651
1652void context_invalidate_state(struct wined3d_context *context, DWORD state)
1653{
1654 DWORD rep = context->state_table[state].representative;
1655 DWORD idx;
1656 BYTE shift;
1657
1658 if (isStateDirty(context, rep)) return;
1659
1660 context->dirtyArray[context->numDirtyEntries++] = rep;
1661 idx = rep / (sizeof(*context->isStateDirty) * CHAR_BIT);
1662 shift = rep & ((sizeof(*context->isStateDirty) * CHAR_BIT) - 1);
1663 context->isStateDirty[idx] |= (1u << shift);
1664}
1665
1666/* This function takes care of wined3d pixel format selection. */
1667static int context_choose_pixel_format(const struct wined3d_device *device, HDC hdc,
1668 const struct wined3d_format *color_format, const struct wined3d_format *ds_format,
1669 BOOL auxBuffers)
1670{
1671 unsigned int cfg_count = device->adapter->cfg_count;
1672 unsigned int current_value;
1673 PIXELFORMATDESCRIPTOR pfd;
1674 int iPixelFormat = 0;
1675 unsigned int i;
1676
1677 TRACE("device %p, dc %p, color_format %s, ds_format %s, aux_buffers %#x.\n",
1678 device, hdc, debug_d3dformat(color_format->id), debug_d3dformat(ds_format->id),
1679 auxBuffers);
1680
1681 current_value = 0;
1682 for (i = 0; i < cfg_count; ++i)
1683 {
1684 const struct wined3d_pixel_format *cfg = &device->adapter->cfgs[i];
1685 unsigned int value;
1686
1687 /* For now only accept RGBA formats. Perhaps some day we will
1688 * allow floating point formats for pbuffers. */
1689 if (cfg->iPixelType != WGL_TYPE_RGBA_ARB)
1690 continue;
1691 /* In window mode we need a window drawable format and double buffering. */
1692 if (!(cfg->windowDrawable && cfg->doubleBuffer))
1693 continue;
1694 if (cfg->redSize < color_format->red_size)
1695 continue;
1696 if (cfg->greenSize < color_format->green_size)
1697 continue;
1698 if (cfg->blueSize < color_format->blue_size)
1699 continue;
1700 if (cfg->alphaSize < color_format->alpha_size)
1701 continue;
1702 if (cfg->depthSize < ds_format->depth_size)
1703 continue;
1704 if (ds_format->stencil_size && cfg->stencilSize != ds_format->stencil_size)
1705 continue;
1706 /* Check multisampling support. */
1707 if (cfg->numSamples)
1708 continue;
1709
1710 value = 1;
1711 /* We try to locate a format which matches our requirements exactly. In case of
1712 * depth it is no problem to emulate 16-bit using e.g. 24-bit, so accept that. */
1713 if (cfg->depthSize == ds_format->depth_size)
1714 value += 1;
1715 if (cfg->stencilSize == ds_format->stencil_size)
1716 value += 2;
1717 if (cfg->alphaSize == color_format->alpha_size)
1718 value += 4;
1719 /* We like to have aux buffers in backbuffer mode */
1720 if (auxBuffers && cfg->auxBuffers)
1721 value += 8;
1722 if (cfg->redSize == color_format->red_size
1723 && cfg->greenSize == color_format->green_size
1724 && cfg->blueSize == color_format->blue_size)
1725 value += 16;
1726
1727 if (value > current_value)
1728 {
1729 iPixelFormat = cfg->iPixelFormat;
1730 current_value = value;
1731 }
1732 }
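/* Worked example (illustrative, derived from the weights above; not original
 * source): a config that exactly matches depth, stencil, alpha and the RGB
 * channel sizes scores 1 + 1 + 2 + 4 + 16 = 24, or 32 with aux buffers on top,
 * while a config that merely satisfies the minimum sizes keeps the base score
 * of 1, so exact matches always win the selection. */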
1733
1734 if (!iPixelFormat)
1735 {
1736 ERR("Trying to locate a compatible pixel format because an exact match failed.\n");
1737
1738 memset(&pfd, 0, sizeof(pfd));
1739 pfd.nSize = sizeof(pfd);
1740 pfd.nVersion = 1;
1741 pfd.dwFlags = PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER | PFD_DRAW_TO_WINDOW;/*PFD_GENERIC_ACCELERATED*/
1742 pfd.iPixelType = PFD_TYPE_RGBA;
1743 pfd.cAlphaBits = color_format->alpha_size;
1744 pfd.cColorBits = color_format->red_size + color_format->green_size
1745 + color_format->blue_size + color_format->alpha_size;
1746 pfd.cDepthBits = ds_format->depth_size;
1747 pfd.cStencilBits = ds_format->stencil_size;
1749
1750 if (!(iPixelFormat = ChoosePixelFormat(hdc, &pfd)))
1751 {
1752 /* Something is very wrong as ChoosePixelFormat() barely fails. */
1753 ERR("Can't find a suitable pixel format.\n");
1754 return 0;
1755 }
1756 }
1757
1758 TRACE("Found iPixelFormat=%d for ColorFormat=%s, DepthStencilFormat=%s.\n",
1759 iPixelFormat, debug_d3dformat(color_format->id), debug_d3dformat(ds_format->id));
1760 return iPixelFormat;
1761}
1762
1763/* Context activation is done by the caller. */
1765{
1766 const struct wined3d_dummy_textures *textures = &context->device->dummy_textures;
1767 const struct wined3d_gl_info *gl_info = context->gl_info;
1768 unsigned int i;
1769
1770 for (i = 0; i < gl_info->limits.combined_samplers; ++i)
1771 {
1773
1774 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D, textures->tex_1d);
1775 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, textures->tex_2d);
1776
1777 if (gl_info->supported[ARB_TEXTURE_RECTANGLE])
1778 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_RECTANGLE_ARB, textures->tex_rect);
1779
1780 if (gl_info->supported[EXT_TEXTURE3D])
1781 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_3D, textures->tex_3d);
1782
1783 if (gl_info->supported[ARB_TEXTURE_CUBE_MAP])
1784 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, textures->tex_cube);
1785
1786 if (gl_info->supported[ARB_TEXTURE_CUBE_MAP_ARRAY])
1787 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, textures->tex_cube_array);
1788
1789 if (gl_info->supported[EXT_TEXTURE_ARRAY])
1790 {
1791 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D_ARRAY, textures->tex_1d_array);
1792 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_ARRAY, textures->tex_2d_array);
1793 }
1794 if (gl_info->supported[ARB_TEXTURE_BUFFER_OBJECT])
1795 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_BUFFER, textures->tex_buffer);
1796
1797 if (gl_info->supported[ARB_TEXTURE_MULTISAMPLE])
1798 {
1799 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, textures->tex_2d_ms);
1800 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_MULTISAMPLE_ARRAY, textures->tex_2d_ms_array);
1801 }
1802 }
1803
1804 checkGLcall("bind dummy textures");
1805}
1806
1807void wined3d_check_gl_call(const struct wined3d_gl_info *gl_info,
1808 const char *file, unsigned int line, const char *name)
1809{
1810 GLint err;
1811
1812 if (gl_info->supported[ARB_DEBUG_OUTPUT] || (err = gl_info->gl_ops.gl.p_glGetError()) == GL_NO_ERROR)
1813 {
1814 TRACE("%s call ok %s / %u.\n", name, file, line);
1815 return;
1816 }
1817
1818 do
1819 {
1820 ERR(">>>>>>> %s (%#x) from %s @ %s / %u.\n",
1821 debug_glerror(err), err, name, file, line);
1822 err = gl_info->gl_ops.gl.p_glGetError();
1823 } while (err != GL_NO_ERROR);
1824}
1825
1826static BOOL context_debug_output_enabled(const struct wined3d_gl_info *gl_info)
1827{
1828 return gl_info->supported[ARB_DEBUG_OUTPUT]
1829 && (ERR_ON(d3d) || FIXME_ON(d3d) || WARN_ON(d3d_perf));
1830}
1831
1832static void WINE_GLAPI wined3d_debug_callback(GLenum source, GLenum type, GLuint id,
1833 GLenum severity, GLsizei length, const char *message, void *ctx)
1834{
1835 switch (type)
1836 {
1837 case GL_DEBUG_TYPE_ERROR:
1838 ERR("%p: %s.\n", ctx, debugstr_an(message, length));
1839 break;
1840
1841 case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR:
1842 case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR:
1843 case GL_DEBUG_TYPE_PORTABILITY:
1844 FIXME("%p: %s.\n", ctx, debugstr_an(message, length));
1845 break;
1846
1847 case GL_DEBUG_TYPE_PERFORMANCE:
1848 WARN_(d3d_perf)("%p: %s.\n", ctx, debugstr_an(message, length));
1849 break;
1850
1851 default:
1852 FIXME("ctx %p, type %#x: %s.\n", ctx, type, debugstr_an(message, length));
1853 break;
1854 }
1855}
1856
1857static HGLRC context_create_wgl_attribs(const struct wined3d_gl_info *gl_info, HDC hdc, HGLRC share_ctx)
1858{
1859 HGLRC ctx;
1860 unsigned int ctx_attrib_idx = 0;
1861 GLint ctx_attribs[7], ctx_flags = 0;
1862
1863 if (context_debug_output_enabled(gl_info))
1864 ctx_flags = WGL_CONTEXT_DEBUG_BIT_ARB;
1865 ctx_attribs[ctx_attrib_idx++] = WGL_CONTEXT_MAJOR_VERSION_ARB;
1866 ctx_attribs[ctx_attrib_idx++] = gl_info->selected_gl_version >> 16;
1867 ctx_attribs[ctx_attrib_idx++] = WGL_CONTEXT_MINOR_VERSION_ARB;
1868 ctx_attribs[ctx_attrib_idx++] = gl_info->selected_gl_version & 0xffff;
1869 if (gl_info->selected_gl_version >= MAKEDWORD_VERSION(3, 2))
1870 ctx_flags |= WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB;
1871 if (ctx_flags)
1872 {
1873 ctx_attribs[ctx_attrib_idx++] = WGL_CONTEXT_FLAGS_ARB;
1874 ctx_attribs[ctx_attrib_idx++] = ctx_flags;
1875 }
1876 ctx_attribs[ctx_attrib_idx] = 0;
1877
1878 if (!(ctx = gl_info->p_wglCreateContextAttribsARB(hdc, share_ctx, ctx_attribs)))
1879 {
1881 {
1882 ctx_attribs[ctx_attrib_idx - 1] &= ~WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB;
1883 if (!(ctx = gl_info->p_wglCreateContextAttribsARB(hdc, share_ctx, ctx_attribs)))
1884 WARN("Failed to create a WGL context with wglCreateContextAttribsARB, last error %#x.\n",
1885 GetLastError());
1886 }
1887 }
1888 return ctx;
1889}
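/* Illustrative note (an assumption based on the code above, not original
 * source): for a GL 4.4 context with debug output enabled the attribute list
 * built above would look roughly like
 * {WGL_CONTEXT_MAJOR_VERSION_ARB, 4, WGL_CONTEXT_MINOR_VERSION_ARB, 4,
 *  WGL_CONTEXT_FLAGS_ARB, WGL_CONTEXT_DEBUG_BIT_ARB | WGL_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB, 0},
 * and the retry path simply clears the forward-compatible bit before calling
 * wglCreateContextAttribsARB() again. */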
1890
1891struct wined3d_context *context_create(struct wined3d_swapchain *swapchain,
1892 struct wined3d_texture *target, const struct wined3d_format *ds_format)
1893{
1894 struct wined3d_device *device = swapchain->device;
1895 const struct wined3d_d3d_info *d3d_info = &device->adapter->d3d_info;
1896 const struct wined3d_gl_info *gl_info = &device->adapter->gl_info;
1897 const struct wined3d_format *color_format;
1898 struct wined3d_context *ret;
1899 BOOL auxBuffers = FALSE;
1900 HGLRC ctx, share_ctx;
1901 DWORD target_usage;
1902 unsigned int i;
1903 DWORD state;
1904
1905 TRACE("swapchain %p, target %p, window %p.\n", swapchain, target, swapchain->win_handle);
1906
1908
1909 if (!(ret = heap_alloc_zero(sizeof(*ret))))
1910 return NULL;
1911
1912 ret->free_timestamp_query_size = 4;
1913 if (!(ret->free_timestamp_queries = heap_calloc(ret->free_timestamp_query_size,
1914 sizeof(*ret->free_timestamp_queries))))
1915 goto out;
1916 list_init(&ret->timestamp_queries);
1917
1918 ret->free_occlusion_query_size = 4;
1919 if (!(ret->free_occlusion_queries = heap_calloc(ret->free_occlusion_query_size,
1920 sizeof(*ret->free_occlusion_queries))))
1921 goto out;
1922 list_init(&ret->occlusion_queries);
1923
1924 ret->free_fence_size = 4;
1925 if (!(ret->free_fences = heap_calloc(ret->free_fence_size, sizeof(*ret->free_fences))))
1926 goto out;
1927 list_init(&ret->fences);
1928
1929 list_init(&ret->so_statistics_queries);
1930
1931 list_init(&ret->pipeline_statistics_queries);
1932
1933 list_init(&ret->fbo_list);
1934 list_init(&ret->fbo_destroy_list);
1935
1936 if (!device->shader_backend->shader_allocate_context_data(ret))
1937 {
1938 ERR("Failed to allocate shader backend context data.\n");
1939 goto out;
1940 }
1941 if (!device->adapter->fragment_pipe->allocate_context_data(ret))
1942 {
1943 ERR("Failed to allocate fragment pipeline context data.\n");
1944 goto out;
1945 }
1946
1947 for (i = 0; i < ARRAY_SIZE(ret->tex_unit_map); ++i)
1948 ret->tex_unit_map[i] = WINED3D_UNMAPPED_STAGE;
1949 for (i = 0; i < ARRAY_SIZE(ret->rev_tex_unit_map); ++i)
1950 ret->rev_tex_unit_map[i] = WINED3D_UNMAPPED_STAGE;
1951 if (gl_info->limits.graphics_samplers >= MAX_COMBINED_SAMPLERS)
1952 {
1953 /* Initialize the texture unit mapping to a 1:1 mapping. */
1954 unsigned int base, count;
1955
1957 if (base + MAX_FRAGMENT_SAMPLERS > ARRAY_SIZE(ret->rev_tex_unit_map))
1958 {
1959 ERR("Unexpected texture unit base index %u.\n", base);
1960 goto out;
1961 }
1962 for (i = 0; i < min(count, MAX_FRAGMENT_SAMPLERS); ++i)
1963 {
1964 ret->tex_unit_map[i] = base + i;
1965 ret->rev_tex_unit_map[base + i] = i;
1966 }
1967
1969 if (base + MAX_VERTEX_SAMPLERS > ARRAY_SIZE(ret->rev_tex_unit_map))
1970 {
1971 ERR("Unexpected texture unit base index %u.\n", base);
1972 goto out;
1973 }
1974 for (i = 0; i < min(count, MAX_VERTEX_SAMPLERS); ++i)
1975 {
1976 ret->tex_unit_map[MAX_FRAGMENT_SAMPLERS + i] = base + i;
1977 ret->rev_tex_unit_map[base + i] = MAX_FRAGMENT_SAMPLERS + i;
1978 }
1979 }
1980
1981 if (!(ret->texture_type = heap_calloc(gl_info->limits.combined_samplers,
1982 sizeof(*ret->texture_type))))
1983 goto out;
1984
1985 if (!(ret->hdc = GetDCEx(swapchain->win_handle, 0, DCX_USESTYLE | DCX_CACHE)))
1986 {
1987 WARN("Failed to retrieve device context, trying swapchain backup.\n");
1988
1989 if ((ret->hdc = swapchain_get_backup_dc(swapchain)))
1990 ret->hdc_is_private = TRUE;
1991 else
1992 {
1993 ERR("Failed to retrieve a device context.\n");
1994 goto out;
1995 }
1996 }
1997
1998 color_format = target->resource.format;
1999 target_usage = target->resource.usage;
2000
2001 /* In case of ORM_BACKBUFFER, make sure to request an alpha component for
2002 * X4R4G4B4/X8R8G8B8 as we might need it for the backbuffer. */
2003 if (wined3d_settings.offscreen_rendering_mode == ORM_BACKBUFFER)
2004 {
2005 auxBuffers = TRUE;
2006
2007 if (color_format->id == WINED3DFMT_B4G4R4X4_UNORM)
2008 color_format = wined3d_get_format(gl_info, WINED3DFMT_B4G4R4A4_UNORM, target_usage);
2009 else if (color_format->id == WINED3DFMT_B8G8R8X8_UNORM)
2010 color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM, target_usage);
2011 }
2012
2013 /* DirectDraw supports 8bit paletted render targets and these are used by
2014 * old games like StarCraft and C&C. Most modern hardware doesn't support
2015 * 8bit natively so we perform some form of 8bit -> 32bit conversion. The
2016 * conversion (ab)uses the alpha component for storing the palette index.
2017 * For this reason we require a format with 8bit alpha, so request
2018 * A8R8G8B8. */
2019 if (color_format->id == WINED3DFMT_P8_UINT)
2020 color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM, target_usage);
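/* Illustrative sketch, not part of the original source: conceptually the
 * 8bit -> 32bit conversion keeps the palette index recoverable by writing it
 * into the alpha channel, roughly along these lines (example_expand_p8 and
 * its parameters are made up for illustration; the real conversion lives in
 * the texture/blit code):
 *
 *     static void example_expand_p8(DWORD *dst, const DWORD *palette_bgrx, BYTE index)
 *     {
 *         *dst = (palette_bgrx[index] & 0x00ffffffu) | ((DWORD)index << 24);
 *     }
 */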
2021
2022 /* When using FBOs for off-screen rendering, we only use the drawable for
2023 * presentation blits, and don't do any rendering to it. That means we
2024 * don't need depth or stencil buffers, and can mostly ignore the render
2025 * target format. This wouldn't necessarily be quite correct for 10bpc
2026 * display modes, but we don't currently support those.
2027 * Using the same format regardless of the color/depth/stencil targets
2028 * makes it much less likely that different wined3d instances will set
2029 * conflicting pixel formats. */
2030 if (wined3d_settings.offscreen_rendering_mode == ORM_FBO)
2031 {
2032 color_format = wined3d_get_format(gl_info, WINED3DFMT_B8G8R8A8_UNORM, target_usage);
2034 }
2035
2036 /* Try to find a pixel format which matches our requirements. */
2037 if (!(ret->pixel_format = context_choose_pixel_format(device, ret->hdc, color_format, ds_format, auxBuffers)))
2038 goto out;
2039
2040 ret->gl_info = gl_info;
2041 ret->win_handle = swapchain->win_handle;
2042
2044
2046 {
2047 ERR("Failed to set pixel format %d on device context %p.\n", ret->pixel_format, ret->hdc);
2049 goto out;
2050 }
2051
2052 share_ctx = device->context_count ? device->contexts[0]->glCtx : NULL;
2053 if (gl_info->p_wglCreateContextAttribsARB)
2054 {
2055 if (!(ctx = context_create_wgl_attribs(gl_info, ret->hdc, share_ctx)))
2056 goto out;
2057 }
2058 else
2059 {
2060 if (!(ctx = wglCreateContext(ret->hdc)))
2061 {
2062 ERR("Failed to create a WGL context.\n");
2064 goto out;
2065 }
2066
2067 if (share_ctx && !wglShareLists(share_ctx, ctx))
2068 {
2069 ERR("wglShareLists(%p, %p) failed, last error %#x.\n", share_ctx, ctx, GetLastError());
2071 if (!wglDeleteContext(ctx))
2072 ERR("wglDeleteContext(%p) failed, last error %#x.\n", ctx, GetLastError());
2073 goto out;
2074 }
2075 }
2076
2078 {
2079 ERR("Failed to add the newly created context to the context list\n");
2081 if (!wglDeleteContext(ctx))
2082 ERR("wglDeleteContext(%p) failed, last error %#x.\n", ctx, GetLastError());
2083 goto out;
2084 }
2085
2086 ret->d3d_info = d3d_info;
2087 ret->state_table = device->StateTable;
2088
2089 /* Mark all states dirty to force a proper initialization of the states on
2090 * the first use of the context. Compute states do not need initialization. */
2091 for (state = 0; state <= STATE_HIGHEST; ++state)
2092 {
2093 if (ret->state_table[state].representative && !STATE_IS_COMPUTE(state))
2095 }
2096
2097 ret->device = device;
2098 ret->swapchain = swapchain;
2099 ret->current_rt.texture = target;
2100 ret->current_rt.sub_resource_idx = 0;
2101 ret->tid = GetCurrentThreadId();
2102
2103 ret->render_offscreen = wined3d_resource_is_offscreen(&target->resource);
2104 ret->draw_buffers_mask = context_generate_rt_mask(GL_BACK);
2105 ret->valid = 1;
2106
2107 ret->glCtx = ctx;
2108 ret->hdc_has_format = TRUE;
2109 ret->needs_set = 1;
2110
2111 /* Set up the context defaults */
2113 {
2114 ERR("Cannot activate context to set up defaults.\n");
2117 if (!wglDeleteContext(ctx))
2118 ERR("wglDeleteContext(%p) failed, last error %#x.\n", ctx, GetLastError());
2119 goto out;
2120 }
2121
2123 {
2124 GL_EXTCALL(glDebugMessageCallback(wined3d_debug_callback, ret));
2125 if (TRACE_ON(d3d_synchronous))
2126 gl_info->gl_ops.gl.p_glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
2127 GL_EXTCALL(glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, NULL, GL_FALSE));
2128 if (ERR_ON(d3d))
2129 {
2130 GL_EXTCALL(glDebugMessageControl(GL_DONT_CARE, GL_DEBUG_TYPE_ERROR,
2131 GL_DONT_CARE, 0, NULL, GL_TRUE));
2132 }
2133 if (FIXME_ON(d3d))
2134 {
2136 GL_DONT_CARE, 0, NULL, GL_TRUE));
2138 GL_DONT_CARE, 0, NULL, GL_TRUE));
2139 GL_EXTCALL(glDebugMessageControl(GL_DONT_CARE, GL_DEBUG_TYPE_PORTABILITY,
2140 GL_DONT_CARE, 0, NULL, GL_TRUE));
2141 }
2142 if (WARN_ON(d3d_perf))
2143 {
2144 GL_EXTCALL(glDebugMessageControl(GL_DONT_CARE, GL_DEBUG_TYPE_PERFORMANCE,
2145 GL_DONT_CARE, 0, NULL, GL_TRUE));
2146 }
2147 }
2148
2149 if (gl_info->supported[WINED3D_GL_LEGACY_CONTEXT])
2150 gl_info->gl_ops.gl.p_glGetIntegerv(GL_AUX_BUFFERS, &ret->aux_buffers);
2151
2152 TRACE("Setting up the screen\n");
2153
2154 if (gl_info->supported[WINED3D_GL_LEGACY_CONTEXT])
2155 {
2156 gl_info->gl_ops.gl.p_glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE);
2157 checkGLcall("glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE);");
2158
2159 gl_info->gl_ops.gl.p_glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);
2160 checkGLcall("glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE_EXT);");
2161
2163 checkGLcall("glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR);");
2164 }
2165 else
2166 {
2167 GLuint vao;
2168
2169 GL_EXTCALL(glGenVertexArrays(1, &vao));
2170 GL_EXTCALL(glBindVertexArray(vao));
2171 checkGLcall("creating VAO");
2172 }
2173
2174 gl_info->gl_ops.gl.p_glPixelStorei(GL_PACK_ALIGNMENT, device->surface_alignment);
2175 checkGLcall("glPixelStorei(GL_PACK_ALIGNMENT, device->surface_alignment);");
2176 gl_info->gl_ops.gl.p_glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
2177 checkGLcall("glPixelStorei(GL_UNPACK_ALIGNMENT, 1);");
2178
2179 if (gl_info->supported[ARB_VERTEX_BLEND])
2180 {
2181 /* Direct3D always uses n-1 weights for n world matrices and uses
2182 * 1 - sum for the last one; this matches GL_WEIGHT_SUM_UNITY_ARB.
2183 * Enabling it has no effect unless GL_VERTEX_BLEND_ARB is enabled
2184 * as well. */
2185 gl_info->gl_ops.gl.p_glEnable(GL_WEIGHT_SUM_UNITY_ARB);
2186 checkGLcall("glEnable(GL_WEIGHT_SUM_UNITY_ARB)");
2187 }
2188 if (gl_info->supported[NV_TEXTURE_SHADER2])
2189 {
2190 /* Set up the previous texture input for all shader units. This applies to bump mapping:
2191 * in d3d the texture that provides the bump offset is always unit - 1.
2192 */
2193 for (i = 1; i < gl_info->limits.textures; ++i)
2194 {
2196 gl_info->gl_ops.gl.p_glTexEnvi(GL_TEXTURE_SHADER_NV,
2198 checkGLcall("glTexEnvi(GL_TEXTURE_SHADER_NV, GL_PREVIOUS_TEXTURE_INPUT_NV, ...");
2199 }
2200 }
2201 if (gl_info->supported[ARB_FRAGMENT_PROGRAM])
2202 {
2203 /* MacOS (Radeon X1600 at least, but most likely others too) refuses to draw if GLSL and ARBFP are
2204 * enabled, but the currently bound arbfp program is 0. Enabling ARBFP with prog 0 is invalid, but
2205 * GLSL should bypass this. This causes problems in programs that never use the fixed function pipeline,
2206 * because the ARBFP extension is enabled by the ARBFP pipeline at context creation, but no program
2207 * is ever assigned.
2208 *
2209 * So make sure a program is assigned to each context. The first real ARBFP use will set a different
2210 * program and the dummy program is destroyed when the context is destroyed.
2211 */
2212 static const char dummy_program[] =
2213 "!!ARBfp1.0\n"
2214 "MOV result.color, fragment.color.primary;\n"
2215 "END\n";
2216 GL_EXTCALL(glGenProgramsARB(1, &ret->dummy_arbfp_prog));
2217 GL_EXTCALL(glBindProgramARB(GL_FRAGMENT_PROGRAM_ARB, ret->dummy_arbfp_prog));
2218 GL_EXTCALL(glProgramStringARB(GL_FRAGMENT_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB, strlen(dummy_program), dummy_program));
2219 }
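/* Illustrative sketch, not part of the original source: glProgramStringARB()
 * reports parse errors through the GL error state, so a debugging aid could
 * verify the dummy program along these lines:
 *
 *     if (gl_info->gl_ops.gl.p_glGetError() != GL_NO_ERROR)
 *         ERR("Failed to load the dummy ARBfp program.\n");
 */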
2220
2221 if (gl_info->supported[ARB_POINT_SPRITE])
2222 {
2223 for (i = 0; i < gl_info->limits.textures; ++i)
2224 {
2226 gl_info->gl_ops.gl.p_glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE);
2227 checkGLcall("glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE)");
2228 }
2229 }
2230
2231 if (gl_info->supported[ARB_PROVOKING_VERTEX])
2232 {
2233 GL_EXTCALL(glProvokingVertex(GL_FIRST_VERTEX_CONVENTION));
2234 }
2235 else if (gl_info->supported[EXT_PROVOKING_VERTEX])
2236 {
2237 GL_EXTCALL(glProvokingVertexEXT(GL_FIRST_VERTEX_CONVENTION_EXT));
2238 }
2239 if (!(d3d_info->wined3d_creation_flags & WINED3D_NO_PRIMITIVE_RESTART))
2240 {
2241 if (gl_info->supported[ARB_ES3_COMPATIBILITY])
2242 {
2243 gl_info->gl_ops.gl.p_glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
2244 checkGLcall("enable GL_PRIMITIVE_RESTART_FIXED_INDEX");
2245 }
2246 else
2247 {
2248 FIXME("OpenGL implementation does not support GL_PRIMITIVE_RESTART_FIXED_INDEX.\n");
2249 }
2250 }
2251 if (!(d3d_info->wined3d_creation_flags & WINED3D_LEGACY_CUBEMAP_FILTERING)
2252 && gl_info->supported[ARB_SEAMLESS_CUBE_MAP])
2253 {
2254 gl_info->gl_ops.gl.p_glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
2255 checkGLcall("enable seamless cube map filtering");
2256 }
2257 if (gl_info->supported[ARB_CLIP_CONTROL])
2259 device->shader_backend->shader_init_context_state(ret);
2260 ret->shader_update_mask = (1u << WINED3D_SHADER_TYPE_PIXEL)
2261 | (1u << WINED3D_SHADER_TYPE_VERTEX)
2262 | (1u << WINED3D_SHADER_TYPE_GEOMETRY)
2263 | (1u << WINED3D_SHADER_TYPE_HULL)
2264 | (1u << WINED3D_SHADER_TYPE_DOMAIN)
2265 | (1u << WINED3D_SHADER_TYPE_COMPUTE);
2266
2267 /* If this happens to be the first context for the device, dummy textures
2268 * are not created yet. In that case, they will be created (and bound) by
2269 * create_dummy_textures right after this context is initialized. */
2270 if (device->dummy_textures.tex_2d)
2272
2273 TRACE("Created context %p.\n", ret);
2274
2275 return ret;
2276
2277out:
2278 if (ret->hdc)
2279 wined3d_release_dc(swapchain->win_handle, ret->hdc);
2280 device->shader_backend->shader_free_context_data(ret);
2281 device->adapter->fragment_pipe->free_context_data(ret);
2282 heap_free(ret->texture_type);
2283 heap_free(ret->free_fences);
2284 heap_free(ret->free_occlusion_queries);
2285 heap_free(ret->free_timestamp_queries);
2286 heap_free(ret);
2287 return NULL;
2288}
2289
2291{
2292 BOOL destroy;
2293
2294 TRACE("Destroying ctx %p\n", context);
2295
2297
2298 /* We delay destroying a context while it is active. context_release()
2299 * invokes context_destroy() again when it leaves the last level. */
2300 if (context->level)
2301 {
2302 TRACE("Delaying destruction of context %p.\n", context);
2303 context->destroy_delayed = 1;
2304 /* FIXME: Get rid of a pointer to swapchain from wined3d_context. */
2305 context->swapchain = NULL;
2306 return;
2307 }
2308
2309 if (context->tid == GetCurrentThreadId() || !context->current)
2310 {
2313 destroy = TRUE;
2314 }
2315 else
2316 {
2317 /* Make a copy of gl_info for context_destroy_gl_resources() to use;
2318 the one in wined3d_adapter may go away in the meantime. */
2319 struct wined3d_gl_info *gl_info = heap_alloc(sizeof(*gl_info));
2320 *gl_info = *context->gl_info;
2321 context->gl_info = gl_info;
2322 context->destroyed = 1;
2323 destroy = FALSE;
2324 }
2325
2326 device->shader_backend->shader_free_context_data(context);
2327 device->adapter->fragment_pipe->free_context_data(context);
2328 heap_free(context->texture_type);
2330 if (destroy)
2332}
2333
2335 const struct wined3d_shader_version *shader_version, unsigned int *base, unsigned int *count)
2336{
2337 const struct wined3d_gl_info *gl_info = context->gl_info;
2338
2339 if (!shader_version)
2340 {
2341 *base = 0;
2343 return context->tex_unit_map;
2344 }
2345
2346 if (shader_version->major >= 4)
2347 {
2348 wined3d_gl_limits_get_texture_unit_range(&gl_info->limits, shader_version->type, base, count);
2349 return NULL;
2350 }
2351
2352 switch (shader_version->type)
2353 {
2355 *base = 0;
2357 break;
2361 break;
2362 default:
2363 ERR("Unhandled shader type %#x.\n", shader_version->type);
2364 *base = 0;
2365 *count = 0;
2366 }
2367
2368 return context->tex_unit_map;
2369}
2370
2371/* Context activation is done by the caller. */
2372static void set_blit_dimension(const struct wined3d_gl_info *gl_info, UINT width, UINT height)
2373{
2374 const GLdouble projection[] =
2375 {
2376 2.0 / width, 0.0, 0.0, 0.0,
2377 0.0, 2.0 / height, 0.0, 0.0,
2378 0.0, 0.0, 2.0, 0.0,
2379 -1.0, -1.0, -1.0, 1.0,
2380 };
2381
2383 {
2384 gl_info->gl_ops.gl.p_glMatrixMode(GL_PROJECTION);
2385 checkGLcall("glMatrixMode(GL_PROJECTION)");
2386 gl_info->gl_ops.gl.p_glLoadMatrixd(projection);
2387 checkGLcall("glLoadMatrixd");
2388 }
2389 gl_info->gl_ops.gl.p_glViewport(0, 0, width, height);
2390 checkGLcall("glViewport");
2391}
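/* Worked example, not part of the original source: the projection matrix in
 * set_blit_dimension() maps window coordinates to normalized device
 * coordinates as
 *
 *     x_ndc = 2 * x / width  - 1
 *     y_ndc = 2 * y / height - 1
 *     z_ndc = 2 * z          - 1
 *
 * so (0, 0) lands at (-1, -1) and (width, height) at (1, 1), which lets the
 * blitting code draw directly in screen coordinates. */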
2392
2394{
2395 const struct wined3d_texture *rt = context->current_rt.texture;
2396 unsigned int level;
2397
2398 if (rt->swapchain)
2399 {
2400 RECT window_size;
2401
2402 GetClientRect(context->win_handle, &window_size);
2403 size->cx = window_size.right - window_size.left;
2404 size->cy = window_size.bottom - window_size.top;
2405
2406 return;
2407 }
2408
2409 level = context->current_rt.sub_resource_idx % rt->level_count;
2412}
2413
2414void context_enable_clip_distances(struct wined3d_context *context, unsigned int enable_mask)
2415{
2416 const struct wined3d_gl_info *gl_info = context->gl_info;
2417 unsigned int clip_distance_count = gl_info->limits.user_clip_distances;
2418 unsigned int i, disable_mask, current_mask;
2419
2420 disable_mask = ~enable_mask;
2421 enable_mask &= (1u << clip_distance_count) - 1;
2422 disable_mask &= (1u << clip_distance_count) - 1;
2423 current_mask = context->clip_distance_mask;
2424 context->clip_distance_mask = enable_mask;
2425
2426 enable_mask &= ~current_mask;
2427 while (enable_mask)
2428 {
2429 i = wined3d_bit_scan(&enable_mask);
2430 gl_info->gl_ops.gl.p_glEnable(GL_CLIP_DISTANCE0 + i);
2431 }
2432 disable_mask &= current_mask;
2433 while (disable_mask)
2434 {
2435 i = wined3d_bit_scan(&disable_mask);
2436 gl_info->gl_ops.gl.p_glDisable(GL_CLIP_DISTANCE0 + i);
2437 }
2438 checkGLcall("toggle clip distances");
2439}
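/* Illustrative example, not part of the original source: only clip distances
 * whose state actually changes are touched. Assuming at least three clip
 * distances are supported and distances 0 and 1 are currently enabled, then
 *
 *     context_enable_clip_distances(context, 0x5);
 *
 * enables GL_CLIP_DISTANCE2, disables GL_CLIP_DISTANCE1 and leaves
 * GL_CLIP_DISTANCE0 alone. */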
2440
2441/*****************************************************************************
2442 * SetupForBlit
2443 *
2444 * Sets up a context for DirectDraw blitting.
2445 * All texture units are disabled, and texture unit 0 is made the current unit;
2446 * fog, lighting, blending, alpha test, z test, scissor test and culling are disabled;
2447 * color writing is enabled for all channels;
2448 * register combiners and shaders are disabled;
2449 * the world matrix and texture matrix 0 are set to identity;
2450 * the projection matrix is set up for drawing in screen coordinates.
2451 *
2452 * Params:
2453 * device: Device to activate the context for
2454 * context: Context to set up
2455 *
2456 *****************************************************************************/
2457/* Context activation is done by the caller. */
2458static void SetupForBlit(const struct wined3d_device *device, struct wined3d_context *context)
2459{
2460 const struct wined3d_gl_info *gl_info = context->gl_info;
2461 DWORD sampler;
2462 SIZE rt_size;
2463 int i;
2464
2465 TRACE("Setting up context %p for blitting\n", context);
2466
2467 context_get_rt_size(context, &rt_size);
2468
2469 if (context->last_was_blit)
2470 {
2471 if (context->blit_w != rt_size.cx || context->blit_h != rt_size.cy)
2472 {
2473 set_blit_dimension(gl_info, rt_size.cx, rt_size.cy);
2474 context->blit_w = rt_size.cx;
2475 context->blit_h = rt_size.cy;
2476 /* No need to mark states dirty here; they are still dirty because
2477 * they haven't been applied since the last SetupForBlit() call. */
2478 }
2479 TRACE("Context is already set up for blitting, nothing to do\n");
2480 return;
2481 }
2482 context->last_was_blit = TRUE;
2483
2485 {
2486 /* Disable all textures. The caller can then bind a texture it wants to
2487 * blit from.
2488 *
2489 * The blitting code uses (for now) the fixed-function pipeline, so make sure to reset all
2490 * fixed-function texture units. No need to care about higher samplers.
2491 */
2492 for (i = gl_info->limits.textures - 1; i > 0 ; --i)
2493 {
2494 sampler = context->rev_tex_unit_map[i];
2495 context_active_texture(context, gl_info, i);
2496
2497 if (gl_info->supported[ARB_TEXTURE_CUBE_MAP])
2498 {
2499 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_CUBE_MAP_ARB);
2500 checkGLcall("glDisable GL_TEXTURE_CUBE_MAP_ARB");
2501 }
2502 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_3D);
2503 checkGLcall("glDisable GL_TEXTURE_3D");
2504 if (gl_info->supported[ARB_TEXTURE_RECTANGLE])
2505 {
2506 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_RECTANGLE_ARB);
2507 checkGLcall("glDisable GL_TEXTURE_RECTANGLE_ARB");
2508 }
2509 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_2D);
2510 checkGLcall("glDisable GL_TEXTURE_2D");
2511
2512 gl_info->gl_ops.gl.p_glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
2513 checkGLcall("glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);");
2514
2516 {
2517 if (sampler < MAX_TEXTURES)
2520 }
2521 }
2522
2523 context_active_texture(context, gl_info, 0);
2524 if (gl_info->supported[ARB_TEXTURE_CUBE_MAP])
2525 {
2526 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_CUBE_MAP_ARB);
2527 checkGLcall("glDisable GL_TEXTURE_CUBE_MAP_ARB");
2528 }
2529 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_3D);
2530 checkGLcall("glDisable GL_TEXTURE_3D");
2531 if (gl_info->supported[ARB_TEXTURE_RECTANGLE])
2532 {
2533 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_RECTANGLE_ARB);
2534 checkGLcall("glDisable GL_TEXTURE_RECTANGLE_ARB");
2535 }
2536 gl_info->gl_ops.gl.p_glDisable(GL_TEXTURE_2D);
2537 checkGLcall("glDisable GL_TEXTURE_2D");
2538
2539 gl_info->gl_ops.gl.p_glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
2540
2541 gl_info->gl_ops.gl.p_glMatrixMode(GL_TEXTURE);
2542 checkGLcall("glMatrixMode(GL_TEXTURE)");
2543 gl_info->gl_ops.gl.p_glLoadIdentity();
2544 checkGLcall("glLoadIdentity()");
2545
2546 if (gl_info->supported[EXT_TEXTURE_LOD_BIAS])
2547 {
2548 gl_info->gl_ops.gl.p_glTexEnvf(GL_TEXTURE_FILTER_CONTROL_EXT,
2550 checkGLcall("glTexEnvf GL_TEXTURE_LOD_BIAS_EXT ...");
2551 }
2552
2553 /* Setup transforms */
2554 gl_info->gl_ops.gl.p_glMatrixMode(GL_MODELVIEW);
2555 checkGLcall("glMatrixMode(GL_MODELVIEW)");
2556 gl_info->gl_ops.gl.p_glLoadIdentity();
2557 checkGLcall("glLoadIdentity()");
2559
2560 /* Other misc states */
2561 gl_info->gl_ops.gl.p_glDisable(GL_ALPHA_TEST);
2562 checkGLcall("glDisable(GL_ALPHA_TEST)");
2564 gl_info->gl_ops.gl.p_glDisable(GL_LIGHTING);
2565 checkGLcall("glDisable GL_LIGHTING");
2567 glDisableWINE(GL_FOG);
2568 checkGLcall("glDisable GL_FOG");
2570 }
2571
2572 if (gl_info->supported[ARB_SAMPLER_OBJECTS])
2573 GL_EXTCALL(glBindSampler(0, 0));
2574 context_active_texture(context, gl_info, 0);
2575
2576 sampler = context->rev_tex_unit_map[0];
2578 {
2579 if (sampler < MAX_TEXTURES)
2580 {
2583 }
2585 }
2586
2587 /* Other misc states */
2588 gl_info->gl_ops.gl.p_glDisable(GL_DEPTH_TEST);
2589 checkGLcall("glDisable GL_DEPTH_TEST");
2591 gl_info->gl_ops.gl.p_glDisable(GL_BLEND);
2592 checkGLcall("glDisable GL_BLEND");
2594 gl_info->gl_ops.gl.p_glDisable(GL_CULL_FACE);
2595 checkGLcall("glDisable GL_CULL_FACE");
2597 gl_info->gl_ops.gl.p_glDisable(GL_STENCIL_TEST);
2598 checkGLcall("glDisable GL_STENCIL_TEST");
2600 gl_info->gl_ops.gl.p_glDisable(GL_SCISSOR_TEST);
2601 checkGLcall("glDisable GL_SCISSOR_TEST");
2603 if (gl_info->supported[ARB_POINT_SPRITE])
2604 {
2605 gl_info->gl_ops.gl.p_glDisable(GL_POINT_SPRITE_ARB);
2606 checkGLcall("glDisable GL_POINT_SPRITE_ARB");
2608 }
2609 gl_info->gl_ops.gl.p_glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
2610 checkGLcall("glColorMask");
2611 for (i = 0; i < MAX_RENDER_TARGETS; ++i)
2613 if (gl_info->supported[EXT_SECONDARY_COLOR])
2614 {
2615 gl_info->gl_ops.gl.p_glDisable(GL_COLOR_SUM_EXT);
2617 checkGLcall("glDisable(GL_COLOR_SUM_EXT)");
2618 }
2619
2620 context->last_was_rhw = TRUE;
2621 context_invalidate_state(context, STATE_VDECL); /* because of last_was_rhw = TRUE */
2622
2625
2626 /* FIXME: Make draw_textured_quad() able to work with an upper-left origin. */
2627 if (gl_info->supported[ARB_CLIP_CONTROL])
2629
2630 set_blit_dimension(gl_info, rt_size.cx, rt_size.cy);
2631
2632 /* Disable shaders */
2633 device->shader_backend->shader_disable(device->shader_priv, context);
2634
2635 context->blit_w = rt_size.cx;
2636 context->blit_h = rt_size.cy;
2639}
2640
2641static inline BOOL is_rt_mask_onscreen(DWORD rt_mask)
2642{
2643 return rt_mask & (1u << 31);
2644}
2645
2646 static inline GLenum draw_buffer_from_rt_mask(DWORD rt_mask)
2647{
2648 return rt_mask & ~(1u << 31);
2649}
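/* Illustrative sketch, not part of the original source: a render target mask
 * either encodes an on-screen draw buffer (bit 31 set, GLenum in the low
 * bits) or a bitmask of FBO color attachments (bit i set selects
 * GL_COLOR_ATTACHMENT0 + i). For example:
 *
 *     DWORD onscreen  = (1u << 31) | GL_BACK;   -- decoded by draw_buffer_from_rt_mask()
 *     DWORD offscreen = (1u << 0) | (1u << 2);  -- attachments 0 and 2, attachment 1 disabled
 */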
2650
2651/* Context activation is done by the caller. */
2653{
2654 const struct wined3d_gl_info *gl_info = context->gl_info;
2655 GLenum draw_buffers[MAX_RENDER_TARGET_VIEWS];
2656
2657 if (!rt_mask)
2658 {
2659 gl_info->gl_ops.gl.p_glDrawBuffer(GL_NONE);
2660 }
2661 else if (is_rt_mask_onscreen(rt_mask))
2662 {
2663 gl_info->gl_ops.gl.p_glDrawBuffer(draw_buffer_from_rt_mask(rt_mask));
2664 }
2665 else
2666 {
2668 {
2669 unsigned int i = 0;
2670
2671 while (rt_mask)
2672 {
2673 if (rt_mask & 1)
2674 draw_buffers[i] = GL_COLOR_ATTACHMENT0 + i;
2675 else
2676 draw_buffers[i] = GL_NONE;
2677
2678 rt_mask >>= 1;
2679 ++i;
2680 }
2681
2682 if (gl_info->supported[ARB_DRAW_BUFFERS])
2683 {
2684 GL_EXTCALL(glDrawBuffers(i, draw_buffers));
2685 }
2686 else
2687 {
2688 gl_info->gl_ops.gl.p_glDrawBuffer(draw_buffers[0]);
2689 }
2690 }
2691 else
2692 {
2693 ERR("Unexpected draw buffers mask with backbuffer ORM.\n");
2694 }
2695 }
2696
2697 checkGLcall("apply draw buffers");
2698}
2699
2700/* Context activation is done by the caller. */
2702{
2703 const struct wined3d_gl_info *gl_info = context->gl_info;
2704 DWORD *current_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
2706
2707 if (new_mask == *current_mask)
2708 return;
2709
2710 gl_info->gl_ops.gl.p_glDrawBuffer(buffer);
2711 checkGLcall("glDrawBuffer()");
2712
2713 *current_mask = new_mask;
2714}
2715
2716/* Context activation is done by the caller. */
2717void context_active_texture(struct wined3d_context *context, const struct wined3d_gl_info *gl_info, unsigned int unit)
2718{
2720 checkGLcall("glActiveTexture");
2721 context->active_texture = unit;
2722}
2723
2725{
2726 const struct wined3d_gl_info *gl_info = context->gl_info;
2727
2728 if (binding == GL_ELEMENT_ARRAY_BUFFER)
2730
2731 GL_EXTCALL(glBindBuffer(binding, name));
2732}
2733
2735{
2736 const struct wined3d_dummy_textures *textures = &context->device->dummy_textures;
2737 const struct wined3d_gl_info *gl_info = context->gl_info;
2738 DWORD unit = context->active_texture;
2739 DWORD old_texture_type = context->texture_type[unit];
2740
2741 if (name)
2742 {
2743 gl_info->gl_ops.gl.p_glBindTexture(target, name);
2744 }
2745 else
2746 {
2747 target = GL_NONE;
2748 }
2749
2750 if (old_texture_type != target)
2751 {
2752 switch (old_texture_type)
2753 {
2754 case GL_NONE:
2755 /* nothing to do */
2756 break;
2757 case GL_TEXTURE_1D:
2758 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D, textures->tex_1d);
2759 checkGLcall("glBindTexture");
2760 break;
2762 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_1D_ARRAY, textures->tex_1d_array);
2763 checkGLcall("glBindTexture");
2764 break;
2765 case GL_TEXTURE_2D:
2766 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D, textures->tex_2d);
2767 break;
2769 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_ARRAY, textures->tex_2d_array);
2770 break;
2772 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_RECTANGLE_ARB, textures->tex_rect);
2773 break;
2775 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP, textures->tex_cube);
2776 break;
2778 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, textures->tex_cube_array);
2779 break;
2780 case GL_TEXTURE_3D:
2781 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_3D, textures->tex_3d);
2782 break;
2783 case GL_TEXTURE_BUFFER:
2784 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_BUFFER, textures->tex_buffer);
2785 break;
2787 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, textures->tex_2d_ms);
2788 break;
2790 gl_info->gl_ops.gl.p_glBindTexture(GL_TEXTURE_2D_MULTISAMPLE_ARRAY, textures->tex_2d_ms_array);
2791 break;
2792 default:
2793 ERR("Unexpected texture target %#x.\n", old_texture_type);
2794 }
2795
2796 context->texture_type[unit] = target;
2797 }
2798
2799 checkGLcall("bind texture");
2800}
2801
2803 const struct wined3d_bo_address *data, size_t size, GLenum binding, DWORD flags)
2804{
2805 const struct wined3d_gl_info *gl_info;
2806 BYTE *memory;
2807
2808 if (!data->buffer_object)
2809 return data->addr;
2810
2811 gl_info = context->gl_info;
2812 context_bind_bo(context, binding, data->buffer_object);
2813
2814 if (gl_info->supported[ARB_MAP_BUFFER_RANGE])
2815 {
2816 GLbitfield map_flags = wined3d_resource_gl_map_flags(flags) & ~GL_MAP_FLUSH_EXPLICIT_BIT;
2817 memory = GL_EXTCALL(glMapBufferRange(binding, (INT_PTR)data->addr, size, map_flags));
2818 }
2819 else
2820 {
2822 memory += (INT_PTR)data->addr;
2823 }
2824
2825 context_bind_bo(context, binding, 0);
2826 checkGLcall("Map buffer object");
2827
2828 return memory;
2829}
2830
2832 const struct wined3d_bo_address *data, GLenum binding)
2833{
2834 const struct wined3d_gl_info *gl_info;
2835
2836 if (!data->buffer_object)
2837 return;
2838
2839 gl_info = context->gl_info;
2840 context_bind_bo(context, binding, data->buffer_object);
2841 GL_EXTCALL(glUnmapBuffer(binding));
2842 context_bind_bo(context, binding, 0);
2843 checkGLcall("Unmap buffer object");
2844}
2845
2846 void context_copy_bo_address(struct wined3d_context *context,
2847 const struct wined3d_bo_address *dst, GLenum dst_binding,
2848 const struct wined3d_bo_address *src, GLenum src_binding, size_t size)
2849{
2850 const struct wined3d_gl_info *gl_info;
2851 BYTE *dst_ptr, *src_ptr;
2852
2853 gl_info = context->gl_info;
2854
2855 if (dst->buffer_object && src->buffer_object)
2856 {
2857 if (gl_info->supported[ARB_COPY_BUFFER])
2858 {
2859 GL_EXTCALL(glBindBuffer(GL_COPY_READ_BUFFER, src->buffer_object));
2860 GL_EXTCALL(glBindBuffer(GL_COPY_WRITE_BUFFER, dst->buffer_object));
2862 (GLintptr)src->addr, (GLintptr)dst->addr, size));
2863 checkGLcall("direct buffer copy");
2864 }
2865 else
2866 {
2867 src_ptr = context_map_bo_address(context, src, size, src_binding, WINED3D_MAP_READ);
2868 dst_ptr = context_map_bo_address(context, dst, size, dst_binding, WINED3D_MAP_WRITE);
2869
2870 memcpy(dst_ptr, src_ptr, size);
2871
2872 context_unmap_bo_address(context, dst, dst_binding);
2873 context_unmap_bo_address(context, src, src_binding);
2874 }
2875 }
2876 else if (!dst->buffer_object && src->buffer_object)
2877 {
2878 context_bind_bo(context, src_binding, src->buffer_object);
2879 GL_EXTCALL(glGetBufferSubData(src_binding, (GLintptr)src->addr, size, dst->addr));
2880 checkGLcall("buffer download");
2881 }
2882 else if (dst->buffer_object && !src->buffer_object)
2883 {
2884 context_bind_bo(context, dst_binding, dst->buffer_object);
2885 GL_EXTCALL(glBufferSubData(dst_binding, (GLintptr)dst->addr, size, src->addr));
2886 checkGLcall("buffer upload");
2887 }
2888 else
2889 {
2890 memcpy(dst->addr, src->addr, size);
2891 }
2892}
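/* Illustrative usage, not part of the original source: the copy helper above
 * handles every combination of buffer object and system memory addresses.
 * Downloading from a buffer object into system memory, for instance, might
 * look like this (vbo_name, sysmem_ptr and size are placeholders):
 *
 *     struct wined3d_bo_address src, dst;
 *     src.buffer_object = vbo_name;  src.addr = NULL;        -- offset 0 within the BO
 *     dst.buffer_object = 0;         dst.addr = sysmem_ptr;  -- plain system memory
 *     context_copy_bo_address(context, &dst, GL_ARRAY_BUFFER, &src, GL_ARRAY_BUFFER, size);
 */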
2893
2895{
2896 if (context->render_offscreen == offscreen)
2897 return;
2898
2901 if (!context->gl_info->supported[ARB_CLIP_CONTROL])
2902 {
2906 }
2908 if (context->gl_info->supported[ARB_FRAGMENT_COORD_CONVENTIONS])
2910 context->render_offscreen = offscreen;
2911}
2912
2913static BOOL match_depth_stencil_format(const struct wined3d_format *existing,
2914 const struct wined3d_format *required)
2915{
2916 if (existing == required)
2917 return TRUE;
2920 return FALSE;
2921 if (existing->depth_size < required->depth_size)
2922 return FALSE;
2923 /* If stencil bits are used the exact amount is required - otherwise
2924 * wrapping won't work correctly. */
2925 if (required->stencil_size && required->stencil_size != existing->stencil_size)
2926 return FALSE;
2927 return TRUE;
2928}
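/* Worked examples, not part of the original source: an existing format
 * matches if it has at least as many depth bits and, when stencil is
 * requested, exactly the same number of stencil bits:
 *
 *     existing D24S8, required D16   -> TRUE  (24 >= 16, no stencil requested)
 *     existing D24S8, required D24S4 -> FALSE (stencil sizes differ)
 *     existing D16,   required D24S8 -> FALSE (not enough depth bits)
 */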
2929
2930/* Context activation is done by the caller. */
2932 const struct wined3d_rendertarget_view *depth_stencil)
2933{
2934 /* Onscreen surfaces are always in a swapchain */
2935 struct wined3d_swapchain *swapchain = context->current_rt.texture->swapchain;
2936
2937 if (context->render_offscreen || !depth_stencil) return;
2938 if (match_depth_stencil_format(swapchain->ds_format, depth_stencil->format)) return;
2939
2940 /* TODO: If the requested format would satisfy the needs of the existing one (reverse match),
2941 * or no onscreen depth buffer was created, the OpenGL drawable could be changed to the new
2942 * format. */
2943 WARN("Depth stencil format is not supported by WGL, rendering the backbuffer in an FBO\n");
2944
2945 /* The currently active context is the one needed to access the swapchain's onscreen buffers. */
2946 if (!(wined3d_texture_load_location(context->current_rt.texture, context->current_rt.sub_resource_idx,
2948 ERR("Failed to load location.\n");
2949 swapchain->render_to_fbo = TRUE;
2952}
2953
2955{
2957 {
2958 case ORM_FBO:
2959 return GL_COLOR_ATTACHMENT0;
2960
2961 case ORM_BACKBUFFER:
2962 return context->aux_buffers > 0 ? GL_AUX0 : GL_BACK;
2963
2964 default:
2965 FIXME("Unhandled offscreen rendering mode %#x.\n", wined3d_settings.offscreen_rendering_mode);
2966 return GL_BACK;
2967 }
2968}
2969
2971{
2972 if (!rt || rt->resource.format->id == WINED3DFMT_NULL)
2973 return 0;
2974 else if (rt->swapchain)
2976 else
2978}
2979
2980/* Context activation is done by the caller. */
2982{
2983 struct wined3d_texture *rt = context->current_rt.texture;
2984 struct wined3d_surface *surface;
2985 DWORD rt_mask, *cur_mask;
2986
2988 {
2989 if (context->render_offscreen)
2990 {
2992
2993 surface = rt->sub_resources[context->current_rt.sub_resource_idx].u.surface;
2994 context_apply_fbo_state_blit(context, GL_FRAMEBUFFER, surface, NULL, rt->resource.draw_binding);
2995 if (rt->resource.format->id != WINED3DFMT_NULL)
2996 rt_mask = 1;
2997 else
2998 rt_mask = 0;
2999 }
3000 else
3001 {
3002 context->current_fbo = NULL;
3005 }
3006 }
3007 else
3008 {
3010 }
3011
3012 cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
3013
3014 if (rt_mask != *cur_mask)
3015 {
3017 *cur_mask = rt_mask;
3018 }
3019
3021 {
3023 }
3024
3027}
3028
3029static BOOL have_framebuffer_attachment(unsigned int rt_count, struct wined3d_rendertarget_view * const *rts,
3030 const struct wined3d_rendertarget_view *ds)
3031{
3032 unsigned int i;
3033
3034 if (ds)
3035 return TRUE;
3036
3037 for (i = 0; i < rt_count; ++i)
3038 {
3039 if (rts[i] && rts[i]->format->id != WINED3DFMT_NULL)
3040 return TRUE;
3041 }
3042
3043 return FALSE;
3044}
3045
3046/* Context activation is done by the caller. */
3048 UINT rt_count, const struct wined3d_fb_state *fb)
3049{
3050 struct wined3d_rendertarget_view * const *rts = fb->render_targets;
3051 struct wined3d_rendertarget_view *dsv = fb->depth_stencil;
3052 const struct wined3d_gl_info *gl_info = context->gl_info;
3053 DWORD rt_mask = 0, *cur_mask;
3054 unsigned int i;
3055
3056 if (isStateDirty(context, STATE_FRAMEBUFFER) || fb != state->fb
3057 || rt_count != gl_info->limits.buffers)
3058 {
3059 if (!have_framebuffer_attachment(rt_count, rts, dsv))
3060 {
3061 WARN("Invalid render target config, need at least one attachment.\n");
3062 return FALSE;
3063 }
3064
3066 {
3068
3069 if (!rt_count || wined3d_resource_is_offscreen(rts[0]->resource))
3070 {
3071 memset(context->blit_targets, 0, sizeof(context->blit_targets));
3072 for (i = 0; i < rt_count; ++i)
3073 {
3074 if (rts[i])
3075 {
3076 context->blit_targets[i].gl_view = rts[i]->gl_view;
3077 context->blit_targets[i].resource = rts[i]->resource;
3078 context->blit_targets[i].sub_resource_idx = rts[i]->sub_resource_idx;
3079 context->blit_targets[i].layer_count = rts[i]->layer_count;
3080 }
3081 if (rts[i] && rts[i]->format->id != WINED3DFMT_NULL)
3082 rt_mask |= (1u << i);
3083 }
3086 rt_count ? rts[0]->resource->draw_binding : 0,
3087 dsv ? dsv->resource->draw_binding : 0);
3088 }
3089 else
3090 {
3094 }
3095
3096 /* If the framebuffer is not the device's fb, the device's fb has to be reapplied
3097 * on the next draw. Otherwise we could mark the framebuffer state clean here, once
3098 * the state management allows this. */
3100 }
3101 else
3102 {
3105 }
3106 }
3108 && (!rt_count || wined3d_resource_is_offscreen(rts[0]->resource)))
3109 {
3110 for (i = 0; i < rt_count; ++i)
3111 {
3112 if (rts[i] && rts[i]->format->id != WINED3DFMT_NULL)
3113 rt_mask |= (1u << i);
3114 }
3115 }
3116 else
3117 {
3120 }
3121
3122 cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
3123
3124 if (rt_mask != *cur_mask)
3125 {
3127 *cur_mask = rt_mask;
3129 }
3130
3132 {
3134 }
3135
3136 context->last_was_blit = FALSE;
3137
3138 /* Blending and clearing should be orthogonal, but tests on the NVIDIA
3139 * driver show that disabling blending when clearing improves clearing
3140 * performance considerably. */
3141 gl_info->gl_ops.gl.p_glDisable(GL_BLEND);
3142 gl_info->gl_ops.gl.p_glEnable(GL_SCISSOR_TEST);
3143 if (rt_count && gl_info->supported[ARB_FRAMEBUFFER_SRGB])
3144 {
3145 if (needs_srgb_write(context, state, fb))
3146 gl_info->gl_ops.gl.p_glEnable(GL_FRAMEBUFFER_SRGB);
3147 else
3148 gl_info->gl_ops.gl.p_glDisable(GL_FRAMEBUFFER_SRGB);
3150 }
3151 checkGLcall("setting up state for clear");
3152
3156
3157 return TRUE;
3158}
3159
3161{
3162 struct wined3d_rendertarget_view * const *rts = state->fb->render_targets;
3164 DWORD rt_mask, mask;
3165 unsigned int i;
3166
3169 else if (!context->render_offscreen)
3171
3172 /* If we attach more buffers than supported in dual blend mode, the NVIDIA
3173 * driver generates the following error:
3174 * GL_INVALID_OPERATION error generated. State(s) are invalid: blend.
3175 * DX11 does not treat this configuration as invalid, so disable the unused ones.
3176 */
3177 rt_mask = ps ? ps->reg_maps.rt_mask : 1;
3179 rt_mask &= context->d3d_info->valid_dual_rt_mask;
3180 else
3181 rt_mask &= context->d3d_info->valid_rt_mask;
3182
3183 mask = rt_mask;
3184 i = 0;
3185 while (mask)
3186 {
3188 if (!rts[i] || rts[i]->format->id == WINED3DFMT_NULL)
3189 rt_mask &= ~(1u << i);
3190 }
3191
3192 return rt_mask;
3193}
3194
3195/* Context activation is done by the caller. */
3196void context_state_fb(struct wined3d_context *context, const struct wined3d_state *state, DWORD state_id)
3197{
3199 const struct wined3d_fb_state *fb = state->fb;
3200 DWORD color_location = 0;
3201 DWORD *cur_mask;
3202
3204 {
3205 if (!context->render_offscreen)
3206 {
3209 }
3210 else
3211 {
3212 unsigned int i;
3213
3214 memset(context->blit_targets, 0, sizeof(context->blit_targets));
3215 for (i = 0; i < context->gl_info->limits.buffers; ++i)
3216 {
3217 if (!fb->render_targets[i])
3218 continue;
3219
3220 context->blit_targets[i].gl_view = fb->render_targets[i]->gl_view;
3221 context->blit_targets[i].resource = fb->render_targets[i]->resource;
3222 context->blit_targets[i].sub_resource_idx = fb->render_targets[i]->sub_resource_idx;
3223 context->blit_targets[i].layer_count = fb->render_targets[i]->layer_count;
3224
3225 if (!color_location)
3226 color_location = fb->render_targets[i]->resource->draw_binding;
3227 }
3230 color_location, fb->depth_stencil ? fb->depth_stencil->resource->draw_binding : 0);
3231 }
3232 }
3233
3234 cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
3235 if (rt_mask != *cur_mask)
3236 {
3238 *cur_mask = rt_mask;
3239 }
3240 context->constant_update_mask |= WINED3D_SHADER_CONST_PS_Y_CORR;
3241}
3242
3243 static void context_map_stage(struct wined3d_context *context, DWORD stage, DWORD unit)
3244{
3245 DWORD i = context->rev_tex_unit_map[unit];
3246 DWORD j = context->tex_unit_map[stage];
3247
3248 TRACE("Mapping stage %u to unit %u.\n", stage, unit);
3249 context->tex_unit_map[stage] = unit;
3250 if (i != WINED3D_UNMAPPED_STAGE && i != stage)
3251 context->tex_unit_map[i] = WINED3D_UNMAPPED_STAGE;
3252
3253 context->rev_tex_unit_map[unit] = stage;
3254 if (j != WINED3D_UNMAPPED_STAGE && j != unit)
3255 context->rev_tex_unit_map[j] = WINED3D_UNMAPPED_STAGE;
3256}
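/* Illustrative example, not part of the original source: context_map_stage()
 * keeps the two mapping tables consistent in both directions. If stage 2
 * currently maps to unit 2, then after
 *
 *     context_map_stage(context, 2, 5);
 *
 * tex_unit_map[2] == 5 and rev_tex_unit_map[5] == 2, while the stale entries
 * (the stage that previously used unit 5, and unit 2's reverse entry) are
 * reset to WINED3D_UNMAPPED_STAGE. */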
3257
3259{
3260 DWORD i;
3261
3262 for (i = 0; i <= WINED3D_HIGHEST_TEXTURE_STATE; ++i)
3264}
3265
3267 const struct wined3d_state *state)
3268{
3269 UINT i, start, end;
3270
3271 context->fixed_function_usage_map = 0;
3272 for (i = 0; i < MAX_TEXTURES; ++i)
3273 {
3274 enum wined3d_texture_op color_op = state->texture_states[i][WINED3D_TSS_COLOR_OP];
3275 enum wined3d_texture_op alpha_op = state->texture_states[i][WINED3D_TSS_ALPHA_OP];
3276 DWORD color_arg1 = state->texture_states[i][WINED3D_TSS_COLOR_ARG1] & WINED3DTA_SELECTMASK;
3277 DWORD color_arg2 = state->texture_states[i][WINED3D_TSS_COLOR_ARG2] & WINED3DTA_SELECTMASK;
3278 DWORD color_arg3 = state->texture_states[i][WINED3D_TSS_COLOR_ARG0] & WINED3DTA_SELECTMASK;
3279 DWORD alpha_arg1 = state->texture_states[i][WINED3D_TSS_ALPHA_ARG1] & WINED3DTA_SELECTMASK;
3280 DWORD alpha_arg2 = state->texture_states[i][WINED3D_TSS_ALPHA_ARG2] & WINED3DTA_SELECTMASK;
3281 DWORD alpha_arg3 = state->texture_states[i][WINED3D_TSS_ALPHA_ARG0] & WINED3DTA_SELECTMASK;
3282
3283 /* Not used, and disable higher stages. */
3284 if (color_op == WINED3D_TOP_DISABLE)
3285 break;
3286
3287 if (((color_arg1 == WINED3DTA_TEXTURE) && color_op != WINED3D_TOP_SELECT_ARG2)
3288 || ((color_arg2 == WINED3DTA_TEXTURE) && color_op != WINED3D_TOP_SELECT_ARG1)
3289 || ((color_arg3 == WINED3DTA_TEXTURE)
3290 && (color_op == WINED3D_TOP_MULTIPLY_ADD || color_op == WINED3D_TOP_LERP))
3291 || ((alpha_arg1 == WINED3DTA_TEXTURE) && alpha_op != WINED3D_TOP_SELECT_ARG2)
3292 || ((alpha_arg2 == WINED3DTA_TEXTURE) && alpha_op != WINED3D_TOP_SELECT_ARG1)
3293 || ((alpha_arg3 == WINED3DTA_TEXTURE)
3294 && (alpha_op == WINED3D_TOP_MULTIPLY_ADD || alpha_op == WINED3D_TOP_LERP)))
3295 context->fixed_function_usage_map |= (1u << i);
3296
3297 if ((color_op == WINED3D_TOP_BUMPENVMAP || color_op == WINED3D_TOP_BUMPENVMAP_LUMINANCE)
3298 && i < MAX_TEXTURES - 1)
3299 context->fixed_function_usage_map |= (1u << (i + 1));
3300 }
3301
3302 if (i < context->lowest_disabled_stage)
3303 {
3304 start = i;
3305 end = context->lowest_disabled_stage;
3306 }
3307 else
3308 {
3309 start = context->lowest_disabled_stage;
3310 end = i;
3311 }
3312
3313 context->lowest_disabled_stage = i;
3314 for (i = start + 1; i < end; ++i)
3315 {
3317 }
3318}
3319
3321 const struct wined3d_state *state)
3322{
3323 const struct wined3d_d3d_info *d3d_info = context->d3d_info;
3324 unsigned int i, tex;
3325 WORD ffu_map;
3326
3327 ffu_map = context->fixed_function_usage_map;
3328
3329 if (d3d_info->limits.ffp_textures == d3d_info->limits.ffp_blend_stages
3330 || context->lowest_disabled_stage <= d3d_info->limits.ffp_textures)
3331 {
3332 for (i = 0; ffu_map; ffu_map >>= 1, ++i)
3333 {
3334 if (!(ffu_map & 1))
3335 continue;
3336
3337 if (context->tex_unit_map[i] != i)
3338 {
3342 }
3343 }
3344 return;
3345 }
3346
3347 /* Now work out the mapping */
3348 tex = 0;
3349 for (i = 0; ffu_map; ffu_map >>= 1, ++i)
3350 {
3351 if (!(ffu_map & 1))
3352 continue;
3353
3354 if (context->tex_unit_map[i] != tex)
3355 {
3359 }
3360
3361 ++tex;
3362 }
3363}
3364
3366{
3367 const struct wined3d_d3d_info *d3d_info = context->d3d_info;
3368 const struct wined3d_shader_resource_info *resource_info =
3369 state->shader[WINED3D_SHADER_TYPE_PIXEL]->reg_maps.resource_info;
3370 unsigned int i;
3371
3372 for (i = 0; i < MAX_FRAGMENT_SAMPLERS; ++i)
3373 {
3374 if (resource_info[i].type && context->tex_unit_map[i] != i)
3375 {
3378 if (i < d3d_info->limits.ffp_blend_stages)
3380 }
3381 }
3382}
3383
3385 const struct wined3d_shader_resource_info *ps_resource_info, DWORD unit)
3386{
3387 DWORD current_mapping = context->rev_tex_unit_map[unit];
3388
3389 /* Not currently used */
3390 if (current_mapping == WINED3D_UNMAPPED_STAGE)
3391 return TRUE;
3392
3393 if (current_mapping < MAX_FRAGMENT_SAMPLERS)
3394 {
3395 /* Used by a fragment sampler */
3396
3397 if (!ps_resource_info)
3398 {
3399 /* No pixel shader, check fixed function */
3400 return current_mapping >= MAX_TEXTURES || !(context->fixed_function_usage_map & (1u << current_mapping));
3401 }
3402
3403 /* Pixel shader, check the shader's sampler map */
3404 return !ps_resource_info[current_mapping].type;
3405 }
3406
3407 return TRUE;
3408}
3409
3411{
3412 const struct wined3d_shader_resource_info *vs_resource_info =
3413 state->shader[WINED3D_SHADER_TYPE_VERTEX]->reg_maps.resource_info;
3414 const struct wined3d_shader_resource_info *ps_resource_info = NULL;
3415 const struct wined3d_gl_info *gl_info = context->gl_info;
3416 int start = min(MAX_COMBINED_SAMPLERS, gl_info->limits.graphics_samplers) - 1;
3417 int i;
3418
3419 /* Note that we only care if a resource is used or not, not the
3420 * resource's specific type. Otherwise we'd need to call
3421 * shader_update_samplers() here for 1.x pixelshaders. */
3422 if (ps)
3423 ps_resource_info = state->shader[WINED3D_SHADER_TYPE_PIXEL]->reg_maps.resource_info;
3424
3425 for (i = 0; i < MAX_VERTEX_SAMPLERS; ++i)
3426 {
3427 DWORD vsampler_idx = i + MAX_FRAGMENT_SAMPLERS;
3428 if (vs_resource_info[i].type)
3429 {
3430 while (start >= 0)
3431 {
3432 if (context_unit_free_for_vs(context, ps_resource_info, start))
3433 {
3434 if (context->tex_unit_map[vsampler_idx] != start)
3435 {
3436 context_map_stage(context, vsampler_idx, start);
3438 }
3439
3440 --start;
3441 break;
3442 }
3443
3444 --start;
3445 }
3446 if (context->tex_unit_map[vsampler_idx] == WINED3D_UNMAPPED_STAGE)
3447 WARN("Couldn't find a free texture unit for vertex sampler %u.\n", i);
3448 }
3449 }
3450}
3451
3453{
3454 const struct wined3d_gl_info *gl_info = context->gl_info;
3455 BOOL vs = use_vs(state);
3456 BOOL ps = use_ps(state);
3457
3458 if (!ps)
3460
3461 /* Try to go for a 1:1 mapping of the samplers when possible. Pixel shaders
3462 * need a 1:1 map at the moment.
3463 * When the mapping of a stage is changed, sampler and ALL texture stage
3464 * states have to be reset. */
3465
3466 if (gl_info->limits.graphics_samplers >= MAX_COMBINED_SAMPLERS)
3467 return;
3468
3469 if (ps)
3471 else
3473
3474 if (vs)
3476}
3477
3478/* Context activation is done by the caller. */
3480{
3481 DWORD rt_mask, *cur_mask;
3482
3484
3485 cur_mask = context->current_fbo ? &context->current_fbo->rt_mask : &context->draw_buffers_mask;
3487 if (rt_mask != *cur_mask)
3488 {
3490 *cur_mask = rt_mask;
3491 }
3492}
3493
3494static BOOL fixed_get_input(BYTE usage, BYTE usage_idx, unsigned int *regnum)
3495{
3497 *regnum = WINED3D_FFP_POSITION;
3499 *regnum = WINED3D_FFP_BLENDWEIGHT;
3501 *regnum = WINED3D_FFP_BLENDINDICES;
3503 *regnum = WINED3D_FFP_NORMAL;
3505 *regnum = WINED3D_FFP_PSIZE;
3507 *regnum = WINED3D_FFP_DIFFUSE;
3508 else if (usage == WINED3D_DECL_USAGE_COLOR && usage_idx == 1)
3509 *regnum = WINED3D_FFP_SPECULAR;
3511 *regnum = WINED3D_FFP_TEXCOORD0 + usage_idx;
3512 else
3513 {
3514 WARN("Unsupported input stream [usage=%s, usage_idx=%u].\n", debug_d3ddeclusage(usage), usage_idx);
3515 *regnum = ~0u;
3516 return FALSE;
3517 }
3518
3519 return TRUE;
3520}
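/* Worked examples, not part of the original source:
 *
 *     usage WINED3D_DECL_USAGE_TEXCOORD, usage_idx 3 -> WINED3D_FFP_TEXCOORD0 + 3
 *     usage WINED3D_DECL_USAGE_COLOR,    usage_idx 0 -> WINED3D_FFP_DIFFUSE
 *     usage WINED3D_DECL_USAGE_COLOR,    usage_idx 1 -> WINED3D_FFP_SPECULAR
 *
 * Any other combination falls through to the WARN() above and is rejected as
 * an unsupported fixed-function input. */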
3521
3522/* Context activation is done by the caller. */
3524 const struct wined3d_state *state, const struct wined3d_gl_info *gl_info,
3525 const struct wined3d_d3d_info *d3d_info)
3526{
3527 /* We need to deal with frequency data! */
3528 struct wined3d_vertex_declaration *declaration = state->vertex_declaration;
3529 BOOL generic_attributes = d3d_info->ffp_generic_attributes;
3530 BOOL use_vshader = use_vs(state);
3531 unsigned int i;
3532
3533 stream_info->use_map = 0;
3534 stream_info->swizzle_map = 0;
3535 stream_info->position_transformed = 0;
3536
3537 if (!declaration)
3538 return;
3539
3540 stream_info->position_transformed = declaration->position_transformed;
3541
3542 /* Translate the declaration into strided data. */
3543 for (i = 0; i < declaration->element_count; ++i)
3544 {
3545 const struct wined3d_vertex_declaration_element *element = &declaration->elements[i];
3546 const struct wined3d_stream_state *stream = &state->streams[element->input_slot];
3547 BOOL stride_used;
3548 unsigned int idx;
3549
3550 TRACE("%p Element %p (%u of %u).\n", declaration->elements,
3551 element, i + 1, declaration->element_count);
3552
3553 if (!stream->buffer)
3554 continue;
3555
3556 TRACE("offset %u input_slot %u usage_idx %d.\n", element->offset, element->input_slot, element->usage_idx);
3557
3558 if (use_vshader)
3559 {
3560 if (element->output_slot == WINED3D_OUTPUT_SLOT_UNUSED)
3561 {
3562 stride_used = FALSE;
3563 }
3564 else if (element->output_slot == WINED3D_OUTPUT_SLOT_SEMANTIC)
3565 {
3566 /* TODO: Assuming vertex declarations are usually used with the
3567 * same or a similar shader, it might be worth it to store the
3568 * last used output slot and try that one first. */
3569 stride_used = vshader_get_input(state->shader[WINED3D_SHADER_TYPE_VERTEX],
3570 element->usage, element->usage_idx, &idx);
3571 }
3572 else
3573 {
3574 idx = element->output_slot;
3575 stride_used = TRUE;
3576 }
3577 }
3578 else
3579 {
3580 if (!generic_attributes && !element->ffp_valid)
3581 {
3582 WARN("Skipping unsupported fixed function element of format %s and usage %s.\n",
3583 debug_d3dformat(element->format->id), debug_d3ddeclusage(element->usage));
3584 stride_used = FALSE;
3585 }
3586 else
3587 {
3588 stride_used = fixed_get_input(element->usage, element->usage_idx, &idx);
3589 }
3590 }
3591
3592 if (stride_used)
3593 {
3594 TRACE("Load %s array %u [usage %s, usage_idx %u, "
3595 "input_slot %u, offset %u, stride %u, format %s, class %s, step_rate %u].\n",
3596 use_vshader ? "shader": "fixed function", idx,
3597 debug_d3ddeclusage(element->usage), element->usage_idx, element->input_slot,
3598 element->offset, stream->stride, debug_d3dformat(element->format->id),
3599 debug_d3dinput_classification(element->input_slot_class), element->instance_data_step_rate);
3600
3601 stream_info->elements[idx].format = element->format;
3602 stream_info->elements[idx].data.buffer_object = 0;
3603 stream_info->elements[idx].data.addr = (BYTE *)NULL + stream->offset + element->offset;
3604 stream_info->elements[idx].stride = stream->stride;
3605 stream_info->elements[idx].stream_idx = element->input_slot;
3607 {
3608 stream_info->elements[idx].divisor = 1;
3609 }
3610 else if (element->input_slot_class == WINED3D_INPUT_PER_INSTANCE_DATA)
3611 {
3612 stream_info->elements[idx].divisor = element->instance_data_step_rate;
3613 if (!element->instance_data_step_rate)
3614 FIXME("Instance step rate 0 not implemented.\n");
3615 }
3616 else
3617 {
3618 stream_info->elements[idx].divisor = 0;
3619 }
3620
3621 if (!gl_info->supported[ARB_VERTEX_ARRAY_BGRA]
3622 && element->format->id == WINED3DFMT_B8G8R8A8_UNORM)
3623 {
3624 stream_info->swizzle_map |= 1u << idx;
3625 }
3626 stream_info->use_map |= 1u << idx;
3627 }
3628 }
3629}
3630
3631/* Context activation is done by the caller. */
3633{
3634 struct wined3d_stream_info *stream_info = &context->stream_info;
3635 const struct wined3d_d3d_info *d3d_info = context->d3d_info;
3636 const struct wined3d_gl_info *gl_info = context->gl_info;
3637 DWORD prev_all_vbo = stream_info->all_vbo;
3638 unsigned int i;
3639 WORD map;
3640
3642
3643 stream_info->all_vbo = 1;
3644 context->buffer_fence_count = 0;
3645 for (i = 0, map = stream_info->use_map; map; map >>= 1, ++i)
3646 {
3648 struct wined3d_bo_address data;
3649 struct wined3d_buffer *buffer;
3650
3651 if (!(map & 1))
3652 continue;
3653
3654 element = &stream_info->elements[i];
3655 buffer = state->streams[element->stream_idx].buffer;
3656
3657 /* We can't use VBOs if the base vertex index is negative. OpenGL
3658 * doesn't accept negative offsets (or rather offsets bigger than the
3659 * VBO, because the pointer is unsigned), so use system memory
3660 * sources. In most sane cases the pointer - offset will still be > 0,
3661 * otherwise it will wrap around to some big value. Hope that with the
3662 * indices the driver wraps it back internally. If not,
3663 * draw_primitive_immediate_mode() is needed, including a vertex buffer
3664 * path. */
3665 if (state->load_base_vertex_index < 0)
3666 {
3667 WARN_(d3d_perf)("load_base_vertex_index is < 0 (%d), not using VBOs.\n",
3668 state->load_base_vertex_index);
3669 element->data.buffer_object = 0;
3671 if ((UINT_PTR)element->data.addr < -state->load_base_vertex_index * element->stride)
3672 FIXME("System memory vertex data load offset is negative!\n");
3673 }
3674 else
3675 {
3678 element->data.buffer_object = data.buffer_object;
3679 element->data.addr += (ULONG_PTR)data.addr;
3680 }
3681
3682 if (!element->data.buffer_object)
3683 stream_info->all_vbo = 0;
3684
3685 if (buffer->fence)
3686 context->buffer_fences[context->buffer_fence_count++] = buffer->fence;
3687
3688 TRACE("Load array %u {%#x:%p}.\n", i, element->data.buffer_object, element->data.addr);
3689 }
3690
3691 if (prev_all_vbo != stream_info->all_vbo)
3693
3694 context->use_immediate_mode_draw = FALSE;
3695
3696 if (stream_info->all_vbo)
3697 return;
3698
3699 if (use_vs(state))
3700 {
3701 if (state->vertex_declaration->half_float_conv_needed)
3702 {
3703 TRACE("Using immediate mode draw with vertex shaders for FLOAT16 conversion.\n");
3704 context->use_immediate_mode_draw = TRUE;
3705 }
3706 }
3707 else
3708 {
3709 WORD slow_mask = -!d3d_info->ffp_generic_attributes & (1u << WINED3D_FFP_PSIZE);
3710 slow_mask |= -(!gl_info->supported[ARB_VERTEX_ARRAY_BGRA] && !d3d_info->ffp_generic_attributes)
3712
3713 if ((stream_info->position_transformed && !d3d_info->xyzrhw)
3714 || (stream_info->use_map & slow_mask))
3715 context->use_immediate_mode_draw = TRUE;
3716 }
3717}
3718
3719/* Context activation is done by the caller. */
3721 const struct wined3d_state *state, unsigned int idx)
3722{
3723 struct wined3d_texture *texture;
3724
3725 if (!(texture = state->textures[idx]))
3726 return;
3727
3729}
3730
3731/* Context activation is done by the caller. */
3733{
3734 unsigned int i;
3735
3736 if (use_vs(state))
3737 {
3738 for (i = 0; i < MAX_VERTEX_SAMPLERS; ++i)
3739 {
3740 if (state->shader[WINED3D_SHADER_TYPE_VERTEX]->reg_maps.resource_info[i].type)
3742 }
3743 }
3744
3745 if (use_ps(state))
3746 {
3747 for (i = 0; i < MAX_FRAGMENT_SAMPLERS; ++i)
3748 {
3749 if (state->shader[WINED3D_SHADER_TYPE_PIXEL]->reg_maps.resource_info[i].type)
3751 }
3752 }
3753 else
3754 {
3755 WORD ffu_map = context->fixed_function_usage_map;
3756
3757 for (i = 0; ffu_map; ffu_map >>= 1, ++i)
3758 {
3759 if (ffu_map & 1)
3761 }
3762 }
3763}
3764
3766 unsigned int shader_mask)
3767{
3770 struct wined3d_shader *shader;
3771 unsigned int i, j;
3772
3773 for (i = 0; i < WINED3D_SHADER_TYPE_COUNT; ++i)
3774 {
3775 if (!(shader_mask & (1u << i)))
3776 continue;
3777
3778 if (!(shader = state->shader[i]))
3779 continue;
3780
3781 for (j = 0; j < WINED3D_MAX_CBS; ++j)
3782 {
3783 if (state->cb[i][j])
3785 }
3786
3787 for (j = 0; j < shader->reg_maps.sampler_map.count; ++j)
3788 {
3789 entry = &shader->reg_maps.sampler_map.entries[j];
3790
3791 if (!(view = state->shader_resource_view[i][entry->resource_idx]))
3792 continue;
3793
3794 if (view->resource->type == WINED3D_RTYPE_BUFFER)
3796 else
3798 }
3799 }
3800}
3801
3804{
3805 unsigned int bind_idx, shader_sampler_count, base, count, i;
3806 const struct wined3d_device *device = context->device;
3809 const struct wined3d_shader *shader;
3810 struct wined3d_sampler *sampler;
3811 const DWORD *tex_unit_map;
3812
3813 if (!(shader = state->shader[shader_type]))
3814 return;
3815
3817 &shader->reg_maps.shader_version, &base, &count);
3818
3819 shader_sampler_count = shader->reg_maps.sampler_map.count;
3820 if (shader_sampler_count > count)
3821 FIXME("Shader %p needs %u samplers, but only %u are supported.\n",
3822 shader, shader_sampler_count, count);
3823 count = min(shader_sampler_count, count);
3824
3825 for (i = 0; i < count; ++i)
3826 {
3827 entry = &shader->reg_maps.sampler_map.entries[i];
3828 bind_idx = base + entry->bind_idx;
3829 if (tex_unit_map)
3830 bind_idx = tex_unit_map[bind_idx];
3831
3832 if (!(view = state->shader_resource_view[shader_type][entry->resource_idx]))
3833 {
3834 WARN("No resource view bound at index %u, %u.\n", shader_type, entry->resource_idx);
3835 continue;
3836 }
3837
3838 if (entry->sampler_idx == WINED3D_SAMPLER_DEFAULT)
3839 sampler = device->default_sampler;