1 | #pragma once |
2 | /* |
3 | sokol_gfx.h -- simple 3D API wrapper |
4 | |
5 | Do this: |
6 | #define SOKOL_IMPL |
7 | before you include this file in *one* C or C++ file to create the |
8 | implementation. |
9 | |
10 | In the same place define one of the following to select the rendering |
11 | backend: |
12 | #define SOKOL_GLCORE33 |
13 | #define SOKOL_GLES2 |
14 | #define SOKOL_GLES3 |
15 | #define SOKOL_D3D11 |
16 | #define SOKOL_METAL |
17 | |
18 | I.e. for the GL 3.3 Core Profile it should look like this: |
19 | |
20 | #include ... |
21 | #include ... |
22 | #define SOKOL_IMPL |
23 | #define SOKOL_GLCORE33 |
24 | #include "sokol_gfx.h" |
25 | |
26 | To enable shader compilation support in the D3D11 backend: |
27 | #define SOKOL_D3D11_SHADER_COMPILER |
28 | |
29 | If SOKOL_D3D11_SHADER_COMPILER is enabled, the executable will link against |
30 | d3dcompiler.lib (d3dcompiler_47.dll). |
31 | |
32 | Optionally provide the following defines with your own implementations: |
33 | |
34 | SOKOL_ASSERT(c) - your own assert macro (default: assert(c)) |
35 | SOKOL_MALLOC(s) - your own malloc function (default: malloc(s)) |
36 | SOKOL_FREE(p) - your own free function (default: free(p)) |
37 | SOKOL_LOG(msg) - your own logging function (default: puts(msg)) |
38 | SOKOL_UNREACHABLE() - a guard macro for unreachable code (default: assert(false)) |
39 | SOKOL_API_DECL - public function declaration prefix (default: extern) |
40 | SOKOL_API_IMPL - public function implementation prefix (default: -) |
41 | |
42 | API usage validation macros: |
43 | |
SOKOL_VALIDATE_BEGIN() - begin a validation block (default: _sg_validate_begin())
45 | SOKOL_VALIDATE(cond, err) - like assert but for API validation (default: _sg_validate(cond, err)) |
SOKOL_VALIDATE_END() - end a validation block, return true if all checks in block passed (default: bool _sg_validate_end())
47 | |
If you don't want validation errors to be fatal, define SOKOL_VALIDATE_NON_FATAL;
be aware though that this may spam SOKOL_LOG messages.
50 | |
51 | Optionally define the following to force debug checks and validations |
52 | even in release mode: |
53 | |
54 | SOKOL_DEBUG - by default this is defined if _DEBUG is defined |
55 | |
56 | |
57 | sokol_gfx DOES NOT: |
58 | =================== |
59 | - create a window or the 3D-API context/device, you must do this |
60 | before sokol_gfx is initialized, and pass any required information |
61 | (like 3D device pointers) to the sokol_gfx initialization call |
62 | |
63 | - present the rendered frame, how this is done exactly usually depends |
on how the window and 3D-API context/device were created
65 | |
66 | - provide a unified shader language, instead 3D-API-specific shader |
67 | source-code or shader-bytecode must be provided |
68 | |
69 | For complete code examples using the various backend 3D-APIs, see: |
70 | |
71 | https://github.com/floooh/sokol-samples |
72 | |
73 | |
74 | STEP BY STEP |
75 | ============ |
76 | --- to initialize sokol_gfx, after creating a window and a 3D-API |
77 | context/device, call: |
78 | |
79 | sg_setup(const sg_desc*) |
80 | |
81 | --- create resource objects (at least buffers, shaders and pipelines, |
82 | and optionally images and passes): |
83 | |
84 | sg_buffer sg_make_buffer(const sg_buffer_desc*) |
85 | sg_image sg_make_image(const sg_image_desc*) |
86 | sg_shader sg_make_shader(const sg_shader_desc*) |
87 | sg_pipeline sg_make_pipeline(const sg_pipeline_desc*) |
88 | sg_pass sg_make_pass(const sg_pass_desc*) |
89 | |
90 | --- start rendering to the default frame buffer with: |
91 | |
92 | sg_begin_default_pass(const sg_pass_action* actions, int width, int height) |
93 | |
94 | --- or start rendering to an offscreen framebuffer with: |
95 | |
96 | sg_begin_pass(sg_pass pass, const sg_pass_action* actions) |
97 | |
--- fill an sg_draw_state struct with the resource bindings for the next
draw call (one pipeline object, 1..N vertex buffers, 0 or 1
index buffer, and 0..N image objects to use as textures on the
vertex- and fragment-shader stages), then call

sg_apply_draw_state(const sg_draw_state* draw_state)

to update the resource bindings
106 | |
107 | --- optionally update shader uniform data with: |
108 | |
109 | sg_apply_uniform_block(sg_shader_stage stage, int ub_index, const void* data, int num_bytes) |
110 | |
111 | --- kick off a draw call with: |
112 | |
113 | sg_draw(int base_element, int num_elements, int num_instances) |
114 | |
115 | --- finish the current rendering pass with: |
116 | |
117 | sg_end_pass() |
118 | |
119 | --- when done with the current frame, call |
120 | |
121 | sg_commit() |
122 | |
123 | --- at the end of your program, shutdown sokol_gfx with: |
124 | |
125 | sg_shutdown() |
126 | |
127 | --- if you need to destroy resources before sg_shutdown(), call: |
128 | |
129 | sg_destroy_buffer(sg_buffer buf) |
130 | sg_destroy_image(sg_image img) |
131 | sg_destroy_shader(sg_shader shd) |
132 | sg_destroy_pipeline(sg_pipeline pip) |
133 | sg_destroy_pass(sg_pass pass) |
134 | |
135 | --- to set a new viewport rectangle, call |
136 | |
137 | sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) |
138 | |
139 | --- to set a new scissor rect, call: |
140 | |
141 | sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) |
142 | |
143 | both sg_apply_viewport() and sg_apply_scissor_rect() must be called |
144 | inside a rendering pass |
145 | |
146 | beginning a pass will reset the viewport to the size of the framebuffer used |
in the new pass
148 | |
149 | --- to update (overwrite) the content of buffer and image resources, call: |
150 | |
151 | sg_update_buffer(sg_buffer buf, const void* ptr, int num_bytes) |
152 | sg_update_image(sg_image img, const sg_image_content* content) |
153 | |
154 | Buffers and images to be updated must have been created with |
155 | SG_USAGE_DYNAMIC or SG_USAGE_STREAM |
156 | |
157 | Only one update per frame is allowed for buffer and image resources. |
158 | The rationale is to have a simple countermeasure to avoid the CPU |
159 | scribbling over data the GPU is currently using, or the CPU having to |
160 | wait for the GPU |
161 | |
162 | Buffer and image updates can be partial, as long as a rendering |
163 | operation only references the valid (updated) data in the |
164 | buffer or image. |
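
A sketch of per-frame streaming into a dynamic vertex buffer (vertex_t,
MAX_PARTICLES, vertices and num_vertices are application-specific
placeholders):

    // at setup time: create a stream-usage buffer without initial content
    sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){
        .size = MAX_PARTICLES * sizeof(vertex_t),
        .usage = SG_USAGE_STREAM
    });

    // once per frame, before the draw calls that reference the data:
    sg_update_buffer(vbuf, vertices, num_vertices * sizeof(vertex_t));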
165 | |
166 | --- to append a chunk of data to a buffer resource, call: |
167 | |
168 | int sg_append_buffer(sg_buffer buf, const void* ptr, int num_bytes) |
169 | |
170 | The difference to sg_update_buffer() is that sg_append_buffer() |
171 | can be called multiple times per frame to append new data to the |
172 | buffer piece by piece, optionally interleaved with draw calls referencing |
173 | the previously written data. |
174 | |
175 | sg_append_buffer() returns a byte offset to the start of the |
written data; this offset can be assigned to
177 | sg_draw_state.vertex_buffer_offsets[n] or |
178 | sg_draw_state.index_buffer_offset |
179 | |
180 | Code example: |
181 | |
182 | for (...) { |
183 | const void* data = ...; |
184 | const int num_bytes = ...; |
185 | int offset = sg_append_buffer(buf, data, num_bytes); |
186 | draw_state.vertex_buffer_offsets[0] = offset; |
187 | sg_apply_draw_state(&draw_state); |
188 | sg_apply_uniform_block(...); |
189 | sg_draw(...); |
190 | } |
191 | |
192 | A buffer to be used with sg_append_buffer() must have been created |
193 | with SG_USAGE_DYNAMIC or SG_USAGE_STREAM. |
194 | |
If the application appends more data to the buffer than fits into
196 | the buffer, the buffer will go into the "overflow" state for the |
197 | rest of the frame. |
198 | |
199 | Any draw calls attempting to render an overflown buffer will be |
200 | silently dropped (in debug mode this will also result in a |
201 | validation error). |
202 | |
203 | You can also check manually if a buffer is in overflow-state by calling |
204 | |
205 | bool sg_query_buffer_overflow(sg_buffer buf) |
206 | |
207 | --- to check for support of optional features: |
208 | |
209 | bool sg_query_feature(sg_feature feature) |
210 | |
211 | --- if you need to call into the underlying 3D-API directly, you must call: |
212 | |
213 | sg_reset_state_cache() |
214 | |
215 | ...before calling sokol_gfx functions again |
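
Putting the above together, a minimal frame might look like the following
sketch (window/3D-context creation, the framebuffer width/height, shader
source code and the actual vertex data are application-specific and
omitted here):

    // once at startup:
    sg_setup(&(sg_desc){0});
    sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){
        .size = sizeof(vertices),
        .content = vertices
    });
    sg_shader shd = sg_make_shader(&(sg_shader_desc){ ... });
    sg_pipeline pip = sg_make_pipeline(&(sg_pipeline_desc){
        .shader = shd,
        .layout.attrs[0].format = SG_VERTEXFORMAT_FLOAT3
    });
    sg_draw_state draw_state = {
        .pipeline = pip,
        .vertex_buffers[0] = vbuf
    };
    sg_pass_action pass_action = {0};

    // once per frame:
    sg_begin_default_pass(&pass_action, width, height);
    sg_apply_draw_state(&draw_state);
    sg_draw(0, 3, 1);
    sg_end_pass();
    sg_commit();

    // at shutdown:
    sg_shutdown();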
216 | |
217 | BACKEND-SPECIFIC TOPICS: |
218 | ======================== |
--- the GL backends need to know about the internal structure of uniform
blocks, and the name and type of texture samplers:
221 | |
222 | typedef struct { |
223 | float mvp[16]; // model-view-projection matrix |
224 | float offset0[2]; // some 2D vectors |
225 | float offset1[2]; |
226 | float offset2[2]; |
227 | } params_t; |
228 | |
229 | // uniform block structure and texture image definition in sg_shader_desc: |
230 | sg_shader_desc desc = { |
231 | // uniform block description (size and internal structure) |
232 | .vs.uniform_blocks[0] = { |
233 | .size = sizeof(params_t), |
234 | .uniforms = { |
235 | [0] = { .name="mvp", .type=SG_UNIFORMTYPE_MAT4 }, |
236 | [1] = { .name="offset0", .type=SG_UNIFORMTYPE_VEC2 }, |
237 | ... |
238 | } |
239 | }, |
240 | // one texture on the fragment-shader-stage, GLES2/WebGL needs name and image type |
241 | .fs.images[0] = { .name="tex", .type=SG_IMAGETYPE_ARRAY } |
242 | ... |
243 | }; |
244 | |
245 | --- the Metal and D3D11 backends only need to know the size of uniform blocks, |
246 | not their internal member structure, and they only need to know |
247 | the type of a texture sampler, not its name: |
248 | |
249 | sg_shader_desc desc = { |
250 | .vs.uniform_blocks[0].size = sizeof(params_t), |
251 | .fs.images[0].type = SG_IMAGETYPE_ARRAY, |
252 | ... |
253 | }; |
254 | |
255 | --- when creating a pipeline object, GLES2/WebGL need to know the vertex |
256 | attribute names as used in the vertex shader when describing vertex |
257 | layouts: |
258 | |
259 | sg_pipeline_desc desc = { |
260 | .layout = { |
261 | .attrs = { |
262 | [0] = { .name="position", .format=SG_VERTEXFORMAT_FLOAT3 }, |
263 | [1] = { .name="color1", .format=SG_VERTEXFORMAT_FLOAT4 } |
264 | } |
265 | } |
266 | }; |
267 | |
268 | --- on D3D11 you need to provide a semantic name and semantic index in the |
269 | vertex attribute definition instead (see the D3D11 documentation on |
270 | D3D11_INPUT_ELEMENT_DESC for details): |
271 | |
272 | sg_pipeline_desc desc = { |
273 | .layout = { |
274 | .attrs = { |
275 | [0] = { .sem_name="POSITION", .sem_index=0, .format=SG_VERTEXFORMAT_FLOAT3 }, |
276 | [1] = { .sem_name="COLOR", .sem_index=1, .format=SG_VERTEXFORMAT_FLOAT4 } |
277 | } |
278 | } |
279 | }; |
280 | |
281 | --- on Metal, GL 3.3 or GLES3/WebGL2, you don't need to provide an attribute |
282 | name or semantic name, since vertex attributes can be bound by their slot index |
283 | (this is mandatory in Metal, and optional in GL): |
284 | |
285 | sg_pipeline_desc desc = { |
286 | .layout = { |
287 | .attrs = { |
288 | [0] = { .format=SG_VERTEXFORMAT_FLOAT3 }, |
289 | [1] = { .format=SG_VERTEXFORMAT_FLOAT4 } |
290 | } |
291 | } |
292 | }; |
293 | |
294 | WORKING WITH CONTEXTS |
295 | ===================== |
sokol-gfx allows switching between different rendering contexts and
associating resource objects with contexts. This is useful for
writing GL applications that render into multiple windows.
299 | |
300 | A rendering context keeps track of all resources created while |
301 | the context is active. When the context is destroyed, all resources |
302 | "belonging to the context" are destroyed as well. |
303 | |
304 | A default context will be created and activated implicitly in |
305 | sg_setup(), and destroyed in sg_shutdown(). So for a typical application |
306 | which *doesn't* use multiple contexts, nothing changes, and calling |
307 | the context functions isn't necessary. |
308 | |
309 | Three functions have been added to work with contexts: |
310 | |
311 | --- sg_context sg_setup_context(): |
312 | This must be called once after a GL context has been created and |
313 | made active. |
314 | |
315 | --- void sg_activate_context(sg_context ctx) |
316 | This must be called after making a different GL context active. |
317 | Apart from 3D-API-specific actions, the call to sg_activate_context() |
318 | will internally call sg_reset_state_cache(). |
319 | |
320 | --- void sg_discard_context(sg_context ctx) |
321 | This must be called right before a GL context is destroyed and |
322 | will destroy all resources associated with the context (that |
have been created while the context was active). The GL context must be
active at the time sg_discard_context(sg_context ctx) is called.
325 | |
Also note that resources (buffers, images, shaders and pipelines) must
only be used or destroyed while the same GL context is active that
was active when the resource was created (an exception is
resource sharing on GL: such resources can be used while
another context is active, but must still be destroyed under
the same context that was active during creation).
332 | |
333 | For more information, check out the multiwindow-glfw sample: |
334 | |
335 | https://github.com/floooh/sokol-samples/blob/master/glfw/multiwindow-glfw.c |
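
One possible pattern (a rough sketch only, not the exact code of the
sample linked above; making a GL context active is framework-specific) is
to call sg_setup_context() once per window right after its GL context has
been created and made active, and to activate the matching sokol_gfx
context before rendering into that window:

    // once per window, with the window's GL context active
    // (sg_setup() must already have been called):
    sg_context ctx = sg_setup_context();

    // per frame, for each window:
    //   make the window's GL context active, then:
    sg_activate_context(ctx);
    sg_begin_default_pass(&pass_action, width, height);
    ...
    sg_end_pass();
    sg_commit();

    // right before destroying a window's GL context (still active):
    sg_discard_context(ctx);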
336 | |
337 | TODO: |
=====
339 | - talk about asynchronous resource creation |
340 | |
341 | zlib/libpng license |
342 | |
343 | Copyright (c) 2018 Andre Weissflog |
344 | |
345 | This software is provided 'as-is', without any express or implied warranty. |
346 | In no event will the authors be held liable for any damages arising from the |
347 | use of this software. |
348 | |
349 | Permission is granted to anyone to use this software for any purpose, |
350 | including commercial applications, and to alter it and redistribute it |
351 | freely, subject to the following restrictions: |
352 | |
353 | 1. The origin of this software must not be misrepresented; you must not |
354 | claim that you wrote the original software. If you use this software in a |
355 | product, an acknowledgment in the product documentation would be |
356 | appreciated but is not required. |
357 | |
358 | 2. Altered source versions must be plainly marked as such, and must not |
359 | be misrepresented as being the original software. |
360 | |
361 | 3. This notice may not be removed or altered from any source |
362 | distribution. |
363 | */ |
364 | #include <stdint.h> |
365 | #include <stdbool.h> |
366 | |
367 | #ifndef SOKOL_API_DECL |
368 | #define SOKOL_API_DECL extern |
369 | #endif |
370 | |
371 | #ifdef __cplusplus |
372 | extern "C" { |
373 | #endif |
374 | |
375 | #ifdef _MSC_VER |
376 | #pragma warning(push) |
377 | #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ |
378 | #endif |
379 | |
380 | /* |
381 | Resource id typedefs: |
382 | |
383 | sg_buffer: vertex- and index-buffers |
384 | sg_image: textures and render targets |
385 | sg_shader: vertex- and fragment-shaders, uniform blocks |
386 | sg_pipeline: associated shader and vertex-layouts, and render states |
387 | sg_pass: a bundle of render targets and actions on them |
388 | sg_context: a 'context handle' for switching between 3D-API contexts |
389 | |
390 | Instead of pointers, resource creation functions return a 32-bit |
391 | number which uniquely identifies the resource object. |
392 | |
The 32-bit resource id is split into a 16-bit pool index in the lower bits,
and a 16-bit 'unique counter' in the upper bits. The index allows fast
pool lookups, and combined with the unique counter it allows detection of
'dangling accesses' (trying to use an object which no longer exists because
its pool slot has been reused for a new object).
398 | |
399 | The resource ids are wrapped into a struct so that the compiler |
400 | can complain when the wrong resource type is used. |
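
For example, an application can use the sg_query_xxx_state() functions
(declared further below) to check whether an id (still) refers to a valid
object:

    sg_buffer buf = sg_make_buffer(&buf_desc);
    if (sg_query_buffer_state(buf) != SG_RESOURCESTATE_VALID) {
        // creation failed, or the id is dangling/invalid
    }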
401 | */ |
402 | typedef struct { uint32_t id; } sg_buffer; |
403 | typedef struct { uint32_t id; } sg_image; |
404 | typedef struct { uint32_t id; } sg_shader; |
405 | typedef struct { uint32_t id; } sg_pipeline; |
406 | typedef struct { uint32_t id; } sg_pass; |
407 | typedef struct { uint32_t id; } sg_context; |
408 | |
409 | /* |
410 | various compile-time constants |
411 | |
412 | FIXME: it may make sense to convert some of those into defines so |
413 | that the user code can override them. |
414 | */ |
415 | enum { |
416 | SG_INVALID_ID = 0, |
417 | SG_NUM_SHADER_STAGES = 2, |
418 | SG_NUM_INFLIGHT_FRAMES = 2, |
419 | SG_MAX_COLOR_ATTACHMENTS = 4, |
420 | SG_MAX_SHADERSTAGE_BUFFERS = 4, |
421 | SG_MAX_SHADERSTAGE_IMAGES = 12, |
422 | SG_MAX_SHADERSTAGE_UBS = 4, |
423 | SG_MAX_UB_MEMBERS = 16, |
424 | SG_MAX_VERTEX_ATTRIBUTES = 16, |
425 | SG_MAX_MIPMAPS = 16, |
426 | SG_MAX_TEXTUREARRAY_LAYERS = 128 |
427 | }; |
428 | |
429 | /* |
430 | sg_feature |
431 | |
These are optional features; use the function
433 | sg_query_feature() to check whether the feature is supported. |
434 | */ |
435 | typedef enum { |
436 | SG_FEATURE_INSTANCING, |
437 | SG_FEATURE_TEXTURE_COMPRESSION_DXT, |
438 | SG_FEATURE_TEXTURE_COMPRESSION_PVRTC, |
439 | SG_FEATURE_TEXTURE_COMPRESSION_ATC, |
440 | SG_FEATURE_TEXTURE_COMPRESSION_ETC2, |
441 | SG_FEATURE_TEXTURE_FLOAT, |
442 | SG_FEATURE_TEXTURE_HALF_FLOAT, |
443 | SG_FEATURE_ORIGIN_BOTTOM_LEFT, |
444 | SG_FEATURE_ORIGIN_TOP_LEFT, |
445 | SG_FEATURE_MSAA_RENDER_TARGETS, |
446 | SG_FEATURE_PACKED_VERTEX_FORMAT_10_2, |
447 | SG_FEATURE_MULTIPLE_RENDER_TARGET, |
448 | SG_FEATURE_IMAGETYPE_3D, |
449 | SG_FEATURE_IMAGETYPE_ARRAY, |
450 | |
451 | SG_NUM_FEATURES |
452 | } sg_feature; |
453 | |
454 | /* |
455 | sg_resource_state |
456 | |
457 | The current state of a resource in its resource pool. |
458 | Resources start in the INITIAL state, which means the |
459 | pool slot is unoccupied and can be allocated. When a resource is |
460 | created, first an id is allocated, and the resource pool slot |
461 | is set to state ALLOC. After allocation, the resource is |
462 | initialized, which may result in the VALID or FAILED state. The |
463 | reason why allocation and initialization are separate is because |
464 | some resource types (e.g. buffers and images) might be asynchronously |
initialized by the user application. If a resource that is not in the
VALID state is used for rendering, the rendering operation will be
silently dropped.
468 | |
469 | The special INVALID state is returned in sg_query_xxx_state() if no |
470 | resource object exists for the provided resource id. |
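
A sketch of the two-step creation path used for asynchronous setup
(data and data_size are application-provided, the loading itself is
application-specific):

    sg_buffer buf = sg_alloc_buffer();  // buf is now in the ALLOC state
    // ...later, when the buffer data has finished loading...
    sg_init_buffer(buf, &(sg_buffer_desc){
        .size = data_size,
        .content = data
    });                                 // buf is now VALID (or FAILED)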
471 | */ |
472 | typedef enum { |
473 | SG_RESOURCESTATE_INITIAL, |
474 | SG_RESOURCESTATE_ALLOC, |
475 | SG_RESOURCESTATE_VALID, |
476 | SG_RESOURCESTATE_FAILED, |
477 | SG_RESOURCESTATE_INVALID, |
478 | _SG_RESOURCESTATE_FORCE_U32 = 0x7FFFFFFF |
479 | } sg_resource_state; |
480 | |
481 | /* |
482 | sg_usage |
483 | |
484 | A resource usage hint describing the update strategy of |
485 | buffers and images. This is used in the sg_buffer_desc.usage |
486 | and sg_image_desc.usage members when creating buffers |
487 | and images: |
488 | |
489 | SG_USAGE_IMMUTABLE: the resource will never be updated with |
490 | new data, instead the data content of the |
491 | resource must be provided on creation |
492 | SG_USAGE_DYNAMIC: the resource will be updated infrequently |
493 | with new data (this could range from "once |
494 | after creation", to "quite often but not |
495 | every frame") |
496 | SG_USAGE_STREAM: the resource will be updated each frame |
497 | with new content |
498 | |
The rendering backends use this hint to prevent the CPU from having
to wait for the GPU when attempting to update a resource that might
currently be accessed by the GPU.

Resource content is updated with the function sg_update_buffer() for
buffer objects, and sg_update_image() for image objects. Only
one update is allowed per frame and resource object. The
application must update all data required for rendering (this
means that the update data can be smaller than the resource size;
if only a part of the overall resource is used for rendering,
you only need to make sure that the data that *is* used is valid).
510 | |
511 | The default usage is SG_USAGE_IMMUTABLE. |
512 | */ |
513 | typedef enum { |
514 | _SG_USAGE_DEFAULT, /* value 0 reserved for default-init */ |
515 | SG_USAGE_IMMUTABLE, |
516 | SG_USAGE_DYNAMIC, |
517 | SG_USAGE_STREAM, |
518 | _SG_USAGE_NUM, |
519 | _SG_USAGE_FORCE_U32 = 0x7FFFFFFF |
520 | } sg_usage; |
521 | |
522 | /* |
523 | sg_buffer_type |
524 | |
525 | This indicates whether a buffer contains vertex- or index-data, |
526 | used in the sg_buffer_desc.type member when creating a buffer. |
527 | |
528 | The default value is SG_BUFFERTYPE_VERTEXBUFFER. |
529 | */ |
530 | typedef enum { |
531 | _SG_BUFFERTYPE_DEFAULT, /* value 0 reserved for default-init */ |
532 | SG_BUFFERTYPE_VERTEXBUFFER, |
533 | SG_BUFFERTYPE_INDEXBUFFER, |
534 | _SG_BUFFERTYPE_NUM, |
535 | _SG_BUFFERTYPE_FORCE_U32 = 0x7FFFFFFF |
536 | } sg_buffer_type; |
537 | |
538 | /* |
539 | sg_index_type |
540 | |
541 | Indicates whether indexed rendering (fetching vertex-indices from an |
542 | index buffer) is used, and if yes, the index data type (16- or 32-bits). |
543 | This is used in the sg_pipeline_desc.index_type member when creating a |
544 | pipeline object. |
545 | |
546 | The default index type is SG_INDEXTYPE_NONE. |
547 | */ |
548 | typedef enum { |
549 | _SG_INDEXTYPE_DEFAULT, /* value 0 reserved for default-init */ |
550 | SG_INDEXTYPE_NONE, |
551 | SG_INDEXTYPE_UINT16, |
552 | SG_INDEXTYPE_UINT32, |
553 | _SG_INDEXTYPE_NUM, |
554 | _SG_INDEXTYPE_FORCE_U32 = 0x7FFFFFFF |
555 | } sg_index_type; |
556 | |
557 | /* |
558 | sg_image_type |
559 | |
560 | Indicates the basic image type (2D-texture, cubemap, 3D-texture |
561 | or 2D-array-texture). 3D- and array-textures are not supported |
562 | on the GLES2/WebGL backend. The image type is used in the |
563 | sg_image_desc.type member when creating an image. |
564 | |
565 | The default image type when creating an image is SG_IMAGETYPE_2D. |
566 | */ |
567 | typedef enum { |
568 | _SG_IMAGETYPE_DEFAULT, /* value 0 reserved for default-init */ |
569 | SG_IMAGETYPE_2D, |
570 | SG_IMAGETYPE_CUBE, |
571 | SG_IMAGETYPE_3D, |
572 | SG_IMAGETYPE_ARRAY, |
573 | _SG_IMAGETYPE_NUM, |
574 | _SG_IMAGETYPE_FORCE_U32 = 0x7FFFFFFF |
575 | } sg_image_type; |
576 | |
577 | /* |
578 | sg_cube_face |
579 | |
580 | The cubemap faces. Use these as indices in the sg_image_desc.content |
581 | array. |
582 | */ |
583 | typedef enum { |
584 | SG_CUBEFACE_POS_X, |
585 | SG_CUBEFACE_NEG_X, |
586 | SG_CUBEFACE_POS_Y, |
587 | SG_CUBEFACE_NEG_Y, |
588 | SG_CUBEFACE_POS_Z, |
589 | SG_CUBEFACE_NEG_Z, |
590 | SG_CUBEFACE_NUM, |
591 | _SG_CUBEFACE_FORCE_U32 = 0x7FFFFFFF |
592 | } sg_cube_face; |
593 | |
594 | /* |
595 | sg_shader_stage |
596 | |
597 | There are 2 shader stages: vertex- and fragment-shader-stage. |
598 | Each shader stage consists of: |
599 | |
600 | - one slot for a shader function (provided as source- or byte-code) |
601 | - SG_MAX_SHADERSTAGE_UBS slots for uniform blocks |
602 | - SG_MAX_SHADERSTAGE_IMAGES slots for images used as textures by |
603 | the shader function |
604 | */ |
605 | typedef enum { |
606 | SG_SHADERSTAGE_VS, |
607 | SG_SHADERSTAGE_FS, |
608 | _SG_SHADERSTAGE_FORCE_U32 = 0x7FFFFFFF |
609 | } sg_shader_stage; |
610 | |
611 | /* |
612 | sg_pixel_format |
613 | |
614 | This is a common subset of useful and widely supported pixel formats. The |
615 | pixel format enum is mainly used when creating an image object in the |
616 | sg_image_desc.pixel_format member. |
617 | |
618 | The default pixel format when creating an image is SG_PIXELFORMAT_RGBA8. |
619 | */ |
620 | typedef enum { |
621 | _SG_PIXELFORMAT_DEFAULT, /* value 0 reserved for default-init */ |
622 | SG_PIXELFORMAT_NONE, |
623 | SG_PIXELFORMAT_RGBA8, |
624 | SG_PIXELFORMAT_RGB8, |
625 | SG_PIXELFORMAT_RGBA4, |
626 | SG_PIXELFORMAT_R5G6B5, |
627 | SG_PIXELFORMAT_R5G5B5A1, |
628 | SG_PIXELFORMAT_R10G10B10A2, |
629 | SG_PIXELFORMAT_RGBA32F, |
630 | SG_PIXELFORMAT_RGBA16F, |
631 | SG_PIXELFORMAT_R32F, |
632 | SG_PIXELFORMAT_R16F, |
633 | SG_PIXELFORMAT_L8, |
634 | SG_PIXELFORMAT_DXT1, |
635 | SG_PIXELFORMAT_DXT3, |
636 | SG_PIXELFORMAT_DXT5, |
637 | SG_PIXELFORMAT_DEPTH, |
638 | SG_PIXELFORMAT_DEPTHSTENCIL, |
639 | SG_PIXELFORMAT_PVRTC2_RGB, |
640 | SG_PIXELFORMAT_PVRTC4_RGB, |
641 | SG_PIXELFORMAT_PVRTC2_RGBA, |
642 | SG_PIXELFORMAT_PVRTC4_RGBA, |
643 | SG_PIXELFORMAT_ETC2_RGB8, |
644 | SG_PIXELFORMAT_ETC2_SRGB8, |
645 | _SG_PIXELFORMAT_NUM, |
646 | _SG_PIXELFORMAT_FORCE_U32 = 0x7FFFFFFF |
647 | } sg_pixel_format; |
648 | |
649 | /* |
650 | sg_primitive_type |
651 | |
652 | This is the common subset of 3D primitive types supported across all 3D |
653 | APIs. This is used in the sg_pipeline_desc.primitive_type member when |
654 | creating a pipeline object. |
655 | |
656 | The default primitive type is SG_PRIMITIVETYPE_TRIANGLES. |
657 | */ |
658 | typedef enum { |
659 | _SG_PRIMITIVETYPE_DEFAULT, /* value 0 reserved for default-init */ |
660 | SG_PRIMITIVETYPE_POINTS, |
661 | SG_PRIMITIVETYPE_LINES, |
662 | SG_PRIMITIVETYPE_LINE_STRIP, |
663 | SG_PRIMITIVETYPE_TRIANGLES, |
664 | SG_PRIMITIVETYPE_TRIANGLE_STRIP, |
665 | _SG_PRIMITIVETYPE_NUM, |
666 | _SG_PRIMITIVETYPE_FORCE_U32 = 0x7FFFFFFF |
667 | } sg_primitive_type; |
668 | |
669 | /* |
670 | sg_filter |
671 | |
672 | The filtering mode when sampling a texture image. This is |
673 | used in the sg_image_desc.min_filter and sg_image_desc.mag_filter |
674 | members when creating an image object. |
675 | |
676 | The default filter mode is SG_FILTER_NEAREST. |
677 | */ |
678 | typedef enum { |
679 | _SG_FILTER_DEFAULT, /* value 0 reserved for default-init */ |
680 | SG_FILTER_NEAREST, |
681 | SG_FILTER_LINEAR, |
682 | SG_FILTER_NEAREST_MIPMAP_NEAREST, |
683 | SG_FILTER_NEAREST_MIPMAP_LINEAR, |
684 | SG_FILTER_LINEAR_MIPMAP_NEAREST, |
685 | SG_FILTER_LINEAR_MIPMAP_LINEAR, |
686 | _SG_FILTER_NUM, |
687 | _SG_FILTER_FORCE_U32 = 0x7FFFFFFF |
688 | } sg_filter; |
689 | |
690 | /* |
691 | sg_wrap |
692 | |
693 | The texture coordinates wrapping mode when sampling a texture |
694 | image. This is used in the sg_image_desc.wrap_u, .wrap_v |
695 | and .wrap_w members when creating an image. |
696 | |
697 | The default wrap mode is SG_WRAP_REPEAT. |
698 | */ |
699 | typedef enum { |
700 | _SG_WRAP_DEFAULT, /* value 0 reserved for default-init */ |
701 | SG_WRAP_REPEAT, |
702 | SG_WRAP_CLAMP_TO_EDGE, |
703 | SG_WRAP_MIRRORED_REPEAT, |
704 | _SG_WRAP_NUM, |
705 | _SG_WRAP_FORCE_U32 = 0x7FFFFFFF |
706 | } sg_wrap; |
707 | |
708 | /* |
709 | sg_vertex_format |
710 | |
711 | The data type of a vertex component. This is used to describe |
712 | the layout of vertex data when creating a pipeline object. |
713 | */ |
714 | typedef enum { |
715 | SG_VERTEXFORMAT_INVALID, |
716 | SG_VERTEXFORMAT_FLOAT, |
717 | SG_VERTEXFORMAT_FLOAT2, |
718 | SG_VERTEXFORMAT_FLOAT3, |
719 | SG_VERTEXFORMAT_FLOAT4, |
720 | SG_VERTEXFORMAT_BYTE4, |
721 | SG_VERTEXFORMAT_BYTE4N, |
722 | SG_VERTEXFORMAT_UBYTE4, |
723 | SG_VERTEXFORMAT_UBYTE4N, |
724 | SG_VERTEXFORMAT_SHORT2, |
725 | SG_VERTEXFORMAT_SHORT2N, |
726 | SG_VERTEXFORMAT_SHORT4, |
727 | SG_VERTEXFORMAT_SHORT4N, |
728 | SG_VERTEXFORMAT_UINT10_N2, |
729 | _SG_VERTEXFORMAT_NUM, |
730 | _SG_VERTEXFORMAT_FORCE_U32 = 0x7FFFFFFF |
731 | } sg_vertex_format; |
732 | |
733 | /* |
734 | sg_vertex_step |
735 | |
736 | Defines whether the input pointer of a vertex input stream is advanced |
737 | 'per vertex' or 'per instance'. The default step-func is |
738 | SG_VERTEXSTEP_PER_VERTEX. SG_VERTEXSTEP_PER_INSTANCE is used with |
739 | instanced-rendering. |
740 | |
741 | The vertex-step is part of the vertex-layout definition |
742 | when creating pipeline objects. |
743 | */ |
744 | typedef enum { |
745 | _SG_VERTEXSTEP_DEFAULT, /* value 0 reserved for default-init */ |
746 | SG_VERTEXSTEP_PER_VERTEX, |
747 | SG_VERTEXSTEP_PER_INSTANCE, |
748 | _SG_VERTEXSTEP_NUM, |
749 | _SG_VERTEXSTEP_FORCE_U32 = 0x7FFFFFFF |
750 | } sg_vertex_step; |
751 | |
752 | /* |
753 | sg_uniform_type |
754 | |
755 | The data type of a uniform block member. This is used to |
756 | describe the internal layout of uniform blocks when creating |
757 | a shader object. |
758 | */ |
759 | typedef enum { |
760 | SG_UNIFORMTYPE_INVALID, |
761 | SG_UNIFORMTYPE_FLOAT, |
762 | SG_UNIFORMTYPE_FLOAT2, |
763 | SG_UNIFORMTYPE_FLOAT3, |
764 | SG_UNIFORMTYPE_FLOAT4, |
765 | SG_UNIFORMTYPE_MAT4, |
766 | _SG_UNIFORMTYPE_NUM, |
767 | _SG_UNIFORMTYPE_FORCE_U32 = 0x7FFFFFFF |
768 | } sg_uniform_type; |
769 | |
770 | /* |
771 | sg_cull_mode |
772 | |
773 | The face-culling mode, this is used in the |
774 | sg_pipeline_desc.rasterizer.cull_mode member when creating a |
775 | pipeline object. |
776 | |
777 | The default cull mode is SG_CULLMODE_NONE |
778 | */ |
779 | typedef enum { |
780 | _SG_CULLMODE_DEFAULT, /* value 0 reserved for default-init */ |
781 | SG_CULLMODE_NONE, |
782 | SG_CULLMODE_FRONT, |
783 | SG_CULLMODE_BACK, |
784 | _SG_CULLMODE_NUM, |
785 | _SG_CULLMODE_FORCE_U32 = 0x7FFFFFFF |
786 | } sg_cull_mode; |
787 | |
788 | /* |
789 | sg_face_winding |
790 | |
791 | The vertex-winding rule that determines a front-facing primitive. This |
792 | is used in the member sg_pipeline_desc.rasterizer.face_winding |
793 | when creating a pipeline object. |
794 | |
795 | The default winding is SG_FACEWINDING_CW (clockwise) |
796 | */ |
797 | typedef enum { |
798 | _SG_FACEWINDING_DEFAULT, /* value 0 reserved for default-init */ |
799 | SG_FACEWINDING_CCW, |
800 | SG_FACEWINDING_CW, |
801 | _SG_FACEWINDING_NUM, |
802 | _SG_FACEWINDING_FORCE_U32 = 0x7FFFFFFF |
803 | } sg_face_winding; |
804 | |
805 | /* |
806 | sg_compare_func |
807 | |
808 | The compare-function for depth- and stencil-ref tests. |
809 | This is used when creating pipeline objects in the members: |
810 | |
811 | sg_pipeline_desc |
812 | .depth_stencil |
813 | .depth_compare_func |
814 | .stencil_front.compare_func |
815 | .stencil_back.compare_func |
816 | |
817 | The default compare func for depth- and stencil-tests is |
818 | SG_COMPAREFUNC_ALWAYS. |
819 | */ |
820 | typedef enum { |
821 | _SG_COMPAREFUNC_DEFAULT, /* value 0 reserved for default-init */ |
822 | SG_COMPAREFUNC_NEVER, |
823 | SG_COMPAREFUNC_LESS, |
824 | SG_COMPAREFUNC_EQUAL, |
825 | SG_COMPAREFUNC_LESS_EQUAL, |
826 | SG_COMPAREFUNC_GREATER, |
827 | SG_COMPAREFUNC_NOT_EQUAL, |
828 | SG_COMPAREFUNC_GREATER_EQUAL, |
829 | SG_COMPAREFUNC_ALWAYS, |
830 | _SG_COMPAREFUNC_NUM, |
831 | _SG_COMPAREFUNC_FORCE_U32 = 0x7FFFFFFF |
832 | } sg_compare_func; |
833 | |
834 | /* |
835 | sg_stencil_op |
836 | |
837 | The operation performed on a currently stored stencil-value when a |
838 | comparison test passes or fails. This is used when creating a pipeline |
839 | object in the members: |
840 | |
841 | sg_pipeline_desc |
842 | .depth_stencil |
843 | .stencil_front |
844 | .fail_op |
845 | .depth_fail_op |
846 | .pass_op |
847 | .stencil_back |
848 | .fail_op |
849 | .depth_fail_op |
850 | .pass_op |
851 | |
852 | The default value is SG_STENCILOP_KEEP. |
853 | */ |
854 | typedef enum { |
855 | _SG_STENCILOP_DEFAULT, /* value 0 reserved for default-init */ |
856 | SG_STENCILOP_KEEP, |
857 | SG_STENCILOP_ZERO, |
858 | SG_STENCILOP_REPLACE, |
859 | SG_STENCILOP_INCR_CLAMP, |
860 | SG_STENCILOP_DECR_CLAMP, |
861 | SG_STENCILOP_INVERT, |
862 | SG_STENCILOP_INCR_WRAP, |
863 | SG_STENCILOP_DECR_WRAP, |
864 | _SG_STENCILOP_NUM, |
865 | _SG_STENCILOP_FORCE_U32 = 0x7FFFFFFF |
866 | } sg_stencil_op; |
867 | |
868 | /* |
869 | sg_blend_factor |
870 | |
871 | The source and destination factors in blending operations. |
872 | This is used in the following members when creating a pipeline object: |
873 | |
874 | sg_pipeline_desc |
875 | .blend |
876 | .src_factor_rgb |
877 | .dst_factor_rgb |
878 | .src_factor_alpha |
879 | .dst_factor_alpha |
880 | |
881 | The default value is SG_BLENDFACTOR_ONE for source |
882 | factors, and SG_BLENDFACTOR_ZERO for destination factors. |
883 | */ |
884 | typedef enum { |
885 | _SG_BLENDFACTOR_DEFAULT, /* value 0 reserved for default-init */ |
886 | SG_BLENDFACTOR_ZERO, |
887 | SG_BLENDFACTOR_ONE, |
888 | SG_BLENDFACTOR_SRC_COLOR, |
889 | SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR, |
890 | SG_BLENDFACTOR_SRC_ALPHA, |
891 | SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA, |
892 | SG_BLENDFACTOR_DST_COLOR, |
893 | SG_BLENDFACTOR_ONE_MINUS_DST_COLOR, |
894 | SG_BLENDFACTOR_DST_ALPHA, |
895 | SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA, |
896 | SG_BLENDFACTOR_SRC_ALPHA_SATURATED, |
897 | SG_BLENDFACTOR_BLEND_COLOR, |
898 | SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR, |
899 | SG_BLENDFACTOR_BLEND_ALPHA, |
900 | SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA, |
901 | _SG_BLENDFACTOR_NUM, |
902 | _SG_BLENDFACTOR_FORCE_U32 = 0x7FFFFFFF |
903 | } sg_blend_factor; |
904 | |
905 | /* |
906 | sg_blend_op |
907 | |
908 | Describes how the source and destination values are combined in the |
909 | fragment blending operation. It is used in the following members when |
910 | creating a pipeline object: |
911 | |
912 | sg_pipeline_desc |
913 | .blend |
914 | .op_rgb |
915 | .op_alpha |
916 | |
917 | The default value is SG_BLENDOP_ADD. |
918 | */ |
919 | typedef enum { |
920 | _SG_BLENDOP_DEFAULT, /* value 0 reserved for default-init */ |
921 | SG_BLENDOP_ADD, |
922 | SG_BLENDOP_SUBTRACT, |
923 | SG_BLENDOP_REVERSE_SUBTRACT, |
924 | _SG_BLENDOP_NUM, |
925 | _SG_BLENDOP_FORCE_U32 = 0x7FFFFFFF |
926 | } sg_blend_op; |
927 | |
928 | /* |
929 | sg_color_mask |
930 | |
931 | Selects the color channels when writing a fragment color to the |
framebuffer. This is used in the sg_pipeline_desc.blend.color_write_mask
member when creating a pipeline object.

The default colormask is SG_COLORMASK_RGBA (write all color channels)
936 | */ |
937 | typedef enum { |
938 | _SG_COLORMASK_DEFAULT = 0, /* value 0 reserved for default-init */ |
SG_COLORMASK_NONE = (0x10), /* special value for 'all channels disabled' */
940 | SG_COLORMASK_R = (1<<0), |
941 | SG_COLORMASK_G = (1<<1), |
942 | SG_COLORMASK_B = (1<<2), |
943 | SG_COLORMASK_A = (1<<3), |
944 | SG_COLORMASK_RGB = 0x7, |
945 | SG_COLORMASK_RGBA = 0xF, |
946 | _SG_COLORMASK_FORCE_U32 = 0x7FFFFFFF |
947 | } sg_color_mask; |
948 | |
949 | /* |
950 | sg_action |
951 | |
952 | Defines what action should be performed at the start of a render pass: |
953 | |
954 | SG_ACTION_CLEAR: clear the render target image |
955 | SG_ACTION_LOAD: load the previous content of the render target image |
956 | SG_ACTION_DONTCARE: leave the render target image content undefined |
957 | |
958 | This is used in the sg_pass_action structure. |
959 | |
960 | The default action for all pass attachments is SG_ACTION_CLEAR, with the |
clear color rgba = {0.5f, 0.5f, 0.5f, 1.0f}, depth=1.0 and stencil=0.
962 | |
If you want to override the default behaviour, it is important to not
only set the clear color, but the 'action' field as well (as long as the
action is left at _SG_ACTION_DEFAULT, the clear value fields will be ignored).
966 | */ |
967 | typedef enum { |
968 | _SG_ACTION_DEFAULT, |
969 | SG_ACTION_CLEAR, |
970 | SG_ACTION_LOAD, |
971 | SG_ACTION_DONTCARE, |
972 | _SG_ACTION_NUM, |
973 | _SG_ACTION_FORCE_U32 = 0x7FFFFFFF |
974 | } sg_action; |
975 | |
976 | /* |
977 | sg_pass_action |
978 | |
979 | The sg_pass_action struct defines the actions to be performed |
980 | at the start of a rendering pass in the functions sg_begin_pass() |
981 | and sg_begin_default_pass(). |
982 | |
983 | A separate action and clear values can be defined for each |
984 | color attachment, and for the depth-stencil attachment. |
985 | |
986 | The default clear values are defined by the macros: |
987 | |
988 | - SG_DEFAULT_CLEAR_RED: 0.5f |
989 | - SG_DEFAULT_CLEAR_GREEN: 0.5f |
990 | - SG_DEFAULT_CLEAR_BLUE: 0.5f |
991 | - SG_DEFAULT_CLEAR_ALPHA: 1.0f |
992 | - SG_DEFAULT_CLEAR_DEPTH: 1.0f |
993 | - SG_DEFAULT_CLEAR_STENCIL: 0 |
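
For example, clearing the first color attachment to black while keeping
the default actions and clear values for depth and stencil might look
like this (width and height are the default framebuffer's size in pixels):

    sg_pass_action pass_action = {
        .colors[0] = { .action = SG_ACTION_CLEAR, .val = { 0.0f, 0.0f, 0.0f, 1.0f } }
    };
    sg_begin_default_pass(&pass_action, width, height);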
994 | */ |
995 | typedef struct { |
996 | sg_action action; |
997 | float val[4]; |
998 | } sg_color_attachment_action; |
999 | |
1000 | typedef struct { |
1001 | sg_action action; |
1002 | float val; |
1003 | } sg_depth_attachment_action; |
1004 | |
1005 | typedef struct { |
1006 | sg_action action; |
1007 | uint8_t val; |
1008 | } sg_stencil_attachment_action; |
1009 | |
1010 | typedef struct { |
1011 | uint32_t _start_canary; |
1012 | sg_color_attachment_action colors[SG_MAX_COLOR_ATTACHMENTS]; |
1013 | sg_depth_attachment_action depth; |
1014 | sg_stencil_attachment_action stencil; |
1015 | uint32_t _end_canary; |
1016 | } sg_pass_action; |
1017 | |
1018 | /* |
1019 | sg_draw_state |
1020 | |
1021 | The sg_draw_state structure defines the resource binding slots |
1022 | of the sokol_gfx render pipeline, used as argument to the |
1023 | sg_apply_draw_state() function. |
1024 | |
1025 | A draw state contains: |
1026 | |
1027 | - 1 pipeline object |
1028 | - 1..N vertex buffers |
1029 | - 0..N vertex buffer offsets |
1030 | - 0..1 index buffers |
1031 | - 0..1 index buffer offsets |
1032 | - 0..N vertex shader stage images |
1033 | - 0..N fragment shader stage images |
1034 | |
The maximum number of vertex buffers and shader stage images
is defined by the SG_MAX_SHADERSTAGE_BUFFERS and
SG_MAX_SHADERSTAGE_IMAGES configuration constants.
1038 | |
1039 | The optional buffer offsets can be used to group different chunks |
1040 | of vertex- and/or index-data into the same buffer objects. |
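
A typical draw state initialization might look like this (pip, vbuf, ibuf
and tex are resource handles created elsewhere; the index buffer and the
fragment shader image are optional):

    sg_draw_state draw_state = {
        .pipeline = pip,
        .vertex_buffers[0] = vbuf,
        .index_buffer = ibuf,   // only for indexed rendering
        .fs_images[0] = tex     // only if the fragment shader samples a texture
    };
    sg_apply_draw_state(&draw_state);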
1041 | */ |
1042 | typedef struct { |
1043 | uint32_t _start_canary; |
1044 | sg_pipeline pipeline; |
1045 | sg_buffer vertex_buffers[SG_MAX_SHADERSTAGE_BUFFERS]; |
1046 | int vertex_buffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; |
1047 | sg_buffer index_buffer; |
1048 | int index_buffer_offset; |
1049 | sg_image vs_images[SG_MAX_SHADERSTAGE_IMAGES]; |
1050 | sg_image fs_images[SG_MAX_SHADERSTAGE_IMAGES]; |
1051 | uint32_t _end_canary; |
1052 | } sg_draw_state; |
1053 | |
1054 | /* |
1055 | sg_desc |
1056 | |
The sg_desc struct contains configuration values for sokol_gfx;
it is used as the parameter to the sg_setup() call.
1059 | |
1060 | The default configuration is: |
1061 | |
1062 | .buffer_pool_size: 128 |
1063 | .image_pool_size: 128 |
1064 | .shader_pool_size: 32 |
1065 | .pipeline_pool_size: 64 |
1066 | .pass_pool_size: 16 |
1067 | .context_pool_size: 16 |
1068 | |
1069 | GL specific: |
1070 | .gl_force_gles2 |
1071 | if this is true the GL backend will act in "GLES2 fallback mode" even |
1072 | when compiled with SOKOL_GLES3, this is useful to fall back |
1073 | to traditional WebGL if a browser doesn't support a WebGL2 context |
1074 | |
1075 | Metal specific: |
(NOTE: All Objective-C object references are transferred through
a bridged (const void*) to sokol_gfx, which will use an unretained
bridged cast (__bridge id<xxx>) to retrieve the Objective-C
references back. Since the bridge cast is unretained, the caller
must hold a strong reference to the Objective-C object for the
duration of the sokol_gfx call!)
1082 | |
1083 | .mtl_device |
1084 | a pointer to the MTLDevice object |
1085 | .mtl_renderpass_descriptor_cb |
1086 | a C callback function to obtain the MTLRenderPassDescriptor for the |
1087 | current frame when rendering to the default framebuffer, will be called |
1088 | in sg_begin_default_pass() |
1089 | .mtl_drawable_cb |
1090 | a C callback function to obtain a MTLDrawable for the current |
1091 | frame when rendering to the default framebuffer, will be called in |
1092 | sg_end_pass() of the default pass |
1093 | .mtl_global_uniform_buffer_size |
1094 | the size of the global uniform buffer in bytes, this must be big |
1095 | enough to hold all uniform block updates for a single frame, |
1096 | the default value is 4 MByte (4 * 1024 * 1024) |
1097 | .mtl_sampler_cache_size |
1098 | the number of slots in the sampler cache, the Metal backend |
1099 | will share texture samplers with the same state in this |
1100 | cache, the default value is 64 |
1101 | |
1102 | D3D11 specific: |
1103 | .d3d11_device |
1104 | a pointer to the ID3D11Device object, this must have been created |
1105 | before sg_setup() is called |
1106 | .d3d11_device_context |
1107 | a pointer to the ID3D11DeviceContext object |
1108 | .d3d11_render_target_view_cb |
1109 | a C callback function to obtain a pointer to the current |
1110 | ID3D11RenderTargetView object of the default framebuffer, |
1111 | this function will be called in sg_begin_pass() when rendering |
1112 | to the default framebuffer |
1113 | .d3d11_depth_stencil_view_cb |
1114 | a C callback function to obtain a pointer to the current |
1115 | ID3D11DepthStencilView object of the default framebuffer, |
1116 | this function will be called in sg_begin_pass() when rendering |
1117 | to the default framebuffer |
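
For example (the my_* names are application-provided placeholders):

    // GL backend: the defaults are usually fine
    sg_setup(&(sg_desc){0});

    // D3D11 backend: provide device, device-context and callbacks
    sg_setup(&(sg_desc){
        .d3d11_device = my_d3d11_device,
        .d3d11_device_context = my_d3d11_device_context,
        .d3d11_render_target_view_cb = my_render_target_view_cb,
        .d3d11_depth_stencil_view_cb = my_depth_stencil_view_cb
    });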
1118 | */ |
1119 | typedef struct { |
1120 | uint32_t _start_canary; |
1121 | int buffer_pool_size; |
1122 | int image_pool_size; |
1123 | int shader_pool_size; |
1124 | int pipeline_pool_size; |
1125 | int pass_pool_size; |
1126 | int context_pool_size; |
1127 | /* GL specific */ |
1128 | bool gl_force_gles2; |
1129 | /* Metal-specific */ |
1130 | const void* mtl_device; |
1131 | const void* (*mtl_renderpass_descriptor_cb)(void); |
1132 | const void* (*mtl_drawable_cb)(void); |
1133 | int mtl_global_uniform_buffer_size; |
1134 | int mtl_sampler_cache_size; |
1135 | /* D3D11-specific */ |
1136 | const void* d3d11_device; |
1137 | const void* d3d11_device_context; |
1138 | const void* (*d3d11_render_target_view_cb)(void); |
1139 | const void* (*d3d11_depth_stencil_view_cb)(void); |
1140 | uint32_t _end_canary; |
1141 | } sg_desc; |
1142 | |
1143 | /* |
1144 | sg_buffer_desc |
1145 | |
1146 | Creation parameters for sg_buffer objects, used in the |
1147 | sg_make_buffer() call. |
1148 | |
1149 | The default configuration is: |
1150 | |
1151 | .size: 0 (this *must* be set to a valid size in bytes) |
1152 | .type: SG_BUFFERTYPE_VERTEXBUFFER |
1153 | .usage: SG_USAGE_IMMUTABLE |
1154 | .content 0 |
1155 | |
1156 | Buffers with the SG_USAGE_IMMUTABLE usage *must* fill the buffer |
1157 | with initial data (.content must point to a data chunk with |
1158 | exactly .size bytes). |
1159 | |
1160 | ADVANCED TOPIC: Injecting native 3D-API buffers: |
1161 | |
The following struct members allow injecting your own GL, Metal
or D3D11 buffers into sokol_gfx:
1164 | |
1165 | .gl_buffers[SG_NUM_INFLIGHT_FRAMES] |
1166 | .mtl_buffers[SG_NUM_INFLIGHT_FRAMES] |
1167 | .d3d11_buffer |
1168 | |
1169 | You must still provide all other members except the .content member, and |
1170 | these must match the creation parameters of the native buffers you |
1171 | provide. For SG_USAGE_IMMUTABLE, only provide a single native 3D-API |
1172 | buffer, otherwise you need to provide SG_NUM_INFLIGHT_FRAMES buffers |
1173 | (only for GL and Metal, not D3D11). Providing multiple buffers for GL and |
1174 | Metal is necessary because sokol_gfx will rotate through them when |
1175 | calling sg_update_buffer() to prevent lock-stalls. |
1176 | |
Note that it is expected that immutable injected buffers have already been
initialized with content, and the .content member must be 0!
1179 | |
1180 | Also you need to call sg_reset_state_cache() after calling native 3D-API |
1181 | functions, and before calling any sokol_gfx function. |
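
A basic example, creating an immutable vertex buffer initialized with
vertex data:

    float vertices[] = {
        // x, y, z
         0.0f,  0.5f, 0.5f,
         0.5f, -0.5f, 0.5f,
        -0.5f, -0.5f, 0.5f
    };
    sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){
        .size = sizeof(vertices),
        .content = vertices
    });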
1182 | */ |
1183 | typedef struct { |
1184 | uint32_t _start_canary; |
1185 | int size; |
1186 | sg_buffer_type type; |
1187 | sg_usage usage; |
1188 | const void* content; |
1189 | /* GL specific */ |
1190 | uint32_t gl_buffers[SG_NUM_INFLIGHT_FRAMES]; |
1191 | /* Metal specific */ |
1192 | const void* mtl_buffers[SG_NUM_INFLIGHT_FRAMES]; |
1193 | /* D3D11 specific */ |
1194 | const void* d3d11_buffer; |
1195 | uint32_t _end_canary; |
1196 | } sg_buffer_desc; |
1197 | |
1198 | /* |
1199 | sg_subimage_content |
1200 | |
Pointer to and size of subimage-surface data; this is
used to describe the initial content of immutable-usage images,
or for updating dynamic- or stream-usage images.
1204 | |
1205 | For 3D- or array-textures, one sg_subimage_content item |
1206 | describes an entire mipmap level consisting of all array- or |
1207 | 3D-slices of the mipmap level. It is only possible to update |
1208 | an entire mipmap level, not parts of it. |
1209 | */ |
1210 | typedef struct { |
1211 | const void* ptr; /* pointer to subimage data */ |
1212 | int size; /* size in bytes of pointed-to subimage data */ |
1213 | } sg_subimage_content; |
1214 | |
1215 | /* |
1216 | sg_image_content |
1217 | |
1218 | Defines the content of an image through a 2D array |
1219 | of sg_subimage_content structs. The first array dimension |
1220 | is the cubemap face, and the second array dimension the |
1221 | mipmap level. |
1222 | */ |
1223 | typedef struct { |
1224 | sg_subimage_content subimage[SG_CUBEFACE_NUM][SG_MAX_MIPMAPS]; |
1225 | } sg_image_content; |
1226 | |
1227 | /* |
1228 | sg_image_desc |
1229 | |
1230 | Creation parameters for sg_image objects, used in the |
1231 | sg_make_image() call. |
1232 | |
1233 | The default configuration is: |
1234 | |
1235 | .type: SG_IMAGETYPE_2D |
1236 | .render_target: false |
1237 | .width 0 (must be set to >0) |
1238 | .height 0 (must be set to >0) |
1239 | .depth/.layers: 1 |
1240 | .num_mipmaps: 1 |
1241 | .usage: SG_USAGE_IMMUTABLE |
1242 | .pixel_format: SG_PIXELFORMAT_RGBA8 |
1243 | .sample_count: 1 (only used in render_targets) |
1244 | .min_filter: SG_FILTER_NEAREST |
1245 | .mag_filter: SG_FILTER_NEAREST |
1246 | .wrap_u: SG_WRAP_REPEAT |
1247 | .wrap_v: SG_WRAP_REPEAT |
1248 | .wrap_w: SG_WRAP_REPEAT (only SG_IMAGETYPE_3D) |
1249 | .max_anisotropy 1 (must be 1..16) |
1250 | .min_lod 0.0f |
1251 | .max_lod FLT_MAX |
1252 | .content an sg_image_content struct to define the initial content |
1253 | |
1254 | SG_IMAGETYPE_ARRAY and SG_IMAGETYPE_3D are not supported on |
1255 | WebGL/GLES2, use sg_query_feature(SG_FEATURE_IMAGETYPE_ARRAY) and |
1256 | sg_query_feature(SG_FEATURE_IMAGETYPE_3D) at runtime to check |
1257 | if array- and 3D-textures are supported. |
1258 | |
1259 | Images with usage SG_USAGE_IMMUTABLE must be fully initialized by |
1260 | providing a valid .content member which points to |
1261 | initialization data. |
1262 | |
1263 | ADVANCED TOPIC: Injecting native 3D-API textures: |
1264 | |
The following struct members allow injecting your own GL, Metal
or D3D11 textures into sokol_gfx:
1267 | |
1268 | .gl_textures[SG_NUM_INFLIGHT_FRAMES] |
1269 | .mtl_textures[SG_NUM_INFLIGHT_FRAMES] |
1270 | .d3d11_texture |
1271 | |
1272 | The same rules apply as for injecting native buffers |
1273 | (see sg_buffer_desc documentation for more details). |
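
A basic example, creating an immutable 2D texture from RGBA8 pixel data
(the pixels array is an application-provided placeholder):

    sg_image img = sg_make_image(&(sg_image_desc){
        .width = 256,
        .height = 256,
        .pixel_format = SG_PIXELFORMAT_RGBA8,
        .content.subimage[0][0] = {
            .ptr = pixels,
            .size = 256 * 256 * 4
        }
    });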
1274 | */ |
1275 | typedef struct { |
1276 | uint32_t _start_canary; |
1277 | sg_image_type type; |
1278 | bool render_target; |
1279 | int width; |
1280 | int height; |
1281 | union { |
1282 | int depth; |
1283 | int layers; |
1284 | }; |
1285 | int num_mipmaps; |
1286 | sg_usage usage; |
1287 | sg_pixel_format pixel_format; |
1288 | int sample_count; |
1289 | sg_filter min_filter; |
1290 | sg_filter mag_filter; |
1291 | sg_wrap wrap_u; |
1292 | sg_wrap wrap_v; |
1293 | sg_wrap wrap_w; |
1294 | uint32_t max_anisotropy; |
1295 | float min_lod; |
1296 | float max_lod; |
1297 | sg_image_content content; |
1298 | /* GL specific */ |
1299 | uint32_t gl_textures[SG_NUM_INFLIGHT_FRAMES]; |
1300 | /* Metal specific */ |
1301 | const void* mtl_textures[SG_NUM_INFLIGHT_FRAMES]; |
1302 | /* D3D11 specific */ |
1303 | const void* d3d11_texture; |
1304 | uint32_t _end_canary; |
1305 | } sg_image_desc; |
1306 | |
1307 | /* |
1308 | sg_shader_desc |
1309 | |
1310 | The structure sg_shader_desc describes the shaders, uniform blocks |
1311 | and texture images on the vertex- and fragment-shader stage. |
1312 | |
1313 | TODO: source code vs byte code, 3D backend API specifics. |
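
A sketch for the GL backend, with one uniform block on the vertex stage
and shader source code provided as strings (vs_src, fs_src and params_t
are application-provided placeholders):

    sg_shader shd = sg_make_shader(&(sg_shader_desc){
        .vs = {
            .uniform_blocks[0] = {
                .size = sizeof(params_t),
                .uniforms[0] = { .name="mvp", .type=SG_UNIFORMTYPE_MAT4 }
            },
            .source = vs_src
        },
        .fs.source = fs_src
    });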
1314 | */ |
1315 | typedef struct { |
1316 | const char* name; |
1317 | sg_uniform_type type; |
1318 | int array_count; |
1319 | } sg_shader_uniform_desc; |
1320 | |
1321 | typedef struct { |
1322 | int size; |
1323 | sg_shader_uniform_desc uniforms[SG_MAX_UB_MEMBERS]; |
1324 | } sg_shader_uniform_block_desc; |
1325 | |
1326 | typedef struct { |
1327 | const char* name; |
1328 | sg_image_type type; |
1329 | } sg_shader_image_desc; |
1330 | |
1331 | typedef struct { |
1332 | const char* source; |
1333 | const uint8_t* byte_code; |
1334 | int byte_code_size; |
1335 | const char* entry; |
1336 | sg_shader_uniform_block_desc uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; |
1337 | sg_shader_image_desc images[SG_MAX_SHADERSTAGE_IMAGES]; |
1338 | } sg_shader_stage_desc; |
1339 | |
1340 | typedef struct { |
1341 | uint32_t _start_canary; |
1342 | sg_shader_stage_desc vs; |
1343 | sg_shader_stage_desc fs; |
1344 | uint32_t _end_canary; |
1345 | } sg_shader_desc; |
1346 | |
1347 | /* |
1348 | sg_pipeline_desc |
1349 | |
1350 | The sg_pipeline_desc struct defines all creation parameters |
1351 | for an sg_pipeline object, used as argument to the |
1352 | sg_make_pipeline() function: |
1353 | |
1354 | - the complete vertex layout for all input vertex buffers |
1355 | - a shader object |
1356 | - the 3D primitive type (points, lines, triangles, ...) |
1357 | - the index type (none, 16- or 32-bit) |
1358 | - depth-stencil state |
1359 | - alpha-blending state |
1360 | - rasterizer state |
1361 | |
If the vertex data has no gaps between vertex components, you can omit
the .layout.buffers[].stride and layout.attrs[].offset items (leave them
default-initialized to 0), sokol will then compute the offsets and strides
from the vertex component formats (.layout.attrs[].format). Please note
that ALL vertex attribute offsets must be 0 in order for the
automatic offset computation to kick in.
1368 | |
1369 | The default configuration is as follows: |
1370 | |
1371 | .layout: |
1372 | .buffers[]: vertex buffer layouts |
1373 | .stride: 0 (if no stride is given it will be computed) |
1374 | .step_func SG_VERTEXSTEP_PER_VERTEX |
1375 | .step_rate 1 |
1376 | .attrs[]: vertex attribute declarations |
1377 | .buffer_index 0 the vertex buffer bind slot |
1378 | .offset 0 (offsets can be omitted if the vertex layout has no gaps) |
1379 | .format SG_VERTEXFORMAT_INVALID (must be initialized!) |
1380 | .name 0 (GLES2 requires an attribute name here) |
1381 | .sem_name 0 (D3D11 requires a semantic name here) |
1382 | .sem_index 0 (D3D11 requires a semantic index here) |
.shader: 0 (must be initialized with a valid sg_shader id!)
1384 | .primitive_type: SG_PRIMITIVETYPE_TRIANGLES |
1385 | .index_type: SG_INDEXTYPE_NONE |
1386 | .depth_stencil: |
1387 | .stencil_front, .stencil_back: |
1388 | .fail_op: SG_STENCILOP_KEEP |
1389 | .depth_fail_op: SG_STENCILOP_KEEP |
1390 | .pass_op: SG_STENCILOP_KEEP |
1391 | .compare_func SG_COMPAREFUNC_ALWAYS |
1392 | .depth_compare_func: SG_COMPAREFUNC_ALWAYS |
1393 | .depth_write_enabled: false |
1394 | .stencil_enabled: false |
1395 | .stencil_read_mask: 0 |
1396 | .stencil_write_mask: 0 |
1397 | .stencil_ref: 0 |
1398 | .blend: |
1399 | .enabled: false |
1400 | .src_factor_rgb: SG_BLENDFACTOR_ONE |
1401 | .dst_factor_rgb: SG_BLENDFACTOR_ZERO |
1402 | .op_rgb: SG_BLENDOP_ADD |
1403 | .src_factor_alpha: SG_BLENDFACTOR_ONE |
1404 | .dst_factor_alpha: SG_BLENDFACTOR_ZERO |
1405 | .op_alpha: SG_BLENDOP_ADD |
1406 | .color_write_mask: SG_COLORMASK_RGBA |
1407 | .color_attachment_count 1 |
1408 | .color_format SG_PIXELFORMAT_RGBA8 |
1409 | .depth_format SG_PIXELFORMAT_DEPTHSTENCIL |
1410 | .blend_color: { 0.0f, 0.0f, 0.0f, 0.0f } |
1411 | .rasterizer: |
1412 | .alpha_to_coverage_enabled: false |
1413 | .cull_mode: SG_CULLMODE_NONE |
1414 | .face_winding: SG_FACEWINDING_CW |
1415 | .sample_count: 1 |
1416 | .depth_bias: 0.0f |
1417 | .depth_bias_slope_scale: 0.0f |
1418 | .depth_bias_clamp: 0.0f |
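
A sketch of a pipeline with two vertex attributes bound by slot (as on
Metal or GL 3.3), 16-bit indices and traditional alpha blending (shd is
a shader handle created elsewhere):

    sg_pipeline pip = sg_make_pipeline(&(sg_pipeline_desc){
        .layout = {
            .attrs = {
                [0] = { .format=SG_VERTEXFORMAT_FLOAT3 },
                [1] = { .format=SG_VERTEXFORMAT_FLOAT4 }
            }
        },
        .shader = shd,
        .index_type = SG_INDEXTYPE_UINT16,
        .blend = {
            .enabled = true,
            .src_factor_rgb = SG_BLENDFACTOR_SRC_ALPHA,
            .dst_factor_rgb = SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA
        }
    });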
1419 | */ |
1420 | typedef struct { |
1421 | int stride; |
1422 | sg_vertex_step step_func; |
1423 | int step_rate; |
1424 | } sg_buffer_layout_desc; |
1425 | |
1426 | typedef struct { |
1427 | const char* name; |
1428 | const char* sem_name; |
1429 | int sem_index; |
1430 | int buffer_index; |
1431 | int offset; |
1432 | sg_vertex_format format; |
1433 | } sg_vertex_attr_desc; |
1434 | |
1435 | typedef struct { |
1436 | sg_buffer_layout_desc buffers[SG_MAX_SHADERSTAGE_BUFFERS]; |
1437 | sg_vertex_attr_desc attrs[SG_MAX_VERTEX_ATTRIBUTES]; |
1438 | } sg_layout_desc; |
1439 | |
1440 | typedef struct { |
1441 | sg_stencil_op fail_op; |
1442 | sg_stencil_op depth_fail_op; |
1443 | sg_stencil_op pass_op; |
1444 | sg_compare_func compare_func; |
1445 | } sg_stencil_state; |
1446 | |
1447 | typedef struct { |
1448 | sg_stencil_state stencil_front; |
1449 | sg_stencil_state stencil_back; |
1450 | sg_compare_func depth_compare_func; |
1451 | bool depth_write_enabled; |
1452 | bool stencil_enabled; |
1453 | uint8_t stencil_read_mask; |
1454 | uint8_t stencil_write_mask; |
1455 | uint8_t stencil_ref; |
1456 | } sg_depth_stencil_state; |
1457 | |
1458 | typedef struct { |
1459 | bool enabled; |
1460 | sg_blend_factor src_factor_rgb; |
1461 | sg_blend_factor dst_factor_rgb; |
1462 | sg_blend_op op_rgb; |
1463 | sg_blend_factor src_factor_alpha; |
1464 | sg_blend_factor dst_factor_alpha; |
1465 | sg_blend_op op_alpha; |
1466 | uint8_t color_write_mask; |
1467 | int color_attachment_count; |
1468 | sg_pixel_format color_format; |
1469 | sg_pixel_format depth_format; |
1470 | float blend_color[4]; |
1471 | } sg_blend_state; |
1472 | |
1473 | typedef struct { |
1474 | bool alpha_to_coverage_enabled; |
1475 | sg_cull_mode cull_mode; |
1476 | sg_face_winding face_winding; |
1477 | int sample_count; |
1478 | float depth_bias; |
1479 | float depth_bias_slope_scale; |
1480 | float depth_bias_clamp; |
1481 | } sg_rasterizer_state; |
1482 | |
1483 | typedef struct { |
1484 | uint32_t _start_canary; |
1485 | sg_layout_desc layout; |
1486 | sg_shader shader; |
1487 | sg_primitive_type primitive_type; |
1488 | sg_index_type index_type; |
1489 | sg_depth_stencil_state depth_stencil; |
1490 | sg_blend_state blend; |
1491 | sg_rasterizer_state rasterizer; |
1492 | uint32_t _end_canary; |
1493 | } sg_pipeline_desc; |
1494 | |
1495 | /* |
1496 | sg_pass_desc |
1497 | |
1498 | Creation parameters for an sg_pass object, used as argument |
1499 | to the sg_make_pass() function. |
1500 | |
A pass object contains 1..4 color-attachments and none or one
depth-stencil-attachment. Each attachment consists of
an image, and two additional indices describing
which subimage the pass will render into: one mipmap index, and
if the image is a cubemap, array-texture or 3D-texture, the
face-index, array-layer or depth-slice.
1507 | |
1508 | Pass images must fulfill the following requirements: |
1509 | |
1510 | All images must have: |
1511 | - been created as render target (sg_image_desc.render_target = true) |
1512 | - the same size |
1513 | - the same sample count |
1514 | |
1515 | In addition, all color-attachment images must have the same |
1516 | pixel format. |
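
A sketch, creating a pass with one color- and one depth-stencil-attachment:

    sg_image color_img = sg_make_image(&(sg_image_desc){
        .render_target = true,
        .width = 512,
        .height = 512
    });
    sg_image depth_img = sg_make_image(&(sg_image_desc){
        .render_target = true,
        .width = 512,
        .height = 512,
        .pixel_format = SG_PIXELFORMAT_DEPTHSTENCIL
    });
    sg_pass pass = sg_make_pass(&(sg_pass_desc){
        .color_attachments[0].image = color_img,
        .depth_stencil_attachment.image = depth_img
    });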
1517 | */ |
1518 | typedef struct { |
1519 | sg_image image; |
1520 | int mip_level; |
1521 | union { |
1522 | int face; |
1523 | int layer; |
1524 | int slice; |
1525 | }; |
1526 | } sg_attachment_desc; |
1527 | |
1528 | typedef struct { |
1529 | uint32_t _start_canary; |
1530 | sg_attachment_desc color_attachments[SG_MAX_COLOR_ATTACHMENTS]; |
1531 | sg_attachment_desc depth_stencil_attachment; |
1532 | uint32_t _end_canary; |
1533 | } sg_pass_desc; |
1534 | |
1535 | /* setup and misc functions */ |
1536 | SOKOL_API_DECL void sg_setup(const sg_desc* desc); |
1537 | SOKOL_API_DECL void sg_shutdown(void); |
1538 | SOKOL_API_DECL bool sg_isvalid(void); |
1539 | SOKOL_API_DECL bool sg_query_feature(sg_feature feature); |
1540 | SOKOL_API_DECL void sg_reset_state_cache(void); |
1541 | |
1542 | /* resource creation, destruction and updating */ |
1543 | SOKOL_API_DECL sg_buffer sg_make_buffer(const sg_buffer_desc* desc); |
1544 | SOKOL_API_DECL sg_image sg_make_image(const sg_image_desc* desc); |
1545 | SOKOL_API_DECL sg_shader sg_make_shader(const sg_shader_desc* desc); |
1546 | SOKOL_API_DECL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc); |
1547 | SOKOL_API_DECL sg_pass sg_make_pass(const sg_pass_desc* desc); |
1548 | SOKOL_API_DECL void sg_destroy_buffer(sg_buffer buf); |
1549 | SOKOL_API_DECL void sg_destroy_image(sg_image img); |
1550 | SOKOL_API_DECL void sg_destroy_shader(sg_shader shd); |
1551 | SOKOL_API_DECL void sg_destroy_pipeline(sg_pipeline pip); |
1552 | SOKOL_API_DECL void sg_destroy_pass(sg_pass pass); |
1553 | SOKOL_API_DECL void sg_update_buffer(sg_buffer buf, const void* data_ptr, int data_size); |
1554 | SOKOL_API_DECL void sg_update_image(sg_image img, const sg_image_content* data); |
1555 | SOKOL_API_DECL int sg_append_buffer(sg_buffer buf, const void* data_ptr, int data_size); |
1556 | SOKOL_API_DECL bool sg_query_buffer_overflow(sg_buffer buf); |
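
/*
    A hedged per-frame sketch for streaming vertex data with the buffer
    functions declared above; sg_append_buffer() returns the byte offset
    of the appended data, and sg_query_buffer_overflow() reports whether
    more data was appended this frame than fits into the buffer:

        int offset = sg_append_buffer(vbuf, vertices, (int)sizeof(vertices));
        if (sg_query_buffer_overflow(vbuf)) {
            // too much data for this frame, skip rendering from vbuf
        }
        else {
            // use 'offset' as the buffer offset when drawing
        }
*/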
1557 | |
1558 | /* get resource state (initial, alloc, valid, failed) */ |
1559 | SOKOL_API_DECL sg_resource_state sg_query_buffer_state(sg_buffer buf); |
1560 | SOKOL_API_DECL sg_resource_state sg_query_image_state(sg_image img); |
1561 | SOKOL_API_DECL sg_resource_state sg_query_shader_state(sg_shader shd); |
1562 | SOKOL_API_DECL sg_resource_state sg_query_pipeline_state(sg_pipeline pip); |
1563 | SOKOL_API_DECL sg_resource_state sg_query_pass_state(sg_pass pass); |
1564 | |
1565 | /* rendering functions */ |
1566 | SOKOL_API_DECL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height); |
1567 | SOKOL_API_DECL void sg_begin_pass(sg_pass pass, const sg_pass_action* pass_action); |
1568 | SOKOL_API_DECL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left); |
1569 | SOKOL_API_DECL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left); |
1570 | SOKOL_API_DECL void sg_apply_draw_state(const sg_draw_state* ds); |
1571 | SOKOL_API_DECL void sg_apply_uniform_block(sg_shader_stage stage, int ub_index, const void* data, int num_bytes); |
1572 | SOKOL_API_DECL void sg_draw(int base_element, int num_elements, int num_instances); |
1573 | SOKOL_API_DECL void sg_end_pass(void); |
1574 | SOKOL_API_DECL void sg_commit(void); |
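
/*
    A minimal per-frame skeleton using the rendering functions above; the
    sg_draw_state members (.pipeline, .vertex_buffers[], .index_buffer)
    are assumed from the sg_draw_state declaration earlier in this header,
    and 'vs_params' is a hypothetical uniform block struct:

        sg_draw_state draw_state = {
            .pipeline = pip,
            .vertex_buffers[0] = vbuf,
            .index_buffer = ibuf
        };
        sg_pass_action pass_action = { 0 };  // zero-init: actions resolve to defaults
        sg_begin_default_pass(&pass_action, width, height);
        sg_apply_draw_state(&draw_state);
        sg_apply_uniform_block(SG_SHADERSTAGE_VS, 0, &vs_params, (int)sizeof(vs_params));
        sg_draw(0, num_elements, 1);
        sg_end_pass();
        sg_commit();
*/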
1575 | |
1576 | /* separate resource allocation and initialization (for async setup) */ |
1577 | SOKOL_API_DECL sg_buffer sg_alloc_buffer(void); |
1578 | SOKOL_API_DECL sg_image sg_alloc_image(void); |
1579 | SOKOL_API_DECL sg_shader sg_alloc_shader(void); |
1580 | SOKOL_API_DECL sg_pipeline sg_alloc_pipeline(void); |
1581 | SOKOL_API_DECL sg_pass sg_alloc_pass(void); |
1582 | SOKOL_API_DECL void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc); |
1583 | SOKOL_API_DECL void sg_init_image(sg_image img_id, const sg_image_desc* desc); |
1584 | SOKOL_API_DECL void sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc); |
1585 | SOKOL_API_DECL void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc); |
1586 | SOKOL_API_DECL void sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc); |
1587 | SOKOL_API_DECL void sg_fail_buffer(sg_buffer buf_id); |
1588 | SOKOL_API_DECL void sg_fail_image(sg_image img_id); |
1589 | SOKOL_API_DECL void sg_fail_shader(sg_shader shd_id); |
1590 | SOKOL_API_DECL void sg_fail_pipeline(sg_pipeline pip_id); |
1591 | SOKOL_API_DECL void sg_fail_pass(sg_pass pass_id); |
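
/*
    A hedged sketch of the two-step (async) resource setup declared above:
    allocate a handle immediately, then initialize it later when its data
    becomes available, or mark it as failed ('load_ok' and 'img_desc' are
    hypothetical placeholders):

        sg_image img = sg_alloc_image();
        // ...later, e.g. when an asynchronous file load has finished:
        if (load_ok) {
            sg_init_image(img, &img_desc);  // resource state becomes VALID
        }
        else {
            sg_fail_image(img);             // resource state becomes FAILED
        }
*/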
1592 | |
1593 | /* rendering contexts (optional) */ |
1594 | SOKOL_API_DECL sg_context sg_setup_context(void); |
1595 | SOKOL_API_DECL void sg_activate_context(sg_context ctx_id); |
1596 | SOKOL_API_DECL void sg_discard_context(sg_context ctx_id); |
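
/*
    A hedged sketch for rendering into multiple windows with the optional
    context functions above; creating and switching the actual 3D-API
    context for each window remains the application's responsibility:

        // with the second window's 3D-API context current:
        sg_context ctx2 = sg_setup_context();

        // each frame, after making that window's context current:
        sg_activate_context(ctx2);
        // ...render...
        sg_commit();

        // before destroying the window (with its context still current):
        sg_discard_context(ctx2);
*/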
1597 | |
1598 | #ifdef _MSC_VER |
1599 | #pragma warning(pop) |
1600 | #endif |
1601 | #ifdef __cplusplus |
1602 | } /* extern "C" */ |
1603 | #endif |
1604 | |
1605 | /*--- IMPLEMENTATION ---------------------------------------------------------*/ |
1606 | #ifdef SOKOL_IMPL |
1607 | |
1608 | #ifdef _MSC_VER |
1609 | #pragma warning(push) |
1610 | #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ |
1611 | #pragma warning(disable:4115) /* named type definition in parentheses */ |
1612 | #pragma warning(disable:4505) /* unreferenced local function has been removed */ |
1613 | #endif |
1614 | |
1615 | #ifndef SOKOL_API_IMPL |
1616 | #define SOKOL_API_IMPL |
1617 | #endif |
1618 | #ifndef SOKOL_DEBUG |
1619 | #ifdef _DEBUG |
1620 | #define SOKOL_DEBUG (1) |
1621 | #endif |
1622 | #endif |
1623 | #ifndef SOKOL_ASSERT |
1624 | #include <assert.h> |
1625 | #define SOKOL_ASSERT(c) assert(c) |
1626 | #endif |
1627 | #ifndef SOKOL_VALIDATE_BEGIN |
1628 | #define SOKOL_VALIDATE_BEGIN() _sg_validate_begin() |
1629 | #endif |
1630 | #ifndef SOKOL_VALIDATE |
1631 | #define SOKOL_VALIDATE(cond, err) _sg_validate((cond), err) |
1632 | #endif |
1633 | #ifndef SOKOL_VALIDATE_END |
1634 | #define SOKOL_VALIDATE_END() _sg_validate_end() |
1635 | #endif |
1636 | #ifndef SOKOL_UNREACHABLE |
1637 | #define SOKOL_UNREACHABLE SOKOL_ASSERT(false) |
1638 | #endif |
1639 | #ifndef SOKOL_MALLOC |
1640 | #include <stdlib.h> |
1641 | #define SOKOL_MALLOC(s) malloc(s) |
1642 | #define SOKOL_FREE(p) free(p) |
1643 | #endif |
1644 | #ifndef SOKOL_LOG |
1645 | #ifdef SOKOL_DEBUG |
1646 | #include <stdio.h> |
1647 | #define SOKOL_LOG(s) { SOKOL_ASSERT(s); puts(s); } |
1648 | #else |
1649 | #define SOKOL_LOG(s) |
1650 | #endif |
1651 | #endif |
1652 | #if !(defined(SOKOL_GLCORE33)||defined(SOKOL_GLES2)||defined(SOKOL_GLES3)||defined(SOKOL_D3D11)||defined(SOKOL_METAL)) |
1653 | #error "Please select a backend with SOKOL_GLCORE33, SOKOL_GLES2, SOKOL_GLES3, SOKOL_D3D11 or SOKOL_METAL" |
1654 | #endif |
1655 | |
1656 | #ifndef _SOKOL_PRIVATE |
1657 | #if defined(__GNUC__) |
1658 | #define _SOKOL_PRIVATE __attribute__((unused)) static |
1659 | #else |
1660 | #define _SOKOL_PRIVATE static |
1661 | #endif |
1662 | #endif |
1663 | |
1664 | #ifndef _SOKOL_UNUSED |
1665 | #define _SOKOL_UNUSED(x) (void)(x) |
1666 | #endif |
1667 | |
1668 | /* default clear values */ |
1669 | #ifndef SG_DEFAULT_CLEAR_RED |
1670 | #define SG_DEFAULT_CLEAR_RED (0.5f) |
1671 | #endif |
1672 | #ifndef SG_DEFAULT_CLEAR_GREEN |
1673 | #define SG_DEFAULT_CLEAR_GREEN (0.5f) |
1674 | #endif |
1675 | #ifndef SG_DEFAULT_CLEAR_BLUE |
1676 | #define SG_DEFAULT_CLEAR_BLUE (0.5f) |
1677 | #endif |
1678 | #ifndef SG_DEFAULT_CLEAR_ALPHA |
1679 | #define SG_DEFAULT_CLEAR_ALPHA (1.0f) |
1680 | #endif |
1681 | #ifndef SG_DEFAULT_CLEAR_DEPTH |
1682 | #define SG_DEFAULT_CLEAR_DEPTH (1.0f) |
1683 | #endif |
1684 | #ifndef SG_DEFAULT_CLEAR_STENCIL |
1685 | #define SG_DEFAULT_CLEAR_STENCIL (0) |
1686 | #endif |
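
/*
    Any of these clear-value defaults can be overridden with custom values
    before including the implementation, for example (a hedged sketch) to
    clear to black instead of 50% gray:

        #define SG_DEFAULT_CLEAR_RED (0.0f)
        #define SG_DEFAULT_CLEAR_GREEN (0.0f)
        #define SG_DEFAULT_CLEAR_BLUE (0.0f)
*/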
1687 | |
1688 | enum { |
1689 | _SG_SLOT_SHIFT = 16, |
1690 | _SG_SLOT_MASK = (1<<_SG_SLOT_SHIFT)-1, |
1691 | _SG_MAX_POOL_SIZE = (1<<_SG_SLOT_SHIFT), |
1692 | _SG_DEFAULT_BUFFER_POOL_SIZE = 128, |
1693 | _SG_DEFAULT_IMAGE_POOL_SIZE = 128, |
1694 | _SG_DEFAULT_SHADER_POOL_SIZE = 32, |
1695 | _SG_DEFAULT_PIPELINE_POOL_SIZE = 64, |
1696 | _SG_DEFAULT_PASS_POOL_SIZE = 16, |
1697 | _SG_DEFAULT_CONTEXT_POOL_SIZE = 16 |
1698 | }; |
1699 | |
1700 | /* helper macros */ |
1701 | #define _sg_def(val, def) (((val) == 0) ? (def) : (val)) |
1702 | #define _sg_def_flt(val, def) (((val) == 0.0f) ? (def) : (val)) |
#define _sg_min(a,b) (((a)<(b))?(a):(b))
#define _sg_max(a,b) (((a)>(b))?(a):(b))
#define _sg_clamp(v,v0,v1) (((v)<(v0))?(v0):(((v)>(v1))?(v1):(v)))
#define _sg_fequal(val,cmp,delta) ((((val)-(cmp))> -(delta))&&(((val)-(cmp))<(delta)))
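
/*
    _sg_def() and _sg_def_flt() implement the 'zero is default' convention
    used when resolving desc structs into resource objects, for example:

        buf->type = _sg_def(desc->type, SG_BUFFERTYPE_VERTEXBUFFER);

    yields SG_BUFFERTYPE_VERTEXBUFFER when desc->type was left
    zero-initialized, and the explicitly requested type otherwise.
*/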
1707 | |
1708 | |
1709 | /*-- helper functions --------------------------------------------------------*/ |
1710 | |
1711 | /* return byte size of a vertex format */ |
1712 | _SOKOL_PRIVATE int _sg_vertexformat_bytesize(sg_vertex_format fmt) { |
1713 | switch (fmt) { |
1714 | case SG_VERTEXFORMAT_FLOAT: return 4; |
1715 | case SG_VERTEXFORMAT_FLOAT2: return 8; |
1716 | case SG_VERTEXFORMAT_FLOAT3: return 12; |
1717 | case SG_VERTEXFORMAT_FLOAT4: return 16; |
1718 | case SG_VERTEXFORMAT_BYTE4: return 4; |
1719 | case SG_VERTEXFORMAT_BYTE4N: return 4; |
1720 | case SG_VERTEXFORMAT_UBYTE4: return 4; |
1721 | case SG_VERTEXFORMAT_UBYTE4N: return 4; |
1722 | case SG_VERTEXFORMAT_SHORT2: return 4; |
1723 | case SG_VERTEXFORMAT_SHORT2N: return 4; |
1724 | case SG_VERTEXFORMAT_SHORT4: return 8; |
1725 | case SG_VERTEXFORMAT_SHORT4N: return 8; |
1726 | case SG_VERTEXFORMAT_UINT10_N2: return 4; |
1727 | case SG_VERTEXFORMAT_INVALID: return 0; |
1728 | default: |
1729 | SOKOL_UNREACHABLE; |
1730 | return -1; |
1731 | } |
1732 | } |
1733 | |
1734 | /* return the byte size of a shader uniform */ |
1735 | _SOKOL_PRIVATE int _sg_uniform_size(sg_uniform_type type, int count) { |
1736 | switch (type) { |
1737 | case SG_UNIFORMTYPE_INVALID: return 0; |
1738 | case SG_UNIFORMTYPE_FLOAT: return 4 * count; |
1739 | case SG_UNIFORMTYPE_FLOAT2: return 8 * count; |
1740 | case SG_UNIFORMTYPE_FLOAT3: return 12 * count; /* FIXME: std140??? */ |
1741 | case SG_UNIFORMTYPE_FLOAT4: return 16 * count; |
1742 | case SG_UNIFORMTYPE_MAT4: return 64 * count; |
1743 | default: |
1744 | SOKOL_UNREACHABLE; |
1745 | return -1; |
1746 | } |
1747 | } |
1748 | |
1749 | /* return true if pixel format is a compressed format */ |
1750 | _SOKOL_PRIVATE bool _sg_is_compressed_pixel_format(sg_pixel_format fmt) { |
1751 | switch (fmt) { |
1752 | case SG_PIXELFORMAT_DXT1: |
1753 | case SG_PIXELFORMAT_DXT3: |
1754 | case SG_PIXELFORMAT_DXT5: |
1755 | case SG_PIXELFORMAT_PVRTC2_RGB: |
1756 | case SG_PIXELFORMAT_PVRTC4_RGB: |
1757 | case SG_PIXELFORMAT_PVRTC2_RGBA: |
1758 | case SG_PIXELFORMAT_PVRTC4_RGBA: |
1759 | case SG_PIXELFORMAT_ETC2_RGB8: |
1760 | case SG_PIXELFORMAT_ETC2_SRGB8: |
1761 | return true; |
1762 | default: |
1763 | return false; |
1764 | } |
1765 | } |
1766 | |
/* return true if pixel format is a valid render target color format */
1768 | _SOKOL_PRIVATE bool _sg_is_valid_rendertarget_color_format(sg_pixel_format fmt) { |
1769 | switch (fmt) { |
1770 | case SG_PIXELFORMAT_RGBA8: |
1771 | case SG_PIXELFORMAT_R10G10B10A2: |
1772 | case SG_PIXELFORMAT_RGBA32F: |
1773 | case SG_PIXELFORMAT_RGBA16F: |
1774 | return true; |
1775 | default: |
1776 | return false; |
1777 | } |
1778 | } |
1779 | |
/* return true if pixel format is a valid render target depth format */
1781 | _SOKOL_PRIVATE bool _sg_is_valid_rendertarget_depth_format(sg_pixel_format fmt) { |
1782 | switch (fmt) { |
1783 | case SG_PIXELFORMAT_DEPTH: |
1784 | case SG_PIXELFORMAT_DEPTHSTENCIL: |
1785 | return true; |
1786 | default: |
1787 | return false; |
1788 | } |
1789 | } |
1790 | |
1791 | /* return true if pixel format is a depth-stencil format */ |
1792 | _SOKOL_PRIVATE bool _sg_is_depth_stencil_format(sg_pixel_format fmt) { |
1793 | /* FIXME: more depth stencil formats? */ |
1794 | return (SG_PIXELFORMAT_DEPTHSTENCIL == fmt); |
1795 | } |
1796 | |
1797 | /* return the bytes-per-pixel for a pixel format */ |
1798 | _SOKOL_PRIVATE int _sg_pixelformat_bytesize(sg_pixel_format fmt) { |
1799 | switch (fmt) { |
1800 | case SG_PIXELFORMAT_RGBA32F: |
1801 | return 16; |
1802 | case SG_PIXELFORMAT_RGBA16F: |
1803 | return 8; |
1804 | case SG_PIXELFORMAT_RGBA8: |
1805 | case SG_PIXELFORMAT_R10G10B10A2: |
1806 | case SG_PIXELFORMAT_R32F: |
1807 | return 4; |
1808 | case SG_PIXELFORMAT_RGB8: |
1809 | return 3; |
1810 | case SG_PIXELFORMAT_R5G5B5A1: |
1811 | case SG_PIXELFORMAT_R5G6B5: |
1812 | case SG_PIXELFORMAT_RGBA4: |
1813 | case SG_PIXELFORMAT_R16F: |
1814 | return 2; |
1815 | case SG_PIXELFORMAT_L8: |
1816 | return 1; |
1817 | default: |
1818 | SOKOL_UNREACHABLE; |
1819 | return 0; |
1820 | } |
1821 | } |
1822 | |
1823 | /* return row pitch for an image */ |
1824 | _SOKOL_PRIVATE int _sg_row_pitch(sg_pixel_format fmt, int width) { |
1825 | int pitch; |
1826 | switch (fmt) { |
1827 | case SG_PIXELFORMAT_DXT1: |
1828 | case SG_PIXELFORMAT_ETC2_RGB8: |
1829 | case SG_PIXELFORMAT_ETC2_SRGB8: |
1830 | pitch = ((width + 3) / 4) * 8; |
1831 | pitch = pitch < 8 ? 8 : pitch; |
1832 | break; |
1833 | case SG_PIXELFORMAT_DXT3: |
1834 | case SG_PIXELFORMAT_DXT5: |
1835 | pitch = ((width + 3) / 4) * 16; |
1836 | pitch = pitch < 16 ? 16 : pitch; |
1837 | break; |
1838 | case SG_PIXELFORMAT_PVRTC4_RGB: |
1839 | case SG_PIXELFORMAT_PVRTC4_RGBA: |
1840 | { |
1841 | const int block_size = 4*4; |
1842 | const int bpp = 4; |
1843 | int width_blocks = width / 4; |
1844 | width_blocks = width_blocks < 2 ? 2 : width_blocks; |
1845 | pitch = width_blocks * ((block_size * bpp) / 8); |
1846 | } |
1847 | break; |
1848 | case SG_PIXELFORMAT_PVRTC2_RGB: |
1849 | case SG_PIXELFORMAT_PVRTC2_RGBA: |
1850 | { |
1851 | const int block_size = 8*4; |
1852 | const int bpp = 2; |
1853 | int width_blocks = width / 4; |
1854 | width_blocks = width_blocks < 2 ? 2 : width_blocks; |
1855 | pitch = width_blocks * ((block_size * bpp) / 8); |
1856 | } |
1857 | break; |
1858 | default: |
1859 | pitch = width * _sg_pixelformat_bytesize(fmt); |
1860 | break; |
1861 | } |
1862 | return pitch; |
1863 | } |
1864 | |
1865 | /* return pitch of a 2D subimage / texture slice */ |
1866 | _SOKOL_PRIVATE int _sg_surface_pitch(sg_pixel_format fmt, int width, int height) { |
1867 | int num_rows = 0; |
1868 | switch (fmt) { |
1869 | case SG_PIXELFORMAT_DXT1: |
1870 | case SG_PIXELFORMAT_DXT3: |
1871 | case SG_PIXELFORMAT_DXT5: |
1872 | case SG_PIXELFORMAT_ETC2_RGB8: |
1873 | case SG_PIXELFORMAT_ETC2_SRGB8: |
1874 | case SG_PIXELFORMAT_PVRTC2_RGB: |
1875 | case SG_PIXELFORMAT_PVRTC2_RGBA: |
1876 | case SG_PIXELFORMAT_PVRTC4_RGB: |
1877 | case SG_PIXELFORMAT_PVRTC4_RGBA: |
1878 | num_rows = ((height + 3) / 4); |
1879 | break; |
1880 | default: |
1881 | num_rows = height; |
1882 | break; |
1883 | } |
1884 | if (num_rows < 1) { |
1885 | num_rows = 1; |
1886 | } |
1887 | return num_rows * _sg_row_pitch(fmt, width); |
1888 | } |
1889 | |
1890 | /* resolve pass action defaults into a new pass action struct */ |
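/* (e.g. a default-initialized sg_pass_action, with all actions left at
   their zero/default value, resolves to clearing every attachment with
   the SG_DEFAULT_CLEAR_* values defined above) */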
1891 | _SOKOL_PRIVATE void _sg_resolve_default_pass_action(const sg_pass_action* from, sg_pass_action* to) { |
1892 | SOKOL_ASSERT(from && to); |
1893 | *to = *from; |
1894 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
1895 | if (to->colors[i].action == _SG_ACTION_DEFAULT) { |
1896 | to->colors[i].action = SG_ACTION_CLEAR; |
1897 | to->colors[i].val[0] = SG_DEFAULT_CLEAR_RED; |
1898 | to->colors[i].val[1] = SG_DEFAULT_CLEAR_GREEN; |
1899 | to->colors[i].val[2] = SG_DEFAULT_CLEAR_BLUE; |
1900 | to->colors[i].val[3] = SG_DEFAULT_CLEAR_ALPHA; |
1901 | } |
1902 | } |
1903 | if (to->depth.action == _SG_ACTION_DEFAULT) { |
1904 | to->depth.action = SG_ACTION_CLEAR; |
1905 | to->depth.val = SG_DEFAULT_CLEAR_DEPTH; |
1906 | } |
1907 | if (to->stencil.action == _SG_ACTION_DEFAULT) { |
1908 | to->stencil.action = SG_ACTION_CLEAR; |
1909 | to->stencil.val = SG_DEFAULT_CLEAR_STENCIL; |
1910 | } |
1911 | } |
1912 | |
1913 | /*-- resource pool slots (must be defined before rendering backend) ----------*/ |
1914 | typedef struct { |
1915 | uint32_t id; |
1916 | uint32_t ctx_id; |
1917 | sg_resource_state state; |
1918 | } _sg_slot; |
1919 | |
1920 | _SOKOL_PRIVATE int _sg_slot_index(uint32_t id) { |
1921 | return id & _SG_SLOT_MASK; |
1922 | } |
1923 | |
1924 | /*== GL BACKEND ==============================================================*/ |
1925 | #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) |
1926 | /* strstr(), memset() */ |
1927 | #include <string.h> |
1928 | |
1929 | #ifndef GL_UNSIGNED_INT_2_10_10_10_REV |
1930 | #define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368 |
1931 | #endif |
1932 | #ifndef GL_UNSIGNED_INT_24_8 |
1933 | #define GL_UNSIGNED_INT_24_8 0x84FA |
1934 | #endif |
1935 | #ifndef GL_TEXTURE_MAX_ANISOTROPY_EXT |
1936 | #define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE |
1937 | #endif |
1938 | #ifndef GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT |
1939 | #define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF |
1940 | #endif |
1941 | #ifndef GL_COMPRESSED_RGBA_S3TC_DXT1_EXT |
1942 | #define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1 |
1943 | #endif |
1944 | #ifndef GL_COMPRESSED_RGBA_S3TC_DXT3_EXT |
1945 | #define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2 |
1946 | #endif |
1947 | #ifndef GL_COMPRESSED_RGBA_S3TC_DXT5_EXT |
1948 | #define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3 |
1949 | #endif |
1950 | #ifndef GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG |
1951 | #define GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG 0x8C01 |
1952 | #endif |
1953 | #ifndef GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG |
1954 | #define GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG 0x8C00 |
1955 | #endif |
1956 | #ifndef GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG |
1957 | #define GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG 0x8C03 |
1958 | #endif |
1959 | #ifndef GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG |
1960 | #define GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG 0x8C02 |
1961 | #endif |
1962 | #ifndef GL_COMPRESSED_RGB8_ETC2 |
1963 | #define GL_COMPRESSED_RGB8_ETC2 0x9274 |
1964 | #endif |
1965 | #ifndef GL_COMPRESSED_SRGB8_ETC2 |
1966 | #define GL_COMPRESSED_SRGB8_ETC2 0x9275 |
1967 | #endif |
1968 | #ifndef GL_DEPTH24_STENCIL8 |
1969 | #define GL_DEPTH24_STENCIL8 0x88F0 |
1970 | #endif |
1971 | #ifndef GL_HALF_FLOAT |
1972 | #define GL_HALF_FLOAT 0x140B |
1973 | #endif |
1974 | #ifndef GL_DEPTH_STENCIL |
1975 | #define GL_DEPTH_STENCIL 0x84F9 |
1976 | #endif |
1977 | #ifndef GL_LUMINANCE |
1978 | #define GL_LUMINANCE 0x1909 |
1979 | #endif |
1980 | #ifdef SOKOL_GLES2 |
1981 | # ifdef GL_ANGLE_instanced_arrays |
1982 | # define SOKOL_INSTANCING_ENABLED 1 |
1983 | # define glDrawArraysInstanced(mode, first, count, instancecount) glDrawArraysInstancedANGLE(mode, first, count, instancecount) |
1984 | # define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedANGLE(mode, count, type, indices, instancecount) |
1985 | # define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorANGLE(index, divisor) |
1986 | # elif defined(GL_EXT_draw_instanced) && defined(GL_EXT_instanced_arrays) |
1987 | # define SOKOL_INSTANCING_ENABLED 1 |
1988 | # define glDrawArraysInstanced(mode, first, count, instancecount) glDrawArraysInstancedEXT(mode, first, count, instancecount) |
1989 | # define glDrawElementsInstanced(mode, count, type, indices, instancecount) glDrawElementsInstancedEXT(mode, count, type, indices, instancecount) |
1990 | # define glVertexAttribDivisor(index, divisor) glVertexAttribDivisorEXT(index, divisor) |
1991 | # else |
1992 | # define SOKOL_GLES2_INSTANCING_ERROR "Select GL_ANGLE_instanced_arrays or (GL_EXT_draw_instanced & GL_EXT_instanced_arrays) to enable instancing in GLES2" |
1993 | # define glDrawArraysInstanced(mode, first, count, instancecount) SOKOL_ASSERT(0 && SOKOL_GLES2_INSTANCING_ERROR) |
1994 | # define glDrawElementsInstanced(mode, count, type, indices, instancecount) SOKOL_ASSERT(0 && SOKOL_GLES2_INSTANCING_ERROR) |
1995 | # define glVertexAttribDivisor(index, divisor) SOKOL_ASSERT(0 && SOKOL_GLES2_INSTANCING_ERROR) |
1996 | # endif |
1997 | #else |
1998 | # define SOKOL_INSTANCING_ENABLED 1 |
1999 | #endif |
2000 | |
2001 | #define _SG_GL_CHECK_ERROR() { SOKOL_ASSERT(glGetError() == GL_NO_ERROR); } |
2002 | |
2003 | /* true if running in GLES2-fallback mode */ |
2004 | static bool _sg_gl_gles2; |
2005 | |
2006 | /*-- type translation --------------------------------------------------------*/ |
2007 | _SOKOL_PRIVATE GLenum _sg_gl_buffer_target(sg_buffer_type t) { |
2008 | switch (t) { |
2009 | case SG_BUFFERTYPE_VERTEXBUFFER: return GL_ARRAY_BUFFER; |
2010 | case SG_BUFFERTYPE_INDEXBUFFER: return GL_ELEMENT_ARRAY_BUFFER; |
2011 | default: SOKOL_UNREACHABLE; return 0; |
2012 | } |
2013 | } |
2014 | |
2015 | _SOKOL_PRIVATE GLenum _sg_gl_texture_target(sg_image_type t) { |
2016 | switch (t) { |
2017 | case SG_IMAGETYPE_2D: return GL_TEXTURE_2D; |
2018 | case SG_IMAGETYPE_CUBE: return GL_TEXTURE_CUBE_MAP; |
2019 | #if !defined(SOKOL_GLES2) |
2020 | case SG_IMAGETYPE_3D: return GL_TEXTURE_3D; |
2021 | case SG_IMAGETYPE_ARRAY: return GL_TEXTURE_2D_ARRAY; |
2022 | #endif |
2023 | default: SOKOL_UNREACHABLE; return 0; |
2024 | } |
2025 | } |
2026 | |
2027 | _SOKOL_PRIVATE GLenum _sg_gl_usage(sg_usage u) { |
2028 | switch (u) { |
2029 | case SG_USAGE_IMMUTABLE: return GL_STATIC_DRAW; |
2030 | case SG_USAGE_DYNAMIC: return GL_DYNAMIC_DRAW; |
2031 | case SG_USAGE_STREAM: return GL_STREAM_DRAW; |
2032 | default: SOKOL_UNREACHABLE; return 0; |
2033 | } |
2034 | } |
2035 | |
2036 | _SOKOL_PRIVATE GLenum _sg_gl_shader_stage(sg_shader_stage stage) { |
2037 | switch (stage) { |
2038 | case SG_SHADERSTAGE_VS: return GL_VERTEX_SHADER; |
2039 | case SG_SHADERSTAGE_FS: return GL_FRAGMENT_SHADER; |
2040 | default: SOKOL_UNREACHABLE; return 0; |
2041 | } |
2042 | } |
2043 | |
2044 | _SOKOL_PRIVATE GLint _sg_gl_vertexformat_size(sg_vertex_format fmt) { |
2045 | switch (fmt) { |
2046 | case SG_VERTEXFORMAT_FLOAT: return 1; |
2047 | case SG_VERTEXFORMAT_FLOAT2: return 2; |
2048 | case SG_VERTEXFORMAT_FLOAT3: return 3; |
2049 | case SG_VERTEXFORMAT_FLOAT4: return 4; |
2050 | case SG_VERTEXFORMAT_BYTE4: return 4; |
2051 | case SG_VERTEXFORMAT_BYTE4N: return 4; |
2052 | case SG_VERTEXFORMAT_UBYTE4: return 4; |
2053 | case SG_VERTEXFORMAT_UBYTE4N: return 4; |
2054 | case SG_VERTEXFORMAT_SHORT2: return 2; |
2055 | case SG_VERTEXFORMAT_SHORT2N: return 2; |
2056 | case SG_VERTEXFORMAT_SHORT4: return 4; |
2057 | case SG_VERTEXFORMAT_SHORT4N: return 4; |
2058 | case SG_VERTEXFORMAT_UINT10_N2: return 4; |
2059 | default: SOKOL_UNREACHABLE; return 0; |
2060 | } |
2061 | } |
2062 | |
2063 | _SOKOL_PRIVATE GLenum _sg_gl_vertexformat_type(sg_vertex_format fmt) { |
2064 | switch (fmt) { |
2065 | case SG_VERTEXFORMAT_FLOAT: |
2066 | case SG_VERTEXFORMAT_FLOAT2: |
2067 | case SG_VERTEXFORMAT_FLOAT3: |
2068 | case SG_VERTEXFORMAT_FLOAT4: |
2069 | return GL_FLOAT; |
2070 | case SG_VERTEXFORMAT_BYTE4: |
2071 | case SG_VERTEXFORMAT_BYTE4N: |
2072 | return GL_BYTE; |
2073 | case SG_VERTEXFORMAT_UBYTE4: |
2074 | case SG_VERTEXFORMAT_UBYTE4N: |
2075 | return GL_UNSIGNED_BYTE; |
2076 | case SG_VERTEXFORMAT_SHORT2: |
2077 | case SG_VERTEXFORMAT_SHORT2N: |
2078 | case SG_VERTEXFORMAT_SHORT4: |
2079 | case SG_VERTEXFORMAT_SHORT4N: |
2080 | return GL_SHORT; |
2081 | case SG_VERTEXFORMAT_UINT10_N2: |
2082 | return GL_UNSIGNED_INT_2_10_10_10_REV; |
2083 | default: |
2084 | SOKOL_UNREACHABLE; return 0; |
2085 | } |
2086 | } |
2087 | |
2088 | _SOKOL_PRIVATE GLboolean _sg_gl_vertexformat_normalized(sg_vertex_format fmt) { |
2089 | switch (fmt) { |
2090 | case SG_VERTEXFORMAT_BYTE4N: |
2091 | case SG_VERTEXFORMAT_UBYTE4N: |
2092 | case SG_VERTEXFORMAT_SHORT2N: |
2093 | case SG_VERTEXFORMAT_SHORT4N: |
2094 | case SG_VERTEXFORMAT_UINT10_N2: |
2095 | return GL_TRUE; |
2096 | default: |
2097 | return GL_FALSE; |
2098 | } |
2099 | } |
2100 | |
2101 | _SOKOL_PRIVATE GLenum _sg_gl_primitive_type(sg_primitive_type t) { |
2102 | switch (t) { |
2103 | case SG_PRIMITIVETYPE_POINTS: return GL_POINTS; |
2104 | case SG_PRIMITIVETYPE_LINES: return GL_LINES; |
2105 | case SG_PRIMITIVETYPE_LINE_STRIP: return GL_LINE_STRIP; |
2106 | case SG_PRIMITIVETYPE_TRIANGLES: return GL_TRIANGLES; |
2107 | case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return GL_TRIANGLE_STRIP; |
2108 | default: SOKOL_UNREACHABLE; return 0; |
2109 | } |
2110 | } |
2111 | |
2112 | _SOKOL_PRIVATE GLenum _sg_gl_index_type(sg_index_type t) { |
2113 | switch (t) { |
2114 | case SG_INDEXTYPE_NONE: return 0; |
2115 | case SG_INDEXTYPE_UINT16: return GL_UNSIGNED_SHORT; |
2116 | case SG_INDEXTYPE_UINT32: return GL_UNSIGNED_INT; |
2117 | default: SOKOL_UNREACHABLE; return 0; |
2118 | } |
2119 | } |
2120 | |
2121 | _SOKOL_PRIVATE GLenum _sg_gl_compare_func(sg_compare_func cmp) { |
2122 | switch (cmp) { |
2123 | case SG_COMPAREFUNC_NEVER: return GL_NEVER; |
2124 | case SG_COMPAREFUNC_LESS: return GL_LESS; |
2125 | case SG_COMPAREFUNC_EQUAL: return GL_EQUAL; |
2126 | case SG_COMPAREFUNC_LESS_EQUAL: return GL_LEQUAL; |
2127 | case SG_COMPAREFUNC_GREATER: return GL_GREATER; |
2128 | case SG_COMPAREFUNC_NOT_EQUAL: return GL_NOTEQUAL; |
2129 | case SG_COMPAREFUNC_GREATER_EQUAL: return GL_GEQUAL; |
2130 | case SG_COMPAREFUNC_ALWAYS: return GL_ALWAYS; |
2131 | default: SOKOL_UNREACHABLE; return 0; |
2132 | } |
2133 | } |
2134 | |
2135 | _SOKOL_PRIVATE GLenum _sg_gl_stencil_op(sg_stencil_op op) { |
2136 | switch (op) { |
2137 | case SG_STENCILOP_KEEP: return GL_KEEP; |
2138 | case SG_STENCILOP_ZERO: return GL_ZERO; |
2139 | case SG_STENCILOP_REPLACE: return GL_REPLACE; |
2140 | case SG_STENCILOP_INCR_CLAMP: return GL_INCR; |
2141 | case SG_STENCILOP_DECR_CLAMP: return GL_DECR; |
2142 | case SG_STENCILOP_INVERT: return GL_INVERT; |
2143 | case SG_STENCILOP_INCR_WRAP: return GL_INCR_WRAP; |
2144 | case SG_STENCILOP_DECR_WRAP: return GL_DECR_WRAP; |
2145 | default: SOKOL_UNREACHABLE; return 0; |
2146 | } |
2147 | } |
2148 | |
2149 | _SOKOL_PRIVATE GLenum _sg_gl_blend_factor(sg_blend_factor f) { |
2150 | switch (f) { |
2151 | case SG_BLENDFACTOR_ZERO: return GL_ZERO; |
2152 | case SG_BLENDFACTOR_ONE: return GL_ONE; |
2153 | case SG_BLENDFACTOR_SRC_COLOR: return GL_SRC_COLOR; |
2154 | case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR; |
2155 | case SG_BLENDFACTOR_SRC_ALPHA: return GL_SRC_ALPHA; |
2156 | case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA; |
2157 | case SG_BLENDFACTOR_DST_COLOR: return GL_DST_COLOR; |
2158 | case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return GL_ONE_MINUS_DST_COLOR; |
2159 | case SG_BLENDFACTOR_DST_ALPHA: return GL_DST_ALPHA; |
2160 | case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA; |
2161 | case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return GL_SRC_ALPHA_SATURATE; |
2162 | case SG_BLENDFACTOR_BLEND_COLOR: return GL_CONSTANT_COLOR; |
2163 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return GL_ONE_MINUS_CONSTANT_COLOR; |
2164 | case SG_BLENDFACTOR_BLEND_ALPHA: return GL_CONSTANT_ALPHA; |
2165 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return GL_ONE_MINUS_CONSTANT_ALPHA; |
2166 | default: SOKOL_UNREACHABLE; return 0; |
2167 | } |
2168 | } |
2169 | |
2170 | _SOKOL_PRIVATE GLenum _sg_gl_blend_op(sg_blend_op op) { |
2171 | switch (op) { |
2172 | case SG_BLENDOP_ADD: return GL_FUNC_ADD; |
2173 | case SG_BLENDOP_SUBTRACT: return GL_FUNC_SUBTRACT; |
2174 | case SG_BLENDOP_REVERSE_SUBTRACT: return GL_FUNC_REVERSE_SUBTRACT; |
2175 | default: SOKOL_UNREACHABLE; return 0; |
2176 | } |
2177 | } |
2178 | |
2179 | _SOKOL_PRIVATE GLenum _sg_gl_filter(sg_filter f) { |
2180 | switch (f) { |
2181 | case SG_FILTER_NEAREST: return GL_NEAREST; |
2182 | case SG_FILTER_LINEAR: return GL_LINEAR; |
2183 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: return GL_NEAREST_MIPMAP_NEAREST; |
2184 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: return GL_NEAREST_MIPMAP_LINEAR; |
2185 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: return GL_LINEAR_MIPMAP_NEAREST; |
2186 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: return GL_LINEAR_MIPMAP_LINEAR; |
2187 | default: SOKOL_UNREACHABLE; return 0; |
2188 | } |
2189 | } |
2190 | |
2191 | _SOKOL_PRIVATE GLenum _sg_gl_wrap(sg_wrap w) { |
2192 | switch (w) { |
2193 | case SG_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE; |
2194 | case SG_WRAP_REPEAT: return GL_REPEAT; |
2195 | case SG_WRAP_MIRRORED_REPEAT: return GL_MIRRORED_REPEAT; |
2196 | default: SOKOL_UNREACHABLE; return 0; |
2197 | } |
2198 | } |
2199 | |
2200 | _SOKOL_PRIVATE GLenum _sg_gl_teximage_type(sg_pixel_format fmt) { |
2201 | switch (fmt) { |
2202 | case SG_PIXELFORMAT_RGBA32F: |
2203 | case SG_PIXELFORMAT_R32F: |
2204 | return GL_FLOAT; |
2205 | case SG_PIXELFORMAT_RGBA16F: |
2206 | case SG_PIXELFORMAT_R16F: |
2207 | return GL_HALF_FLOAT; |
2208 | case SG_PIXELFORMAT_RGBA8: |
2209 | case SG_PIXELFORMAT_RGB8: |
2210 | case SG_PIXELFORMAT_L8: |
2211 | return GL_UNSIGNED_BYTE; |
2212 | case SG_PIXELFORMAT_R10G10B10A2: |
2213 | return GL_UNSIGNED_INT_2_10_10_10_REV; |
2214 | case SG_PIXELFORMAT_R5G5B5A1: |
2215 | return GL_UNSIGNED_SHORT_5_5_5_1; |
2216 | case SG_PIXELFORMAT_R5G6B5: |
2217 | return GL_UNSIGNED_SHORT_5_6_5; |
2218 | case SG_PIXELFORMAT_RGBA4: |
2219 | return GL_UNSIGNED_SHORT_4_4_4_4; |
2220 | case SG_PIXELFORMAT_DEPTH: |
2221 | /* FIXME */ |
2222 | return GL_UNSIGNED_SHORT; |
2223 | case SG_PIXELFORMAT_DEPTHSTENCIL: |
2224 | /* FIXME */ |
2225 | return GL_UNSIGNED_INT_24_8; |
2226 | default: |
2227 | SOKOL_UNREACHABLE; return 0; |
2228 | } |
2229 | } |
2230 | |
2231 | _SOKOL_PRIVATE GLenum _sg_gl_teximage_format(sg_pixel_format fmt) { |
2232 | switch (fmt) { |
2233 | case SG_PIXELFORMAT_NONE: |
2234 | return 0; |
2235 | case SG_PIXELFORMAT_RGBA8: |
2236 | case SG_PIXELFORMAT_R5G5B5A1: |
2237 | case SG_PIXELFORMAT_RGBA4: |
2238 | case SG_PIXELFORMAT_RGBA32F: |
2239 | case SG_PIXELFORMAT_RGBA16F: |
2240 | case SG_PIXELFORMAT_R10G10B10A2: |
2241 | return GL_RGBA; |
2242 | case SG_PIXELFORMAT_RGB8: |
2243 | case SG_PIXELFORMAT_R5G6B5: |
2244 | return GL_RGB; |
2245 | case SG_PIXELFORMAT_L8: |
2246 | case SG_PIXELFORMAT_R32F: |
2247 | case SG_PIXELFORMAT_R16F: |
2248 | #if defined(SOKOL_GLES2) |
2249 | return GL_LUMINANCE; |
2250 | #else |
2251 | if (_sg_gl_gles2) { |
2252 | return GL_LUMINANCE; |
2253 | } |
2254 | else { |
2255 | return GL_RED; |
2256 | } |
2257 | #endif |
2258 | case SG_PIXELFORMAT_DEPTH: |
2259 | return GL_DEPTH_COMPONENT; |
2260 | case SG_PIXELFORMAT_DEPTHSTENCIL: |
2261 | return GL_DEPTH_STENCIL; |
2262 | case SG_PIXELFORMAT_DXT1: |
2263 | return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT; |
2264 | case SG_PIXELFORMAT_DXT3: |
2265 | return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT; |
2266 | case SG_PIXELFORMAT_DXT5: |
2267 | return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; |
2268 | case SG_PIXELFORMAT_PVRTC2_RGB: |
2269 | return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG; |
2270 | case SG_PIXELFORMAT_PVRTC4_RGB: |
2271 | return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG; |
2272 | case SG_PIXELFORMAT_PVRTC2_RGBA: |
2273 | return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG; |
2274 | case SG_PIXELFORMAT_PVRTC4_RGBA: |
2275 | return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG; |
2276 | case SG_PIXELFORMAT_ETC2_RGB8: |
2277 | return GL_COMPRESSED_RGB8_ETC2; |
2278 | case SG_PIXELFORMAT_ETC2_SRGB8: |
2279 | return GL_COMPRESSED_SRGB8_ETC2; |
2280 | default: |
2281 | SOKOL_UNREACHABLE; return 0; |
2282 | } |
2283 | } |
2284 | |
2285 | _SOKOL_PRIVATE GLenum _sg_gl_teximage_internal_format(sg_pixel_format fmt) { |
2286 | #if defined(SOKOL_GLES2) |
2287 | return _sg_gl_teximage_format(fmt); |
2288 | #else |
2289 | if (_sg_gl_gles2) { |
2290 | return _sg_gl_teximage_format(fmt); |
2291 | } |
2292 | else { |
2293 | switch (fmt) { |
2294 | case SG_PIXELFORMAT_NONE: |
2295 | return 0; |
2296 | case SG_PIXELFORMAT_RGBA8: |
2297 | return GL_RGBA8; |
2298 | case SG_PIXELFORMAT_RGB8: |
2299 | return GL_RGB8; |
2300 | case SG_PIXELFORMAT_RGBA4: |
2301 | return GL_RGBA4; |
2302 | case SG_PIXELFORMAT_R5G6B5: |
2303 | #if defined(SOKOL_GLES3) |
2304 | return GL_RGB565; |
2305 | #else |
2306 | return GL_RGB5; |
2307 | #endif |
2308 | case SG_PIXELFORMAT_R5G5B5A1: |
2309 | return GL_RGB5_A1; |
2310 | case SG_PIXELFORMAT_R10G10B10A2: |
2311 | return GL_RGB10_A2; |
2312 | case SG_PIXELFORMAT_RGBA32F: |
2313 | return GL_RGBA32F; |
2314 | case SG_PIXELFORMAT_RGBA16F: |
2315 | return GL_RGBA16F; |
2316 | case SG_PIXELFORMAT_R32F: |
2317 | return GL_R32F; |
2318 | case SG_PIXELFORMAT_R16F: |
2319 | return GL_R16F; |
2320 | case SG_PIXELFORMAT_L8: |
2321 | return GL_R8; |
2322 | case SG_PIXELFORMAT_DEPTH: |
2323 | /* FIXME */ |
2324 | return GL_DEPTH_COMPONENT16; |
2325 | case SG_PIXELFORMAT_DEPTHSTENCIL: |
2326 | return GL_DEPTH24_STENCIL8; |
2327 | case SG_PIXELFORMAT_DXT1: |
2328 | return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT; |
2329 | case SG_PIXELFORMAT_DXT3: |
2330 | return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT; |
2331 | case SG_PIXELFORMAT_DXT5: |
2332 | return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT; |
2333 | case SG_PIXELFORMAT_PVRTC2_RGB: |
2334 | return GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG; |
2335 | case SG_PIXELFORMAT_PVRTC4_RGB: |
2336 | return GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG; |
2337 | case SG_PIXELFORMAT_PVRTC2_RGBA: |
2338 | return GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG; |
2339 | case SG_PIXELFORMAT_PVRTC4_RGBA: |
2340 | return GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG; |
2341 | case SG_PIXELFORMAT_ETC2_RGB8: |
2342 | return GL_COMPRESSED_RGB8_ETC2; |
2343 | case SG_PIXELFORMAT_ETC2_SRGB8: |
2344 | return GL_COMPRESSED_SRGB8_ETC2; |
2345 | default: |
2346 | SOKOL_UNREACHABLE; return 0; |
2347 | } |
2348 | } |
2349 | #endif |
2350 | } |
2351 | |
2352 | _SOKOL_PRIVATE GLenum _sg_gl_cubeface_target(int face_index) { |
2353 | switch (face_index) { |
2354 | case 0: return GL_TEXTURE_CUBE_MAP_POSITIVE_X; |
2355 | case 1: return GL_TEXTURE_CUBE_MAP_NEGATIVE_X; |
2356 | case 2: return GL_TEXTURE_CUBE_MAP_POSITIVE_Y; |
2357 | case 3: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Y; |
2358 | case 4: return GL_TEXTURE_CUBE_MAP_POSITIVE_Z; |
2359 | case 5: return GL_TEXTURE_CUBE_MAP_NEGATIVE_Z; |
2360 | default: SOKOL_UNREACHABLE; return 0; |
2361 | } |
2362 | } |
2363 | |
2364 | _SOKOL_PRIVATE GLenum _sg_gl_depth_attachment_format(sg_pixel_format fmt) { |
2365 | switch (fmt) { |
2366 | case SG_PIXELFORMAT_DEPTH: return GL_DEPTH_COMPONENT16; |
2367 | case SG_PIXELFORMAT_DEPTHSTENCIL: return GL_DEPTH24_STENCIL8; |
2368 | default: SOKOL_UNREACHABLE; return 0; |
2369 | } |
2370 | } |
2371 | |
2372 | /*-- GL backend resource declarations ----------------------------------------*/ |
2373 | typedef struct { |
2374 | _sg_slot slot; |
2375 | int size; |
2376 | int append_pos; |
2377 | bool append_overflow; |
2378 | sg_buffer_type type; |
2379 | sg_usage usage; |
2380 | uint32_t update_frame_index; |
2381 | uint32_t append_frame_index; |
2382 | int num_slots; |
2383 | int active_slot; |
2384 | GLuint gl_buf[SG_NUM_INFLIGHT_FRAMES]; |
2385 | bool ext_buffers; /* if true, external buffers were injected with sg_buffer_desc.gl_buffers */ |
2386 | } _sg_buffer; |
2387 | |
2388 | _SOKOL_PRIVATE void _sg_init_buffer_slot(_sg_buffer* buf) { |
2389 | SOKOL_ASSERT(buf); |
2390 | memset(buf, 0, sizeof(_sg_buffer)); |
2391 | } |
2392 | |
2393 | typedef struct { |
2394 | _sg_slot slot; |
2395 | sg_image_type type; |
2396 | bool render_target; |
2397 | int width; |
2398 | int height; |
2399 | int depth; |
2400 | int num_mipmaps; |
2401 | sg_usage usage; |
2402 | sg_pixel_format pixel_format; |
2403 | int sample_count; |
2404 | sg_filter min_filter; |
2405 | sg_filter mag_filter; |
2406 | sg_wrap wrap_u; |
2407 | sg_wrap wrap_v; |
2408 | sg_wrap wrap_w; |
2409 | uint32_t max_anisotropy; |
2410 | GLenum gl_target; |
2411 | GLuint gl_depth_render_buffer; |
2412 | GLuint gl_msaa_render_buffer; |
2413 | uint32_t upd_frame_index; |
2414 | int num_slots; |
2415 | int active_slot; |
2416 | GLuint gl_tex[SG_NUM_INFLIGHT_FRAMES]; |
2417 | bool ext_textures; /* if true, external textures were injected with sg_image_desc.gl_textures */ |
2418 | } _sg_image; |
2419 | |
2420 | _SOKOL_PRIVATE void _sg_init_image_slot(_sg_image* img) { |
2421 | SOKOL_ASSERT(img); |
2422 | memset(img, 0, sizeof(_sg_image)); |
2423 | } |
2424 | |
2425 | typedef struct { |
2426 | GLint gl_loc; |
2427 | sg_uniform_type type; |
2428 | uint8_t count; |
2429 | uint16_t offset; |
2430 | } _sg_uniform; |
2431 | |
2432 | typedef struct { |
2433 | int size; |
2434 | int num_uniforms; |
2435 | _sg_uniform uniforms[SG_MAX_UB_MEMBERS]; |
2436 | } _sg_uniform_block; |
2437 | |
2438 | typedef struct { |
2439 | sg_image_type type; |
2440 | GLint gl_loc; |
2441 | int gl_tex_slot; |
2442 | } _sg_shader_image; |
2443 | |
2444 | typedef struct { |
2445 | int num_uniform_blocks; |
2446 | int num_images; |
2447 | _sg_uniform_block uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; |
2448 | _sg_shader_image images[SG_MAX_SHADERSTAGE_IMAGES]; |
2449 | } _sg_shader_stage; |
2450 | |
2451 | typedef struct { |
2452 | _sg_slot slot; |
2453 | GLuint gl_prog; |
2454 | _sg_shader_stage stage[SG_NUM_SHADER_STAGES]; |
2455 | } _sg_shader; |
2456 | |
2457 | _SOKOL_PRIVATE void _sg_init_shader_slot(_sg_shader* shd) { |
2458 | SOKOL_ASSERT(shd); |
2459 | memset(shd, 0, sizeof(_sg_shader)); |
2460 | } |
2461 | |
2462 | typedef struct { |
2463 | int8_t vb_index; /* -1 if attr is not enabled */ |
2464 | int8_t divisor; /* -1 if not initialized */ |
2465 | uint8_t stride; |
2466 | uint8_t size; |
2467 | uint8_t normalized; |
2468 | int offset; |
2469 | GLenum type; |
2470 | } _sg_gl_attr; |
2471 | |
2472 | _SOKOL_PRIVATE void _sg_gl_init_attr(_sg_gl_attr* attr) { |
2473 | attr->vb_index = -1; |
2474 | attr->divisor = -1; |
2475 | attr->stride = 0; |
2476 | attr->size = 0; |
2477 | attr->normalized = 0; |
2478 | attr->offset = 0; |
2479 | attr->type = 0; |
2480 | } |
2481 | |
2482 | typedef struct { |
2483 | _sg_slot slot; |
2484 | _sg_shader* shader; |
2485 | sg_shader shader_id; |
2486 | sg_primitive_type primitive_type; |
2487 | sg_index_type index_type; |
2488 | bool vertex_layout_valid[SG_MAX_SHADERSTAGE_BUFFERS]; |
2489 | int color_attachment_count; |
2490 | sg_pixel_format color_format; |
2491 | sg_pixel_format depth_format; |
2492 | int sample_count; |
2493 | _sg_gl_attr gl_attrs[SG_MAX_VERTEX_ATTRIBUTES]; |
2494 | sg_depth_stencil_state depth_stencil; |
2495 | sg_blend_state blend; |
2496 | sg_rasterizer_state rast; |
2497 | } _sg_pipeline; |
2498 | |
2499 | _SOKOL_PRIVATE void _sg_init_pipeline_slot(_sg_pipeline* pip) { |
2500 | SOKOL_ASSERT(pip); |
2501 | memset(pip, 0, sizeof(_sg_pipeline)); |
2502 | } |
2503 | |
2504 | typedef struct { |
2505 | _sg_image* image; |
2506 | sg_image image_id; |
2507 | int mip_level; |
2508 | int slice; |
2509 | GLuint gl_msaa_resolve_buffer; |
2510 | } _sg_attachment; |
2511 | |
2512 | typedef struct { |
2513 | _sg_slot slot; |
2514 | GLuint gl_fb; |
2515 | int num_color_atts; |
2516 | _sg_attachment color_atts[SG_MAX_COLOR_ATTACHMENTS]; |
2517 | _sg_attachment ds_att; |
2518 | } _sg_pass; |
2519 | |
2520 | _SOKOL_PRIVATE void _sg_init_pass_slot(_sg_pass* pass) { |
2521 | SOKOL_ASSERT(pass); |
2522 | memset(pass, 0, sizeof(_sg_pass)); |
2523 | } |
2524 | |
2525 | typedef struct { |
2526 | _sg_slot slot; |
2527 | #if !defined(SOKOL_GLES2) |
2528 | GLuint vao; |
2529 | #endif |
2530 | GLuint default_framebuffer; |
2531 | } _sg_context; |
2532 | |
2533 | _SOKOL_PRIVATE void _sg_init_context_slot(_sg_context* ctx) { |
2534 | SOKOL_ASSERT(ctx); |
2535 | memset(ctx, 0, sizeof(_sg_context)); |
2536 | } |
2537 | |
2538 | _SOKOL_PRIVATE void _sg_gl_init_stencil_state(sg_stencil_state* s) { |
2539 | SOKOL_ASSERT(s); |
2540 | s->fail_op = SG_STENCILOP_KEEP; |
2541 | s->depth_fail_op = SG_STENCILOP_KEEP; |
2542 | s->pass_op = SG_STENCILOP_KEEP; |
2543 | s->compare_func = SG_COMPAREFUNC_ALWAYS; |
2544 | } |
2545 | |
2546 | _SOKOL_PRIVATE void _sg_gl_init_depth_stencil_state(sg_depth_stencil_state* s) { |
2547 | SOKOL_ASSERT(s); |
2548 | _sg_gl_init_stencil_state(&s->stencil_front); |
2549 | _sg_gl_init_stencil_state(&s->stencil_back); |
2550 | s->depth_compare_func = SG_COMPAREFUNC_ALWAYS; |
2551 | s->depth_write_enabled = false; |
2552 | s->stencil_enabled = false; |
2553 | s->stencil_read_mask = 0; |
2554 | s->stencil_write_mask = 0; |
2555 | s->stencil_ref = 0; |
2556 | } |
2557 | |
2558 | _SOKOL_PRIVATE void _sg_gl_init_blend_state(sg_blend_state* s) { |
2559 | SOKOL_ASSERT(s); |
2560 | s->enabled = false; |
2561 | s->src_factor_rgb = SG_BLENDFACTOR_ONE; |
2562 | s->dst_factor_rgb = SG_BLENDFACTOR_ZERO; |
2563 | s->op_rgb = SG_BLENDOP_ADD; |
2564 | s->src_factor_alpha = SG_BLENDFACTOR_ONE; |
2565 | s->dst_factor_alpha = SG_BLENDFACTOR_ZERO; |
2566 | s->op_alpha = SG_BLENDOP_ADD; |
2567 | s->color_write_mask = SG_COLORMASK_RGBA; |
2568 | for (int i = 0; i < 4; i++) { |
2569 | s->blend_color[i] = 0.0f; |
2570 | } |
2571 | } |
2572 | |
2573 | _SOKOL_PRIVATE void _sg_gl_init_rasterizer_state(sg_rasterizer_state* s) { |
2574 | SOKOL_ASSERT(s); |
2575 | s->alpha_to_coverage_enabled = false; |
2576 | s->cull_mode = SG_CULLMODE_NONE; |
2577 | s->face_winding = SG_FACEWINDING_CW; |
2578 | s->sample_count = 1; |
2579 | s->depth_bias = 0.0f; |
2580 | s->depth_bias_slope_scale = 0.0f; |
2581 | s->depth_bias_clamp = 0.0f; |
2582 | } |
2583 | |
2584 | /*-- state cache implementation ----------------------------------------------*/ |
2585 | typedef struct { |
2586 | _sg_gl_attr gl_attr; |
2587 | GLuint gl_vbuf; |
2588 | } _sg_gl_cache_attr; |
2589 | |
2590 | typedef struct { |
2591 | sg_depth_stencil_state ds; |
2592 | sg_blend_state blend; |
2593 | sg_rasterizer_state rast; |
2594 | bool polygon_offset_enabled; |
2595 | _sg_gl_cache_attr attrs[SG_MAX_VERTEX_ATTRIBUTES]; |
2596 | GLuint cur_gl_vb; |
2597 | GLuint cur_gl_ib; |
2598 | int cur_ib_offset; |
2599 | GLenum cur_primitive_type; |
2600 | GLenum cur_index_type; |
2601 | _sg_pipeline* cur_pipeline; |
2602 | sg_pipeline cur_pipeline_id; |
2603 | } _sg_state_cache; |
2604 | |
2605 | /* cached wrapper for glBindBuffer */ |
2606 | _SOKOL_PRIVATE void _sg_gl_bind_buffer(GLenum target, GLuint buffer, _sg_state_cache* cache) { |
2607 | SOKOL_ASSERT((GL_ARRAY_BUFFER == target) || (GL_ELEMENT_ARRAY_BUFFER == target)); |
2608 | if (target == GL_ARRAY_BUFFER) { |
2609 | if (cache->cur_gl_vb != buffer) { |
2610 | cache->cur_gl_vb = buffer; |
2611 | glBindBuffer(target, buffer); |
2612 | } |
2613 | } |
2614 | else { |
2615 | if (cache->cur_gl_ib != buffer) { |
2616 | cache->cur_gl_ib = buffer; |
2617 | glBindBuffer(target, buffer); |
2618 | } |
2619 | } |
2620 | } |
2621 | |
2622 | _SOKOL_PRIVATE void _sg_gl_reset_state_cache(_sg_state_cache* cache) { |
2623 | SOKOL_ASSERT(cache); |
2624 | _SG_GL_CHECK_ERROR(); |
2625 | glBindBuffer(GL_ARRAY_BUFFER, 0); |
2626 | glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); |
2627 | _SG_GL_CHECK_ERROR(); |
2628 | for (int i = 0; i < SG_MAX_VERTEX_ATTRIBUTES; i++) { |
2629 | _sg_gl_init_attr(&cache->attrs[i].gl_attr); |
2630 | cache->attrs[i].gl_vbuf = 0; |
2631 | glDisableVertexAttribArray(i); |
2632 | _SG_GL_CHECK_ERROR(); |
2633 | } |
2634 | cache->cur_gl_vb = 0; |
2635 | cache->cur_gl_ib = 0; |
2636 | cache->cur_ib_offset = 0; |
2637 | cache->cur_primitive_type = GL_TRIANGLES; |
2638 | cache->cur_index_type = 0; |
2639 | |
2640 | /* resource bindings */ |
2641 | cache->cur_pipeline = 0; |
2642 | cache->cur_pipeline_id.id = SG_INVALID_ID; |
2643 | |
2644 | /* depth-stencil state */ |
2645 | _sg_gl_init_depth_stencil_state(&cache->ds); |
2646 | glEnable(GL_DEPTH_TEST); |
2647 | glDepthFunc(GL_ALWAYS); |
2648 | glDepthMask(GL_FALSE); |
2649 | glDisable(GL_STENCIL_TEST); |
2650 | glStencilFunc(GL_ALWAYS, 0, 0); |
2651 | glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); |
2652 | glStencilMask(0); |
2653 | |
2654 | /* blend state */ |
2655 | _sg_gl_init_blend_state(&cache->blend); |
2656 | glDisable(GL_BLEND); |
2657 | glBlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO); |
2658 | glBlendEquationSeparate(GL_FUNC_ADD, GL_FUNC_ADD); |
2659 | glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); |
2660 | glBlendColor(0.0f, 0.0f, 0.0f, 0.0f); |
2661 | |
2662 | /* rasterizer state */ |
2663 | _sg_gl_init_rasterizer_state(&cache->rast); |
2664 | cache->polygon_offset_enabled = false; |
2665 | glPolygonOffset(0.0f, 0.0f); |
2666 | glDisable(GL_POLYGON_OFFSET_FILL); |
2667 | glDisable(GL_CULL_FACE); |
2668 | glFrontFace(GL_CW); |
2669 | glCullFace(GL_BACK); |
2670 | glEnable(GL_SCISSOR_TEST); |
2671 | glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); |
2672 | glEnable(GL_DITHER); |
2673 | glDisable(GL_POLYGON_OFFSET_FILL); |
2674 | #if defined(SOKOL_GLCORE33) |
2675 | glEnable(GL_MULTISAMPLE); |
2676 | glEnable(GL_PROGRAM_POINT_SIZE); |
2677 | #endif |
2678 | } |
2679 | |
2680 | /*-- main GL backend state and functions -------------------------------------*/ |
2681 | typedef struct { |
2682 | bool valid; |
2683 | bool in_pass; |
2684 | int cur_pass_width; |
2685 | int cur_pass_height; |
2686 | _sg_context* cur_context; |
2687 | _sg_pass* cur_pass; |
2688 | sg_pass cur_pass_id; |
2689 | _sg_state_cache cache; |
2690 | bool features[SG_NUM_FEATURES]; |
2691 | bool ext_anisotropic; |
2692 | GLint max_anisotropy; |
2693 | } _sg_backend; |
2694 | |
2695 | static _sg_backend _sg_gl; |
2696 | |
2697 | _SOKOL_PRIVATE void _sg_setup_backend(const sg_desc* desc) { |
2698 | #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) |
2699 | _sg_gl_gles2 = desc->gl_force_gles2; |
2700 | #else |
2701 | _sg_gl_gles2 = false; |
2702 | #endif |
2703 | memset(&_sg_gl, 0, sizeof(_sg_gl)); |
2704 | _sg_gl.valid = true; |
2705 | _sg_gl.in_pass = false; |
2706 | _sg_gl.cur_pass_width = 0; |
2707 | _sg_gl.cur_pass_height = 0; |
2708 | _sg_gl.cur_pass = 0; |
2709 | _sg_gl.cur_pass_id.id = SG_INVALID_ID; |
2710 | |
2711 | /* clear initial GL error state */ |
2712 | #if defined(SOKOL_DEBUG) |
2713 | while (glGetError() != GL_NO_ERROR); |
2714 | #endif |
2715 | |
2716 | /* initialize feature flags */ |
2717 | for (int i = 0; i < SG_NUM_FEATURES; i++) { |
2718 | _sg_gl.features[i] = false; |
2719 | } |
2720 | _sg_gl.ext_anisotropic = false; |
2721 | _sg_gl.features[SG_FEATURE_ORIGIN_BOTTOM_LEFT] = true; |
2722 | #if defined(SOKOL_GLCORE33) |
2723 | _sg_gl.features[SG_FEATURE_INSTANCING] = true; |
2724 | _sg_gl.features[SG_FEATURE_TEXTURE_FLOAT] = true; |
2725 | _sg_gl.features[SG_FEATURE_TEXTURE_HALF_FLOAT] = true; |
2726 | _sg_gl.features[SG_FEATURE_MSAA_RENDER_TARGETS] = true; |
2727 | _sg_gl.features[SG_FEATURE_PACKED_VERTEX_FORMAT_10_2] = true; |
2728 | _sg_gl.features[SG_FEATURE_MULTIPLE_RENDER_TARGET] = true; |
2729 | _sg_gl.features[SG_FEATURE_IMAGETYPE_3D] = true; |
2730 | _sg_gl.features[SG_FEATURE_IMAGETYPE_ARRAY] = true; |
2731 | GLint num_ext = 0; |
2732 | glGetIntegerv(GL_NUM_EXTENSIONS, &num_ext); |
2733 | for (int i = 0; i < num_ext; i++) { |
2734 | const char* ext = (const char*) glGetStringi(GL_EXTENSIONS, i); |
2735 | if (strstr(ext, "_texture_compression_s3tc" )) { |
2736 | _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_DXT] = true; |
2737 | continue; |
2738 | } |
2739 | else if (strstr(ext, "_texture_filter_anisotropic" )) { |
2740 | _sg_gl.ext_anisotropic = true; |
2741 | continue; |
2742 | } |
2743 | } |
2744 | #elif defined(SOKOL_GLES3) |
2745 | const char* ext = (const char*) glGetString(GL_EXTENSIONS); |
2746 | if (!_sg_gl_gles2) { |
2747 | _sg_gl.features[SG_FEATURE_INSTANCING] = true; |
2748 | _sg_gl.features[SG_FEATURE_TEXTURE_FLOAT] = true; |
2749 | _sg_gl.features[SG_FEATURE_TEXTURE_HALF_FLOAT] = true; |
2750 | _sg_gl.features[SG_FEATURE_IMAGETYPE_3D] = true; |
2751 | _sg_gl.features[SG_FEATURE_IMAGETYPE_ARRAY] = true; |
2752 | _sg_gl.features[SG_FEATURE_MSAA_RENDER_TARGETS] = true; |
2753 | _sg_gl.features[SG_FEATURE_PACKED_VERTEX_FORMAT_10_2] = true; |
2754 | _sg_gl.features[SG_FEATURE_MULTIPLE_RENDER_TARGET] = true; |
2755 | } |
2756 | else { |
2757 | _sg_gl.features[SG_FEATURE_INSTANCING] = strstr(ext, "_instanced_arrays" ); |
2758 | _sg_gl.features[SG_FEATURE_TEXTURE_FLOAT] = strstr(ext, "_texture_float" ); |
2759 | _sg_gl.features[SG_FEATURE_TEXTURE_HALF_FLOAT] = strstr(ext, "_texture_half_float" ); |
2760 | } |
2761 | _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_DXT] = |
2762 | strstr(ext, "_texture_compression_s3tc" ) || |
2763 | strstr(ext, "_compressed_texture_s3tc" ) || |
2764 | strstr(ext, "texture_compression_dxt1" ); |
2765 | _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_PVRTC] = |
2766 | strstr(ext, "_texture_compression_pvrtc" ) || |
2767 | strstr(ext, "_compressed_texture_pvrtc" ); |
2768 | _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_ATC] = |
2769 | strstr(ext, "_compressed_texture_atc" ); |
2770 | _sg_gl.ext_anisotropic = |
2771 | strstr(ext, "_texture_filter_anisotropic" ); |
2772 | #elif defined(SOKOL_GLES2) |
2773 | const char* ext = (const char*) glGetString(GL_EXTENSIONS); |
2774 | _sg_gl.features[SG_FEATURE_INSTANCING] = |
2775 | strstr(ext, "_instanced_arrays" ); |
2776 | _sg_gl.features[SG_FEATURE_TEXTURE_FLOAT] = |
2777 | strstr(ext, "_texture_float" ); |
2778 | _sg_gl.features[SG_FEATURE_TEXTURE_HALF_FLOAT] = |
2779 | strstr(ext, "_texture_half_float" ); |
2780 | _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_DXT] = |
2781 | strstr(ext, "_texture_compression_s3tc" ) || |
2782 | strstr(ext, "_compressed_texture_s3tc" ) || |
2783 | strstr(ext, "texture_compression_dxt1" ); |
2784 | _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_PVRTC] = |
2785 | strstr(ext, "_texture_compression_pvrtc" ) || |
2786 | strstr(ext, "_compressed_texture_pvrtc" ); |
2787 | _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_ATC] = |
2788 | strstr(ext, "_compressed_texture_atc" ); |
2789 | _sg_gl.ext_anisotropic = |
2790 | strstr(ext, "_texture_filter_anisotropic" ); |
2791 | #endif |
2792 | _sg_gl.max_anisotropy = 1; |
2793 | if (_sg_gl.ext_anisotropic) { |
2794 | glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &_sg_gl.max_anisotropy); |
2795 | } |
2796 | } |
2797 | |
_SOKOL_PRIVATE void _sg_discard_backend(void) {
2799 | SOKOL_ASSERT(_sg_gl.valid); |
2800 | _sg_gl.valid = false; |
2801 | } |
2802 | |
_SOKOL_PRIVATE void _sg_reset_state_cache(void) {
2804 | if (_sg_gl.cur_context) { |
2805 | #if !defined(SOKOL_GLES2) |
2806 | if (!_sg_gl_gles2) { |
2807 | _SG_GL_CHECK_ERROR(); |
2808 | glBindVertexArray(_sg_gl.cur_context->vao); |
2809 | _SG_GL_CHECK_ERROR(); |
2810 | } |
2811 | #endif |
2812 | _sg_gl_reset_state_cache(&_sg_gl.cache); |
2813 | } |
2814 | } |
2815 | |
2816 | _SOKOL_PRIVATE bool _sg_query_feature(sg_feature f) { |
2817 | SOKOL_ASSERT((f>=0) && (f<SG_NUM_FEATURES)); |
2818 | return _sg_gl.features[f]; |
2819 | } |
2820 | |
2821 | _SOKOL_PRIVATE void _sg_activate_context(_sg_context* ctx) { |
2822 | SOKOL_ASSERT(_sg_gl.valid); |
2823 | /* NOTE: ctx can be 0 to unset the current context */ |
2824 | _sg_gl.cur_context = ctx; |
2825 | _sg_reset_state_cache(); |
2826 | } |
2827 | |
2828 | /*-- GL backend resource creation and destruction ----------------------------*/ |
2829 | _SOKOL_PRIVATE void _sg_create_context(_sg_context* ctx) { |
2830 | SOKOL_ASSERT(ctx); |
2831 | SOKOL_ASSERT(ctx->slot.state == SG_RESOURCESTATE_ALLOC); |
2832 | SOKOL_ASSERT(0 == ctx->default_framebuffer); |
2833 | _SG_GL_CHECK_ERROR(); |
2834 | glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&ctx->default_framebuffer); |
2835 | _SG_GL_CHECK_ERROR(); |
2836 | #if !defined(SOKOL_GLES2) |
2837 | if (!_sg_gl_gles2) { |
2838 | SOKOL_ASSERT(0 == ctx->vao); |
2839 | glGenVertexArrays(1, &ctx->vao); |
2840 | glBindVertexArray(ctx->vao); |
2841 | _SG_GL_CHECK_ERROR(); |
2842 | } |
2843 | #endif |
2844 | ctx->slot.state = SG_RESOURCESTATE_VALID; |
2845 | } |
2846 | |
2847 | _SOKOL_PRIVATE void _sg_destroy_context(_sg_context* ctx) { |
2848 | SOKOL_ASSERT(ctx); |
2849 | #if !defined(SOKOL_GLES2) |
2850 | if (!_sg_gl_gles2) { |
2851 | if (ctx->vao) { |
2852 | glDeleteVertexArrays(1, &ctx->vao); |
2853 | } |
2854 | _SG_GL_CHECK_ERROR(); |
2855 | } |
2856 | #endif |
2857 | _sg_init_context_slot(ctx); |
2858 | } |
2859 | |
2860 | _SOKOL_PRIVATE void _sg_create_buffer(_sg_buffer* buf, const sg_buffer_desc* desc) { |
2861 | SOKOL_ASSERT(buf && desc); |
2862 | SOKOL_ASSERT(buf->slot.state == SG_RESOURCESTATE_ALLOC); |
2863 | _SG_GL_CHECK_ERROR(); |
2864 | buf->size = desc->size; |
2865 | buf->append_pos = 0; |
2866 | buf->append_overflow = false; |
2867 | buf->type = _sg_def(desc->type, SG_BUFFERTYPE_VERTEXBUFFER); |
2868 | buf->usage = _sg_def(desc->usage, SG_USAGE_IMMUTABLE); |
2869 | buf->update_frame_index = 0; |
2870 | buf->append_frame_index = 0; |
2871 | buf->num_slots = (buf->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES; |
2872 | buf->active_slot = 0; |
2873 | buf->ext_buffers = (0 != desc->gl_buffers[0]); |
2874 | GLenum gl_target = _sg_gl_buffer_target(buf->type); |
2875 | GLenum gl_usage = _sg_gl_usage(buf->usage); |
2876 | for (int slot = 0; slot < buf->num_slots; slot++) { |
2877 | GLuint gl_buf = 0; |
2878 | if (buf->ext_buffers) { |
2879 | SOKOL_ASSERT(desc->gl_buffers[slot]); |
2880 | gl_buf = desc->gl_buffers[slot]; |
2881 | } |
2882 | else { |
2883 | glGenBuffers(1, &gl_buf); |
2884 | _sg_gl_bind_buffer(gl_target, gl_buf, &_sg_gl.cache); |
2885 | glBufferData(gl_target, buf->size, 0, gl_usage); |
2886 | if (buf->usage == SG_USAGE_IMMUTABLE) { |
2887 | SOKOL_ASSERT(desc->content); |
2888 | glBufferSubData(gl_target, 0, buf->size, desc->content); |
2889 | } |
2890 | } |
2891 | buf->gl_buf[slot] = gl_buf; |
2892 | } |
2893 | _SG_GL_CHECK_ERROR(); |
2894 | buf->slot.state = SG_RESOURCESTATE_VALID; |
2895 | } |
2896 | |
2897 | _SOKOL_PRIVATE void _sg_destroy_buffer(_sg_buffer* buf) { |
2898 | SOKOL_ASSERT(buf); |
2899 | _SG_GL_CHECK_ERROR(); |
2900 | if (!buf->ext_buffers) { |
2901 | for (int slot = 0; slot < buf->num_slots; slot++) { |
2902 | if (buf->gl_buf[slot]) { |
2903 | glDeleteBuffers(1, &buf->gl_buf[slot]); |
2904 | } |
2905 | } |
2906 | _SG_GL_CHECK_ERROR(); |
2907 | } |
2908 | _sg_init_buffer_slot(buf); |
2909 | } |
2910 | |
2911 | _SOKOL_PRIVATE bool _sg_gl_supported_texture_format(sg_pixel_format fmt) { |
2912 | switch (fmt) { |
2913 | case SG_PIXELFORMAT_DXT1: |
2914 | case SG_PIXELFORMAT_DXT3: |
2915 | case SG_PIXELFORMAT_DXT5: |
2916 | return _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_DXT]; |
2917 | case SG_PIXELFORMAT_PVRTC2_RGB: |
2918 | case SG_PIXELFORMAT_PVRTC4_RGB: |
2919 | case SG_PIXELFORMAT_PVRTC2_RGBA: |
2920 | case SG_PIXELFORMAT_PVRTC4_RGBA: |
2921 | return _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_PVRTC]; |
2922 | case SG_PIXELFORMAT_ETC2_RGB8: |
2923 | case SG_PIXELFORMAT_ETC2_SRGB8: |
2924 | return _sg_gl.features[SG_FEATURE_TEXTURE_COMPRESSION_ETC2]; |
2925 | default: |
2926 | return true; |
2927 | } |
2928 | } |
2929 | |
2930 | _SOKOL_PRIVATE void _sg_create_image(_sg_image* img, const sg_image_desc* desc) { |
2931 | SOKOL_ASSERT(img && desc); |
2932 | SOKOL_ASSERT(img->slot.state == SG_RESOURCESTATE_ALLOC); |
2933 | _SG_GL_CHECK_ERROR(); |
2934 | img->type = _sg_def(desc->type, SG_IMAGETYPE_2D); |
2935 | img->render_target = desc->render_target; |
2936 | img->width = desc->width; |
2937 | img->height = desc->height; |
2938 | img->depth = _sg_def(desc->depth, 1); |
2939 | img->num_mipmaps = _sg_def(desc->num_mipmaps, 1); |
2940 | img->usage = _sg_def(desc->usage, SG_USAGE_IMMUTABLE); |
2941 | img->pixel_format = _sg_def(desc->pixel_format, SG_PIXELFORMAT_RGBA8); |
2942 | img->sample_count = _sg_def(desc->sample_count, 1); |
2943 | img->min_filter = _sg_def(desc->min_filter, SG_FILTER_NEAREST); |
2944 | img->mag_filter = _sg_def(desc->mag_filter, SG_FILTER_NEAREST); |
2945 | img->wrap_u = _sg_def(desc->wrap_u, SG_WRAP_REPEAT); |
2946 | img->wrap_v = _sg_def(desc->wrap_v, SG_WRAP_REPEAT); |
2947 | img->wrap_w = _sg_def(desc->wrap_w, SG_WRAP_REPEAT); |
2948 | img->max_anisotropy = _sg_def(desc->max_anisotropy, 1); |
2949 | img->upd_frame_index = 0; |
2950 | |
    /* check if the texture format is supported */
2952 | if (!_sg_gl_supported_texture_format(img->pixel_format)) { |
2953 | SOKOL_LOG("compressed texture format not supported by GL context\n" ); |
2954 | img->slot.state = SG_RESOURCESTATE_FAILED; |
2955 | return; |
2956 | } |
2957 | /* check for optional texture types */ |
2958 | if ((img->type == SG_IMAGETYPE_3D) && !_sg_gl.features[SG_FEATURE_IMAGETYPE_3D]) { |
2959 | SOKOL_LOG("3D textures not supported by GL context\n" ); |
2960 | img->slot.state = SG_RESOURCESTATE_FAILED; |
2961 | return; |
2962 | } |
2963 | if ((img->type == SG_IMAGETYPE_ARRAY) && !_sg_gl.features[SG_FEATURE_IMAGETYPE_ARRAY]) { |
2964 | SOKOL_LOG("array textures not supported by GL context\n" ); |
2965 | img->slot.state = SG_RESOURCESTATE_FAILED; |
2966 | return; |
2967 | } |
2968 | |
2969 | /* create 1 or 2 GL textures, depending on requested update strategy */ |
2970 | img->num_slots = (img->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES; |
2971 | img->active_slot = 0; |
2972 | img->ext_textures = (0 != desc->gl_textures[0]); |
2973 | |
2974 | #if !defined(SOKOL_GLES2) |
2975 | bool msaa = false; |
2976 | if (!_sg_gl_gles2) { |
2977 | msaa = (img->sample_count > 1) && (_sg_gl.features[SG_FEATURE_MSAA_RENDER_TARGETS]); |
2978 | } |
2979 | #endif |
2980 | |
2981 | if (_sg_is_valid_rendertarget_depth_format(img->pixel_format)) { |
        /* special case: depth or depth-stencil buffer (created as a GL renderbuffer, not a texture) */
2983 | SOKOL_ASSERT((img->usage == SG_USAGE_IMMUTABLE) && (img->num_slots == 1)); |
2984 | SOKOL_ASSERT(!img->ext_textures); /* cannot provide external texture for depth images */ |
2985 | glGenRenderbuffers(1, &img->gl_depth_render_buffer); |
2986 | glBindRenderbuffer(GL_RENDERBUFFER, img->gl_depth_render_buffer); |
2987 | GLenum gl_depth_format = _sg_gl_depth_attachment_format(img->pixel_format); |
2988 | #if !defined(SOKOL_GLES2) |
2989 | if (!_sg_gl_gles2 && msaa) { |
2990 | glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->sample_count, gl_depth_format, img->width, img->height); |
2991 | } |
2992 | else |
2993 | #endif |
2994 | { |
2995 | glRenderbufferStorage(GL_RENDERBUFFER, gl_depth_format, img->width, img->height); |
2996 | } |
2997 | } |
2998 | else { |
2999 | /* regular color texture */ |
3000 | img->gl_target = _sg_gl_texture_target(img->type); |
3001 | const GLenum gl_internal_format = _sg_gl_teximage_internal_format(img->pixel_format); |
3002 | |
        /* if this is an MSAA render target, a separate MSAA render buffer is needed */
3004 | #if !defined(SOKOL_GLES2) |
3005 | if (!_sg_gl_gles2 && img->render_target && msaa) { |
3006 | glGenRenderbuffers(1, &img->gl_msaa_render_buffer); |
3007 | glBindRenderbuffer(GL_RENDERBUFFER, img->gl_msaa_render_buffer); |
3008 | glRenderbufferStorageMultisample(GL_RENDERBUFFER, img->sample_count, gl_internal_format, img->width, img->height); |
3009 | } |
3010 | #endif |
3011 | |
3012 | if (img->ext_textures) { |
/* inject externally created GL textures */
3014 | for (int slot = 0; slot < img->num_slots; slot++) { |
3015 | SOKOL_ASSERT(desc->gl_textures[slot]); |
3016 | img->gl_tex[slot] = desc->gl_textures[slot]; |
3017 | } |
3018 | } |
3019 | else { |
3020 | /* create our own GL texture(s) */ |
3021 | const GLenum gl_format = _sg_gl_teximage_format(img->pixel_format); |
3022 | const bool is_compressed = _sg_is_compressed_pixel_format(img->pixel_format); |
3023 | for (int slot = 0; slot < img->num_slots; slot++) { |
3024 | glGenTextures(1, &img->gl_tex[slot]); |
3025 | glActiveTexture(GL_TEXTURE0); |
3026 | glBindTexture(img->gl_target, img->gl_tex[slot]); |
3027 | GLenum gl_min_filter = _sg_gl_filter(img->min_filter); |
3028 | GLenum gl_mag_filter = _sg_gl_filter(img->mag_filter); |
3029 | glTexParameteri(img->gl_target, GL_TEXTURE_MIN_FILTER, gl_min_filter); |
3030 | glTexParameteri(img->gl_target, GL_TEXTURE_MAG_FILTER, gl_mag_filter); |
3031 | if (_sg_gl.ext_anisotropic && (img->max_anisotropy > 1)) { |
3032 | GLint max_aniso = (GLint) img->max_anisotropy; |
3033 | if (max_aniso > _sg_gl.max_anisotropy) { |
3034 | max_aniso = _sg_gl.max_anisotropy; |
3035 | } |
3036 | glTexParameteri(img->gl_target, GL_TEXTURE_MAX_ANISOTROPY_EXT, max_aniso); |
3037 | } |
3038 | if (img->type == SG_IMAGETYPE_CUBE) { |
3039 | glTexParameteri(img->gl_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
3040 | glTexParameteri(img->gl_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
3041 | } |
3042 | else { |
3043 | glTexParameteri(img->gl_target, GL_TEXTURE_WRAP_S, _sg_gl_wrap(img->wrap_u)); |
3044 | glTexParameteri(img->gl_target, GL_TEXTURE_WRAP_T, _sg_gl_wrap(img->wrap_v)); |
3045 | #if !defined(SOKOL_GLES2) |
3046 | if (!_sg_gl_gles2 && (img->type == SG_IMAGETYPE_3D)) { |
3047 | glTexParameteri(img->gl_target, GL_TEXTURE_WRAP_R, _sg_gl_wrap(img->wrap_w)); |
3048 | } |
3049 | #endif |
3050 | } |
3051 | #if !defined(SOKOL_GLES2) |
3052 | if (!_sg_gl_gles2) { |
3053 | /* GL spec has strange defaults for mipmap min/max lod: -1000 to +1000 */ |
3054 | const float min_lod = _sg_clamp(desc->min_lod, 0.0f, 1000.0f); |
3055 | const float max_lod = _sg_clamp(_sg_def_flt(desc->max_lod, 1000.0f), 0.0f, 1000.0f); |
3056 | glTexParameterf(img->gl_target, GL_TEXTURE_MIN_LOD, min_lod); |
3057 | glTexParameterf(img->gl_target, GL_TEXTURE_MAX_LOD, max_lod); |
3058 | } |
3059 | #endif |
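/* allocate texture storage and upload any provided initial content, iterating over all faces and mipmap levels (data_ptr may be 0) */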
3060 | const int num_faces = img->type == SG_IMAGETYPE_CUBE ? 6 : 1; |
3061 | int data_index = 0; |
3062 | for (int face_index = 0; face_index < num_faces; face_index++) { |
3063 | for (int mip_index = 0; mip_index < img->num_mipmaps; mip_index++, data_index++) { |
3064 | GLenum gl_img_target = img->gl_target; |
3065 | if (SG_IMAGETYPE_CUBE == img->type) { |
3066 | gl_img_target = _sg_gl_cubeface_target(face_index); |
3067 | } |
3068 | const GLvoid* data_ptr = desc->content.subimage[face_index][mip_index].ptr; |
3069 | const int data_size = desc->content.subimage[face_index][mip_index].size; |
3070 | int mip_width = img->width >> mip_index; |
3071 | if (mip_width == 0) { |
3072 | mip_width = 1; |
3073 | } |
3074 | int mip_height = img->height >> mip_index; |
3075 | if (mip_height == 0) { |
3076 | mip_height = 1; |
3077 | } |
3078 | if ((SG_IMAGETYPE_2D == img->type) || (SG_IMAGETYPE_CUBE == img->type)) { |
3079 | if (is_compressed) { |
3080 | glCompressedTexImage2D(gl_img_target, mip_index, gl_internal_format, |
3081 | mip_width, mip_height, 0, data_size, data_ptr); |
3082 | } |
3083 | else { |
3084 | const GLenum gl_type = _sg_gl_teximage_type(img->pixel_format); |
3085 | glTexImage2D(gl_img_target, mip_index, gl_internal_format, |
3086 | mip_width, mip_height, 0, gl_format, gl_type, data_ptr); |
3087 | } |
3088 | } |
3089 | #if !defined(SOKOL_GLES2) |
3090 | else if (!_sg_gl_gles2 && ((SG_IMAGETYPE_3D == img->type) || (SG_IMAGETYPE_ARRAY == img->type))) { |
3091 | int mip_depth = img->depth; |
3092 | if (SG_IMAGETYPE_3D == img->type) { |
3093 | mip_depth >>= mip_index; |
3094 | } |
3095 | if (mip_depth == 0) { |
3096 | mip_depth = 1; |
3097 | } |
3098 | if (is_compressed) { |
3099 | glCompressedTexImage3D(gl_img_target, mip_index, gl_internal_format, |
3100 | mip_width, mip_height, mip_depth, 0, data_size, data_ptr); |
3101 | } |
3102 | else { |
3103 | const GLenum gl_type = _sg_gl_teximage_type(img->pixel_format); |
3104 | glTexImage3D(gl_img_target, mip_index, gl_internal_format, |
3105 | mip_width, mip_height, mip_depth, 0, gl_format, gl_type, data_ptr); |
3106 | } |
3107 | } |
3108 | #endif |
3109 | } |
3110 | } |
3111 | } |
3112 | } |
3113 | } |
3114 | _SG_GL_CHECK_ERROR(); |
3115 | img->slot.state = SG_RESOURCESTATE_VALID; |
3116 | } |
3117 | |
3118 | _SOKOL_PRIVATE void _sg_destroy_image(_sg_image* img) { |
3119 | SOKOL_ASSERT(img); |
3120 | _SG_GL_CHECK_ERROR(); |
3121 | if (!img->ext_textures) { |
3122 | for (int slot = 0; slot < img->num_slots; slot++) { |
3123 | if (img->gl_tex[slot]) { |
3124 | glDeleteTextures(1, &img->gl_tex[slot]); |
3125 | } |
3126 | } |
3127 | } |
3128 | if (img->gl_depth_render_buffer) { |
3129 | glDeleteRenderbuffers(1, &img->gl_depth_render_buffer); |
3130 | } |
3131 | if (img->gl_msaa_render_buffer) { |
3132 | glDeleteRenderbuffers(1, &img->gl_msaa_render_buffer); |
3133 | } |
3134 | _SG_GL_CHECK_ERROR(); |
3135 | _sg_init_image_slot(img); |
3136 | } |
3137 | |
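/* compile a single GL shader stage, returns 0 on failure (the compile log is routed through SOKOL_LOG) */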
3138 | _SOKOL_PRIVATE GLuint _sg_gl_compile_shader(sg_shader_stage stage, const char* src) { |
3139 | SOKOL_ASSERT(src); |
3140 | _SG_GL_CHECK_ERROR(); |
3141 | GLuint gl_shd = glCreateShader(_sg_gl_shader_stage(stage)); |
3142 | glShaderSource(gl_shd, 1, &src, 0); |
3143 | glCompileShader(gl_shd); |
3144 | GLint compile_status = 0; |
3145 | glGetShaderiv(gl_shd, GL_COMPILE_STATUS, &compile_status); |
3146 | if (!compile_status) { |
3147 | /* compilation failed, log error and delete shader */ |
3148 | GLint log_len = 0; |
3149 | glGetShaderiv(gl_shd, GL_INFO_LOG_LENGTH, &log_len); |
3150 | if (log_len > 0) { |
3151 | GLchar* log_buf = (GLchar*) SOKOL_MALLOC(log_len); |
3152 | glGetShaderInfoLog(gl_shd, log_len, &log_len, log_buf); |
3153 | SOKOL_LOG(log_buf); |
3154 | SOKOL_FREE(log_buf); |
3155 | } |
3156 | glDeleteShader(gl_shd); |
3157 | gl_shd = 0; |
3158 | } |
3159 | _SG_GL_CHECK_ERROR(); |
3160 | return gl_shd; |
3161 | } |
3162 | |
3163 | _SOKOL_PRIVATE void _sg_create_shader(_sg_shader* shd, const sg_shader_desc* desc) { |
3164 | SOKOL_ASSERT(shd && desc); |
3165 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_ALLOC); |
3166 | SOKOL_ASSERT(!shd->gl_prog); |
3167 | _SG_GL_CHECK_ERROR(); |
3168 | GLuint gl_vs = _sg_gl_compile_shader(SG_SHADERSTAGE_VS, desc->vs.source); |
3169 | GLuint gl_fs = _sg_gl_compile_shader(SG_SHADERSTAGE_FS, desc->fs.source); |
3170 | if (!(gl_vs && gl_fs)) { |
3171 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
3172 | return; |
3173 | } |
3174 | GLuint gl_prog = glCreateProgram(); |
3175 | glAttachShader(gl_prog, gl_vs); |
3176 | glAttachShader(gl_prog, gl_fs); |
3177 | glLinkProgram(gl_prog); |
3178 | glDeleteShader(gl_vs); |
3179 | glDeleteShader(gl_fs); |
3180 | _SG_GL_CHECK_ERROR(); |
3181 | |
3182 | GLint link_status; |
3183 | glGetProgramiv(gl_prog, GL_LINK_STATUS, &link_status); |
3184 | if (!link_status) { |
3185 | GLint log_len = 0; |
3186 | glGetProgramiv(gl_prog, GL_INFO_LOG_LENGTH, &log_len); |
3187 | if (log_len > 0) { |
3188 | GLchar* log_buf = (GLchar*) SOKOL_MALLOC(log_len); |
3189 | glGetProgramInfoLog(gl_prog, log_len, &log_len, log_buf); |
3190 | SOKOL_LOG(log_buf); |
3191 | SOKOL_FREE(log_buf); |
3192 | } |
3193 | glDeleteProgram(gl_prog); |
3194 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
3195 | return; |
3196 | } |
3197 | shd->gl_prog = gl_prog; |
3198 | |
3199 | /* resolve uniforms */ |
3200 | _SG_GL_CHECK_ERROR(); |
3201 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
3202 | const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &desc->vs : &desc->fs; |
3203 | _sg_shader_stage* stage = &shd->stage[stage_index]; |
3204 | SOKOL_ASSERT(stage->num_uniform_blocks == 0); |
3205 | for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { |
3206 | const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; |
3207 | if (0 == ub_desc->size) { |
3208 | break; |
3209 | } |
3210 | _sg_uniform_block* ub = &stage->uniform_blocks[ub_index]; |
3211 | ub->size = ub_desc->size; |
3212 | SOKOL_ASSERT(ub->num_uniforms == 0); |
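/* uniform offsets are computed from declaration order, members are tightly packed inside the uniform block */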
3213 | int cur_uniform_offset = 0; |
3214 | for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) { |
3215 | const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index]; |
3216 | if (u_desc->type == SG_UNIFORMTYPE_INVALID) { |
3217 | break; |
3218 | } |
3219 | _sg_uniform* u = &ub->uniforms[u_index]; |
3220 | u->type = u_desc->type; |
3221 | u->count = (uint8_t) _sg_def(u_desc->array_count, 1); |
3222 | u->offset = (uint16_t) cur_uniform_offset; |
3223 | cur_uniform_offset += _sg_uniform_size(u->type, u->count); |
3224 | if (u_desc->name) { |
3225 | u->gl_loc = glGetUniformLocation(gl_prog, u_desc->name); |
3226 | } |
3227 | else { |
3228 | u->gl_loc = u_index; |
3229 | } |
3230 | ub->num_uniforms++; |
3231 | } |
3232 | SOKOL_ASSERT(ub_desc->size == cur_uniform_offset); |
3233 | stage->num_uniform_blocks++; |
3234 | } |
3235 | } |
3236 | |
3237 | /* resolve image locations */ |
3238 | _SG_GL_CHECK_ERROR(); |
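/* texture unit slots are assigned in declaration order and shared across the vertex- and fragment-stage */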
3239 | int gl_tex_slot = 0; |
3240 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
3241 | const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS)? &desc->vs : &desc->fs; |
3242 | _sg_shader_stage* stage = &shd->stage[stage_index]; |
3243 | SOKOL_ASSERT(stage->num_images == 0); |
3244 | for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { |
3245 | const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; |
3246 | if (img_desc->type == _SG_IMAGETYPE_DEFAULT) { |
3247 | break; |
3248 | } |
3249 | _sg_shader_image* img = &stage->images[img_index]; |
3250 | img->type = img_desc->type; |
3251 | img->gl_loc = img_index; |
3252 | if (img_desc->name) { |
3253 | img->gl_loc = glGetUniformLocation(gl_prog, img_desc->name); |
3254 | } |
3255 | if (img->gl_loc != -1) { |
3256 | img->gl_tex_slot = gl_tex_slot++; |
3257 | } |
3258 | else { |
3259 | img->gl_tex_slot = -1; |
3260 | } |
3261 | stage->num_images++; |
3262 | } |
3263 | } |
3264 | _SG_GL_CHECK_ERROR(); |
3265 | shd->slot.state = SG_RESOURCESTATE_VALID; |
3266 | } |
3267 | |
3268 | _SOKOL_PRIVATE void _sg_destroy_shader(_sg_shader* shd) { |
3269 | SOKOL_ASSERT(shd); |
3270 | _SG_GL_CHECK_ERROR(); |
3271 | if (shd->gl_prog) { |
3272 | glDeleteProgram(shd->gl_prog); |
3273 | } |
3274 | _SG_GL_CHECK_ERROR(); |
3275 | _sg_init_shader_slot(shd); |
3276 | } |
3277 | |
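/* the _sg_gl_load_* helpers copy state from the desc structs and resolve zero-initialized members to their concrete default values, so that the GL state cache always compares fully resolved state */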
3278 | _SOKOL_PRIVATE void _sg_gl_load_stencil(const sg_stencil_state* src, sg_stencil_state* dst) { |
3279 | dst->fail_op = _sg_def(src->fail_op, SG_STENCILOP_KEEP); |
3280 | dst->depth_fail_op = _sg_def(src->depth_fail_op, SG_STENCILOP_KEEP); |
3281 | dst->pass_op = _sg_def(src->pass_op, SG_STENCILOP_KEEP); |
3282 | dst->compare_func = _sg_def(src->compare_func, SG_COMPAREFUNC_ALWAYS); |
3283 | } |
3284 | |
3285 | _SOKOL_PRIVATE void _sg_gl_load_depth_stencil(const sg_depth_stencil_state* src, sg_depth_stencil_state* dst) { |
3286 | _sg_gl_load_stencil(&src->stencil_front, &dst->stencil_front); |
3287 | _sg_gl_load_stencil(&src->stencil_back, &dst->stencil_back); |
3288 | dst->depth_compare_func = _sg_def(src->depth_compare_func, SG_COMPAREFUNC_ALWAYS); |
3289 | dst->depth_write_enabled = src->depth_write_enabled; |
3290 | dst->stencil_enabled = src->stencil_enabled; |
3291 | dst->stencil_read_mask = src->stencil_read_mask; |
3292 | dst->stencil_write_mask = src->stencil_write_mask; |
3293 | dst->stencil_ref = src->stencil_ref; |
3294 | } |
3295 | |
3296 | _SOKOL_PRIVATE void _sg_gl_load_blend(const sg_blend_state* src, sg_blend_state* dst) { |
3297 | dst->enabled = src->enabled; |
3298 | dst->src_factor_rgb = _sg_def(src->src_factor_rgb, SG_BLENDFACTOR_ONE); |
3299 | dst->dst_factor_rgb = _sg_def(src->dst_factor_rgb, SG_BLENDFACTOR_ZERO); |
3300 | dst->op_rgb = _sg_def(src->op_rgb, SG_BLENDOP_ADD); |
3301 | dst->src_factor_alpha = _sg_def(src->src_factor_alpha, SG_BLENDFACTOR_ONE); |
3302 | dst->dst_factor_alpha = _sg_def(src->dst_factor_alpha, SG_BLENDFACTOR_ZERO); |
3303 | dst->op_alpha = _sg_def(src->op_alpha, SG_BLENDOP_ADD); |
3304 | if (src->color_write_mask == SG_COLORMASK_NONE) { |
3305 | dst->color_write_mask = 0; |
3306 | } |
3307 | else { |
3308 | dst->color_write_mask = (uint8_t) _sg_def((sg_color_mask)src->color_write_mask, SG_COLORMASK_RGBA); |
3309 | } |
3310 | for (int i = 0; i < 4; i++) { |
3311 | dst->blend_color[i] = src->blend_color[i]; |
3312 | } |
3313 | } |
3314 | |
3315 | _SOKOL_PRIVATE void _sg_gl_load_rasterizer(const sg_rasterizer_state* src, sg_rasterizer_state* dst) { |
3316 | dst->alpha_to_coverage_enabled = src->alpha_to_coverage_enabled; |
3317 | dst->cull_mode = _sg_def(src->cull_mode, SG_CULLMODE_NONE); |
3318 | dst->face_winding = _sg_def(src->face_winding, SG_FACEWINDING_CW); |
3319 | dst->sample_count = _sg_def(src->sample_count, 1); |
3320 | dst->depth_bias = src->depth_bias; |
3321 | dst->depth_bias_slope_scale = src->depth_bias_slope_scale; |
3322 | dst->depth_bias_clamp = src->depth_bias_clamp; |
3323 | } |
3324 | |
3325 | _SOKOL_PRIVATE void _sg_create_pipeline(_sg_pipeline* pip, _sg_shader* shd, const sg_pipeline_desc* desc) { |
3326 | SOKOL_ASSERT(pip && shd && desc); |
3327 | SOKOL_ASSERT(pip->slot.state == SG_RESOURCESTATE_ALLOC); |
3328 | SOKOL_ASSERT(!pip->shader && pip->shader_id.id == SG_INVALID_ID); |
3329 | SOKOL_ASSERT(desc->shader.id == shd->slot.id); |
3330 | SOKOL_ASSERT(shd->gl_prog); |
3331 | pip->shader = shd; |
3332 | pip->shader_id = desc->shader; |
3333 | pip->primitive_type = _sg_def(desc->primitive_type, SG_PRIMITIVETYPE_TRIANGLES); |
3334 | pip->index_type = _sg_def(desc->index_type, SG_INDEXTYPE_NONE); |
3335 | pip->color_attachment_count = _sg_def(desc->blend.color_attachment_count, 1); |
3336 | pip->color_format = _sg_def(desc->blend.color_format, SG_PIXELFORMAT_RGBA8); |
3337 | pip->depth_format = _sg_def(desc->blend.depth_format, SG_PIXELFORMAT_DEPTHSTENCIL); |
3338 | pip->sample_count = _sg_def(desc->rasterizer.sample_count, 1); |
3339 | _sg_gl_load_depth_stencil(&desc->depth_stencil, &pip->depth_stencil); |
3340 | _sg_gl_load_blend(&desc->blend, &pip->blend); |
3341 | _sg_gl_load_rasterizer(&desc->rasterizer, &pip->rast); |
3342 | |
3343 | /* resolve vertex attributes */ |
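/* attribute offsets and buffer strides can be computed automatically if none are provided explicitly */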
3344 | int auto_offset[SG_MAX_SHADERSTAGE_BUFFERS]; |
3345 | for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { |
3346 | auto_offset[layout_index] = 0; |
3347 | } |
3348 | bool use_auto_offset = true; |
3349 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
3350 | pip->gl_attrs[attr_index].vb_index = -1; |
3351 | /* to use computed offsets, *all* attr offsets must be 0 */ |
3352 | if (desc->layout.attrs[attr_index].offset != 0) { |
3353 | use_auto_offset = false; |
3354 | } |
3355 | } |
3356 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
3357 | const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; |
3358 | if (a_desc->format == SG_VERTEXFORMAT_INVALID) { |
3359 | break; |
3360 | } |
3361 | SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); |
3362 | const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index]; |
3363 | const sg_vertex_step step_func = _sg_def(l_desc->step_func, SG_VERTEXSTEP_PER_VERTEX); |
3364 | const int step_rate = _sg_def(l_desc->step_rate, 1); |
3365 | GLint attr_loc = attr_index; |
3366 | if (a_desc->name) { |
3367 | attr_loc = glGetAttribLocation(pip->shader->gl_prog, a_desc->name); |
3368 | } |
3369 | SOKOL_ASSERT(attr_loc < SG_MAX_VERTEX_ATTRIBUTES); |
3370 | if (attr_loc != -1) { |
3371 | _sg_gl_attr* gl_attr = &pip->gl_attrs[attr_loc]; |
3372 | SOKOL_ASSERT(gl_attr->vb_index == -1); |
3373 | gl_attr->vb_index = (int8_t) a_desc->buffer_index; |
3374 | if (step_func == SG_VERTEXSTEP_PER_VERTEX) { |
3375 | gl_attr->divisor = 0; |
3376 | } |
3377 | else { |
3378 | gl_attr->divisor = (int8_t) step_rate; |
3379 | } |
3380 | gl_attr->stride = (uint8_t) l_desc->stride; |
3381 | gl_attr->offset = use_auto_offset ? auto_offset[a_desc->buffer_index] : a_desc->offset; |
3382 | gl_attr->size = (uint8_t) _sg_gl_vertexformat_size(a_desc->format); |
3383 | gl_attr->type = _sg_gl_vertexformat_type(a_desc->format); |
3384 | gl_attr->normalized = _sg_gl_vertexformat_normalized(a_desc->format); |
3385 | pip->vertex_layout_valid[a_desc->buffer_index] = true; |
3386 | } |
3387 | else { |
SOKOL_LOG("Vertex attribute not found in shader: ");
3389 | SOKOL_LOG(a_desc->name); |
3390 | } |
3391 | auto_offset[a_desc->buffer_index] += _sg_vertexformat_bytesize(a_desc->format); |
3392 | } |
/* fill in computed vertex strides that haven't been explicitly provided */
3394 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
3395 | _sg_gl_attr* gl_attr = &pip->gl_attrs[attr_index]; |
3396 | if ((gl_attr->vb_index != -1) && (0 == gl_attr->stride)) { |
3397 | gl_attr->stride = (uint8_t) auto_offset[gl_attr->vb_index]; |
3398 | } |
3399 | } |
3400 | pip->slot.state = SG_RESOURCESTATE_VALID; |
3401 | } |
3402 | |
3403 | _SOKOL_PRIVATE void _sg_destroy_pipeline(_sg_pipeline* pip) { |
3404 | SOKOL_ASSERT(pip); |
3405 | _sg_init_pipeline_slot(pip); |
3406 | } |
3407 | |
3408 | /* |
3409 | _sg_create_pass |
3410 | |
att_images must point to a _sg_image* att_images[SG_MAX_COLOR_ATTACHMENTS+1] array,
the first entries are the color attachment images (or 0), the last entry
is the depth-stencil image (or 0).
3414 | */ |
3415 | _SOKOL_PRIVATE void _sg_create_pass(_sg_pass* pass, _sg_image** att_images, const sg_pass_desc* desc) { |
3416 | SOKOL_ASSERT(pass && att_images && desc); |
3417 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_ALLOC); |
3418 | SOKOL_ASSERT(att_images && att_images[0]); |
3419 | _SG_GL_CHECK_ERROR(); |
3420 | |
3421 | /* copy image pointers and desc attributes */ |
3422 | const sg_attachment_desc* att_desc; |
3423 | _sg_attachment* att; |
3424 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3425 | SOKOL_ASSERT(0 == pass->color_atts[i].image); |
3426 | att_desc = &desc->color_attachments[i]; |
3427 | if (att_desc->image.id != SG_INVALID_ID) { |
3428 | pass->num_color_atts++; |
3429 | SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id)); |
3430 | SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->pixel_format)); |
3431 | att = &pass->color_atts[i]; |
3432 | SOKOL_ASSERT((att->image == 0) && (att->image_id.id == SG_INVALID_ID)); |
3433 | att->image = att_images[i]; |
3434 | att->image_id = att_desc->image; |
3435 | att->mip_level = att_desc->mip_level; |
3436 | att->slice = att_desc->slice; |
3437 | } |
3438 | } |
3439 | SOKOL_ASSERT(0 == pass->ds_att.image); |
3440 | att_desc = &desc->depth_stencil_attachment; |
3441 | const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; |
3442 | if (att_desc->image.id != SG_INVALID_ID) { |
3443 | SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); |
3444 | SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->pixel_format)); |
3445 | att = &pass->ds_att; |
3446 | SOKOL_ASSERT((att->image == 0) && (att->image_id.id == SG_INVALID_ID)); |
3447 | att->image = att_images[ds_img_index]; |
3448 | att->image_id = att_desc->image; |
3449 | att->mip_level = att_desc->mip_level; |
3450 | att->slice = att_desc->slice; |
3451 | } |
3452 | |
3453 | /* store current framebuffer binding (restored at end of function) */ |
3454 | GLuint gl_orig_fb; |
3455 | glGetIntegerv(GL_FRAMEBUFFER_BINDING, (GLint*)&gl_orig_fb); |
3456 | |
3457 | /* create a framebuffer object */ |
3458 | glGenFramebuffers(1, &pass->gl_fb); |
3459 | glBindFramebuffer(GL_FRAMEBUFFER, pass->gl_fb); |
3460 | |
3461 | /* attach msaa render buffer or textures */ |
3462 | const bool is_msaa = (0 != att_images[0]->gl_msaa_render_buffer); |
3463 | if (is_msaa) { |
3464 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3465 | const _sg_image* att_img = pass->color_atts[i].image; |
3466 | if (att_img) { |
3467 | const GLuint gl_render_buffer = att_img->gl_msaa_render_buffer; |
3468 | SOKOL_ASSERT(gl_render_buffer); |
3469 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0+i, GL_RENDERBUFFER, gl_render_buffer); |
3470 | } |
3471 | } |
3472 | } |
3473 | else { |
3474 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3475 | const _sg_image* att_img = pass->color_atts[i].image; |
3476 | const int mip_level = pass->color_atts[i].mip_level; |
3477 | const int slice = pass->color_atts[i].slice; |
3478 | if (att_img) { |
3479 | const GLuint gl_tex = att_img->gl_tex[0]; |
3480 | SOKOL_ASSERT(gl_tex); |
3481 | const GLenum gl_att = GL_COLOR_ATTACHMENT0 + i; |
3482 | switch (att_img->type) { |
3483 | case SG_IMAGETYPE_2D: |
3484 | glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, GL_TEXTURE_2D, gl_tex, mip_level); |
3485 | break; |
3486 | case SG_IMAGETYPE_CUBE: |
3487 | glFramebufferTexture2D(GL_FRAMEBUFFER, gl_att, _sg_gl_cubeface_target(slice), gl_tex, mip_level); |
3488 | break; |
3489 | default: |
3490 | /* 3D- or array-texture */ |
3491 | #if !defined(SOKOL_GLES2) |
3492 | if (!_sg_gl_gles2) { |
3493 | glFramebufferTextureLayer(GL_FRAMEBUFFER, gl_att, gl_tex, mip_level, slice); |
3494 | } |
3495 | #endif |
3496 | break; |
3497 | } |
3498 | } |
3499 | } |
3500 | } |
3501 | |
3502 | /* attach depth-stencil buffer to framebuffer */ |
3503 | if (pass->ds_att.image) { |
3504 | const GLuint gl_render_buffer = pass->ds_att.image->gl_depth_render_buffer; |
3505 | SOKOL_ASSERT(gl_render_buffer); |
3506 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer); |
3507 | if (_sg_is_depth_stencil_format(pass->ds_att.image->pixel_format)) { |
3508 | glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, gl_render_buffer); |
3509 | } |
3510 | } |
3511 | |
3512 | /* check if framebuffer is complete */ |
3513 | if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { |
SOKOL_LOG("Framebuffer completeness check failed!\n");
3515 | pass->slot.state = SG_RESOURCESTATE_FAILED; |
3516 | return; |
3517 | } |
3518 | |
3519 | /* create MSAA resolve framebuffers if necessary */ |
3520 | if (is_msaa) { |
3521 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3522 | att = &pass->color_atts[i]; |
3523 | if (att->image) { |
3524 | SOKOL_ASSERT(0 == att->gl_msaa_resolve_buffer); |
3525 | glGenFramebuffers(1, &att->gl_msaa_resolve_buffer); |
3526 | glBindFramebuffer(GL_FRAMEBUFFER, att->gl_msaa_resolve_buffer); |
3527 | const GLuint gl_tex = att->image->gl_tex[0]; |
3528 | SOKOL_ASSERT(gl_tex); |
3529 | switch (att->image->type) { |
3530 | case SG_IMAGETYPE_2D: |
3531 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, |
3532 | GL_TEXTURE_2D, gl_tex, att->mip_level); |
3533 | break; |
3534 | case SG_IMAGETYPE_CUBE: |
3535 | glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, |
3536 | _sg_gl_cubeface_target(att->slice), gl_tex, att->mip_level); |
3537 | break; |
3538 | default: |
3539 | #if !defined(SOKOL_GLES2) |
3540 | if (!_sg_gl_gles2) { |
3541 | glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, gl_tex, att->mip_level, att->slice); |
3542 | } |
3543 | #endif |
3544 | break; |
3545 | } |
3546 | /* check if framebuffer is complete */ |
3547 | if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { |
SOKOL_LOG("Framebuffer completeness check failed (msaa resolve buffer)!\n");
3549 | pass->slot.state = SG_RESOURCESTATE_FAILED; |
3550 | return; |
3551 | } |
3552 | } |
3553 | } |
3554 | } |
3555 | |
3556 | /* restore original framebuffer binding */ |
3557 | glBindFramebuffer(GL_FRAMEBUFFER, gl_orig_fb); |
3558 | _SG_GL_CHECK_ERROR(); |
3559 | pass->slot.state = SG_RESOURCESTATE_VALID; |
3560 | } |
3561 | |
3562 | _SOKOL_PRIVATE void _sg_destroy_pass(_sg_pass* pass) { |
3563 | SOKOL_ASSERT(pass); |
3564 | _SG_GL_CHECK_ERROR(); |
3565 | if (0 != pass->gl_fb) { |
3566 | glDeleteFramebuffers(1, &pass->gl_fb); |
3567 | } |
3568 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3569 | if (pass->color_atts[i].gl_msaa_resolve_buffer) { |
3570 | glDeleteFramebuffers(1, &pass->color_atts[i].gl_msaa_resolve_buffer); |
3571 | } |
3572 | } |
3573 | if (pass->ds_att.gl_msaa_resolve_buffer) { |
3574 | glDeleteFramebuffers(1, &pass->ds_att.gl_msaa_resolve_buffer); |
3575 | } |
3576 | _SG_GL_CHECK_ERROR(); |
3577 | _sg_init_pass_slot(pass); |
3578 | } |
3579 | |
3580 | /*-- GL backend rendering functions ------------------------------------------*/ |
3581 | _SOKOL_PRIVATE void _sg_begin_pass(_sg_pass* pass, const sg_pass_action* action, int w, int h) { |
3582 | /* FIXME: what if a texture used as render target is still bound, should we |
3583 | unbind all currently bound textures in begin pass? */ |
3584 | SOKOL_ASSERT(action); |
3585 | SOKOL_ASSERT(!_sg_gl.in_pass); |
3586 | _SG_GL_CHECK_ERROR(); |
3587 | _sg_gl.in_pass = true; |
3588 | _sg_gl.cur_pass = pass; /* can be 0 */ |
3589 | if (pass) { |
3590 | _sg_gl.cur_pass_id.id = pass->slot.id; |
3591 | } |
3592 | else { |
3593 | _sg_gl.cur_pass_id.id = SG_INVALID_ID; |
3594 | } |
3595 | _sg_gl.cur_pass_width = w; |
3596 | _sg_gl.cur_pass_height = h; |
3597 | if (pass) { |
3598 | /* offscreen pass */ |
3599 | SOKOL_ASSERT(pass->gl_fb); |
3600 | glBindFramebuffer(GL_FRAMEBUFFER, pass->gl_fb); |
3601 | #if !defined(SOKOL_GLES2) |
3602 | if (!_sg_gl_gles2) { |
3603 | GLenum att[SG_MAX_COLOR_ATTACHMENTS] = { |
3604 | GL_COLOR_ATTACHMENT0, |
3605 | GL_COLOR_ATTACHMENT1, |
3606 | GL_COLOR_ATTACHMENT2, |
3607 | GL_COLOR_ATTACHMENT3 |
3608 | }; |
3609 | int num_attrs = 0; |
3610 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3611 | if (pass->color_atts[num_attrs].image) { |
3612 | num_attrs++; |
3613 | } |
3614 | else { |
3615 | break; |
3616 | } |
3617 | } |
3618 | glDrawBuffers(num_attrs, att); |
3619 | } |
3620 | #endif |
3621 | } |
3622 | else { |
3623 | /* default pass */ |
3624 | SOKOL_ASSERT(_sg_gl.cur_context); |
3625 | glBindFramebuffer(GL_FRAMEBUFFER, _sg_gl.cur_context->default_framebuffer); |
3626 | } |
3627 | glViewport(0, 0, w, h); |
3628 | glScissor(0, 0, w, h); |
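/* glClear() honors the current color/depth/stencil write masks, so reset them to 'write everything' here, and invalidate the cached pipeline so the next sg_apply_draw_state() re-applies its state */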
3629 | bool need_pip_cache_flush = false; |
3630 | if (_sg_gl.cache.blend.color_write_mask != SG_COLORMASK_RGBA) { |
3631 | need_pip_cache_flush = true; |
3632 | _sg_gl.cache.blend.color_write_mask = SG_COLORMASK_RGBA; |
3633 | glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); |
3634 | } |
3635 | if (!_sg_gl.cache.ds.depth_write_enabled) { |
3636 | need_pip_cache_flush = true; |
3637 | _sg_gl.cache.ds.depth_write_enabled = true; |
3638 | glDepthMask(GL_TRUE); |
3639 | } |
3640 | if (_sg_gl.cache.ds.depth_compare_func != SG_COMPAREFUNC_ALWAYS) { |
3641 | need_pip_cache_flush = true; |
3642 | _sg_gl.cache.ds.depth_compare_func = SG_COMPAREFUNC_ALWAYS; |
3643 | glDepthFunc(GL_ALWAYS); |
3644 | } |
3645 | if (_sg_gl.cache.ds.stencil_write_mask != 0xFF) { |
3646 | need_pip_cache_flush = true; |
3647 | _sg_gl.cache.ds.stencil_write_mask = 0xFF; |
3648 | glStencilMask(0xFF); |
3649 | } |
3650 | if (need_pip_cache_flush) { |
3651 | /* we messed with the state cache directly, need to clear cached |
3652 | pipeline to force re-evaluation in next sg_apply_draw_state() */ |
3653 | _sg_gl.cache.cur_pipeline = 0; |
3654 | _sg_gl.cache.cur_pipeline_id.id = SG_INVALID_ID; |
3655 | } |
3656 | bool use_mrt_clear = (0 != pass); |
3657 | #if defined(SOKOL_GLES2) |
3658 | use_mrt_clear = false; |
3659 | #else |
3660 | if (_sg_gl_gles2) { |
3661 | use_mrt_clear = false; |
3662 | } |
3663 | #endif |
3664 | if (!use_mrt_clear) { |
3665 | GLbitfield clear_mask = 0; |
3666 | if (action->colors[0].action == SG_ACTION_CLEAR) { |
3667 | clear_mask |= GL_COLOR_BUFFER_BIT; |
3668 | const float* c = action->colors[0].val; |
3669 | glClearColor(c[0], c[1], c[2], c[3]); |
3670 | } |
3671 | if (action->depth.action == SG_ACTION_CLEAR) { |
3672 | clear_mask |= GL_DEPTH_BUFFER_BIT; |
3673 | #ifdef SOKOL_GLCORE33 |
3674 | glClearDepth(action->depth.val); |
3675 | #else |
3676 | glClearDepthf(action->depth.val); |
3677 | #endif |
3678 | } |
3679 | if (action->stencil.action == SG_ACTION_CLEAR) { |
3680 | clear_mask |= GL_STENCIL_BUFFER_BIT; |
3681 | glClearStencil(action->stencil.val); |
3682 | } |
3683 | if (0 != clear_mask) { |
3684 | glClear(clear_mask); |
3685 | } |
3686 | } |
#if !defined(SOKOL_GLES2)
3688 | else { |
3689 | SOKOL_ASSERT(pass); |
3690 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
3691 | if (pass->color_atts[i].image) { |
3692 | if (action->colors[i].action == SG_ACTION_CLEAR) { |
3693 | glClearBufferfv(GL_COLOR, i, action->colors[i].val); |
3694 | } |
3695 | } |
3696 | else { |
3697 | break; |
3698 | } |
3699 | } |
3700 | if (pass->ds_att.image) { |
3701 | if ((action->depth.action == SG_ACTION_CLEAR) && (action->stencil.action == SG_ACTION_CLEAR)) { |
3702 | glClearBufferfi(GL_DEPTH_STENCIL, 0, action->depth.val, action->stencil.val); |
3703 | } |
3704 | else if (action->depth.action == SG_ACTION_CLEAR) { |
3705 | glClearBufferfv(GL_DEPTH, 0, &action->depth.val); |
3706 | } |
3707 | else if (action->stencil.action == SG_ACTION_CLEAR) { |
3708 | GLuint val = action->stencil.val; |
3709 | glClearBufferuiv(GL_STENCIL, 0, &val); |
3710 | } |
3711 | } |
3712 | } |
3713 | #endif |
3714 | _SG_GL_CHECK_ERROR(); |
3715 | } |
3716 | |
3717 | _SOKOL_PRIVATE void _sg_end_pass() { |
3718 | SOKOL_ASSERT(_sg_gl.in_pass); |
3719 | _SG_GL_CHECK_ERROR(); |
3720 | |
3721 | /* if this was an offscreen pass, and MSAA rendering was used, need |
3722 | to resolve into the pass images */ |
3723 | #if !defined(SOKOL_GLES2) |
3724 | if (!_sg_gl_gles2 && _sg_gl.cur_pass) { |
3725 | /* check if the pass object is still valid */ |
3726 | const _sg_pass* pass = _sg_gl.cur_pass; |
3727 | SOKOL_ASSERT(pass->slot.id == _sg_gl.cur_pass_id.id); |
3728 | bool is_msaa = (0 != _sg_gl.cur_pass->color_atts[0].gl_msaa_resolve_buffer); |
3729 | if (is_msaa) { |
3730 | SOKOL_ASSERT(pass->gl_fb); |
3731 | glBindFramebuffer(GL_READ_FRAMEBUFFER, pass->gl_fb); |
3732 | SOKOL_ASSERT(pass->color_atts[0].image); |
3733 | const int w = pass->color_atts[0].image->width; |
3734 | const int h = pass->color_atts[0].image->height; |
3735 | for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) { |
3736 | const _sg_attachment* att = &pass->color_atts[att_index]; |
3737 | if (att->image) { |
3738 | SOKOL_ASSERT(att->gl_msaa_resolve_buffer); |
3739 | glBindFramebuffer(GL_DRAW_FRAMEBUFFER, att->gl_msaa_resolve_buffer); |
3740 | glReadBuffer(GL_COLOR_ATTACHMENT0 + att_index); |
3741 | const GLenum gl_att = GL_COLOR_ATTACHMENT0; |
3742 | glDrawBuffers(1, &gl_att); |
3743 | glBlitFramebuffer(0, 0, w, h, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST); |
3744 | } |
3745 | else { |
3746 | break; |
3747 | } |
3748 | } |
3749 | } |
3750 | } |
3751 | #endif |
3752 | _sg_gl.cur_pass = 0; |
3753 | _sg_gl.cur_pass_id.id = SG_INVALID_ID; |
3754 | _sg_gl.cur_pass_width = 0; |
3755 | _sg_gl.cur_pass_height = 0; |
3756 | |
3757 | SOKOL_ASSERT(_sg_gl.cur_context); |
3758 | glBindFramebuffer(GL_FRAMEBUFFER, _sg_gl.cur_context->default_framebuffer); |
3759 | _sg_gl.in_pass = false; |
3760 | _SG_GL_CHECK_ERROR(); |
3761 | } |
3762 | |
3763 | _SOKOL_PRIVATE void _sg_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { |
3764 | SOKOL_ASSERT(_sg_gl.in_pass); |
3765 | y = origin_top_left ? (_sg_gl.cur_pass_height - (y+h)) : y; |
3766 | glViewport(x, y, w, h); |
3767 | } |
3768 | |
3769 | _SOKOL_PRIVATE void _sg_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { |
3770 | SOKOL_ASSERT(_sg_gl.in_pass); |
3771 | y = origin_top_left ? (_sg_gl.cur_pass_height - (y+h)) : y; |
3772 | glScissor(x, y, w, h); |
3773 | } |
3774 | |
3775 | _SOKOL_PRIVATE void _sg_apply_draw_state( |
3776 | _sg_pipeline* pip, |
3777 | _sg_buffer** vbs, const int* vb_offsets, int num_vbs, |
3778 | _sg_buffer* ib, int ib_offset, |
3779 | _sg_image** vs_imgs, int num_vs_imgs, |
3780 | _sg_image** fs_imgs, int num_fs_imgs) |
3781 | { |
3782 | SOKOL_ASSERT(pip); |
3783 | SOKOL_ASSERT(pip->shader); |
3784 | _SOKOL_UNUSED(num_fs_imgs); |
3785 | _SOKOL_UNUSED(num_vs_imgs); |
3786 | _SOKOL_UNUSED(num_vbs); |
3787 | _SG_GL_CHECK_ERROR(); |
3788 | |
3789 | /* need to apply pipeline state? */ |
3790 | if ((_sg_gl.cache.cur_pipeline != pip) || (_sg_gl.cache.cur_pipeline_id.id != pip->slot.id)) { |
3791 | _sg_gl.cache.cur_pipeline = pip; |
3792 | _sg_gl.cache.cur_pipeline_id.id = pip->slot.id; |
3793 | _sg_gl.cache.cur_primitive_type = _sg_gl_primitive_type(pip->primitive_type); |
3794 | _sg_gl.cache.cur_index_type = _sg_gl_index_type(pip->index_type); |
3795 | |
3796 | /* update depth-stencil state */ |
3797 | const sg_depth_stencil_state* new_ds = &pip->depth_stencil; |
3798 | sg_depth_stencil_state* cache_ds = &_sg_gl.cache.ds; |
3799 | if (new_ds->depth_compare_func != cache_ds->depth_compare_func) { |
3800 | cache_ds->depth_compare_func = new_ds->depth_compare_func; |
3801 | glDepthFunc(_sg_gl_compare_func(new_ds->depth_compare_func)); |
3802 | } |
3803 | if (new_ds->depth_write_enabled != cache_ds->depth_write_enabled) { |
3804 | cache_ds->depth_write_enabled = new_ds->depth_write_enabled; |
3805 | glDepthMask(new_ds->depth_write_enabled); |
3806 | } |
3807 | if (new_ds->stencil_enabled != cache_ds->stencil_enabled) { |
3808 | cache_ds->stencil_enabled = new_ds->stencil_enabled; |
3809 | if (new_ds->stencil_enabled) glEnable(GL_STENCIL_TEST); |
3810 | else glDisable(GL_STENCIL_TEST); |
3811 | } |
3812 | if (new_ds->stencil_write_mask != cache_ds->stencil_write_mask) { |
3813 | cache_ds->stencil_write_mask = new_ds->stencil_write_mask; |
3814 | glStencilMask(new_ds->stencil_write_mask); |
3815 | } |
3816 | for (int i = 0; i < 2; i++) { |
3817 | const sg_stencil_state* new_ss = (i==0)? &new_ds->stencil_front : &new_ds->stencil_back; |
3818 | sg_stencil_state* cache_ss = (i==0)? &cache_ds->stencil_front : &cache_ds->stencil_back; |
3819 | GLenum gl_face = (i==0)? GL_FRONT : GL_BACK; |
3820 | if ((new_ss->compare_func != cache_ss->compare_func) || |
3821 | (new_ds->stencil_read_mask != cache_ds->stencil_read_mask) || |
3822 | (new_ds->stencil_ref != cache_ds->stencil_ref)) |
3823 | { |
3824 | cache_ss->compare_func = new_ss->compare_func; |
3825 | glStencilFuncSeparate(gl_face, |
3826 | _sg_gl_compare_func(new_ss->compare_func), |
3827 | new_ds->stencil_ref, |
3828 | new_ds->stencil_read_mask); |
3829 | } |
3830 | if ((new_ss->fail_op != cache_ss->fail_op) || |
3831 | (new_ss->depth_fail_op != cache_ss->depth_fail_op) || |
3832 | (new_ss->pass_op != cache_ss->pass_op)) |
3833 | { |
3834 | cache_ss->fail_op = new_ss->fail_op; |
3835 | cache_ss->depth_fail_op = new_ss->depth_fail_op; |
3836 | cache_ss->pass_op = new_ss->pass_op; |
3837 | glStencilOpSeparate(gl_face, |
3838 | _sg_gl_stencil_op(new_ss->fail_op), |
3839 | _sg_gl_stencil_op(new_ss->depth_fail_op), |
3840 | _sg_gl_stencil_op(new_ss->pass_op)); |
3841 | } |
3842 | } |
3843 | cache_ds->stencil_read_mask = new_ds->stencil_read_mask; |
3844 | cache_ds->stencil_ref = new_ds->stencil_ref; |
3845 | |
3846 | /* update blend state */ |
3847 | const sg_blend_state* new_b = &pip->blend; |
3848 | sg_blend_state* cache_b = &_sg_gl.cache.blend; |
3849 | if (new_b->enabled != cache_b->enabled) { |
3850 | cache_b->enabled = new_b->enabled; |
3851 | if (new_b->enabled) glEnable(GL_BLEND); |
3852 | else glDisable(GL_BLEND); |
3853 | } |
3854 | if ((new_b->src_factor_rgb != cache_b->src_factor_rgb) || |
3855 | (new_b->dst_factor_rgb != cache_b->dst_factor_rgb) || |
3856 | (new_b->src_factor_alpha != cache_b->src_factor_alpha) || |
3857 | (new_b->dst_factor_alpha != cache_b->dst_factor_alpha)) |
3858 | { |
3859 | cache_b->src_factor_rgb = new_b->src_factor_rgb; |
3860 | cache_b->dst_factor_rgb = new_b->dst_factor_rgb; |
3861 | cache_b->src_factor_alpha = new_b->src_factor_alpha; |
3862 | cache_b->dst_factor_alpha = new_b->dst_factor_alpha; |
3863 | glBlendFuncSeparate(_sg_gl_blend_factor(new_b->src_factor_rgb), |
3864 | _sg_gl_blend_factor(new_b->dst_factor_rgb), |
3865 | _sg_gl_blend_factor(new_b->src_factor_alpha), |
3866 | _sg_gl_blend_factor(new_b->dst_factor_alpha)); |
3867 | } |
3868 | if ((new_b->op_rgb != cache_b->op_rgb) || (new_b->op_alpha != cache_b->op_alpha)) { |
3869 | cache_b->op_rgb = new_b->op_rgb; |
3870 | cache_b->op_alpha = new_b->op_alpha; |
3871 | glBlendEquationSeparate(_sg_gl_blend_op(new_b->op_rgb), _sg_gl_blend_op(new_b->op_alpha)); |
3872 | } |
3873 | if (new_b->color_write_mask != cache_b->color_write_mask) { |
3874 | cache_b->color_write_mask = new_b->color_write_mask; |
3875 | glColorMask((new_b->color_write_mask & SG_COLORMASK_R) != 0, |
3876 | (new_b->color_write_mask & SG_COLORMASK_G) != 0, |
3877 | (new_b->color_write_mask & SG_COLORMASK_B) != 0, |
3878 | (new_b->color_write_mask & SG_COLORMASK_A) != 0); |
3879 | } |
3880 | if (!_sg_fequal(new_b->blend_color[0], cache_b->blend_color[0], 0.0001f) || |
3881 | !_sg_fequal(new_b->blend_color[1], cache_b->blend_color[1], 0.0001f) || |
3882 | !_sg_fequal(new_b->blend_color[2], cache_b->blend_color[2], 0.0001f) || |
3883 | !_sg_fequal(new_b->blend_color[3], cache_b->blend_color[3], 0.0001f)) |
3884 | { |
3885 | const float* bc = new_b->blend_color; |
3886 | for (int i=0; i<4; i++) { |
3887 | cache_b->blend_color[i] = bc[i]; |
3888 | } |
3889 | glBlendColor(bc[0], bc[1], bc[2], bc[3]); |
3890 | } |
3891 | |
3892 | /* update rasterizer state */ |
3893 | const sg_rasterizer_state* new_r = &pip->rast; |
3894 | sg_rasterizer_state* cache_r = &_sg_gl.cache.rast; |
3895 | if (new_r->cull_mode != cache_r->cull_mode) { |
3896 | cache_r->cull_mode = new_r->cull_mode; |
3897 | if (SG_CULLMODE_NONE == new_r->cull_mode) { |
3898 | glDisable(GL_CULL_FACE); |
3899 | } |
3900 | else { |
3901 | glEnable(GL_CULL_FACE); |
3902 | GLenum gl_mode = (SG_CULLMODE_FRONT == new_r->cull_mode) ? GL_FRONT : GL_BACK; |
3903 | glCullFace(gl_mode); |
3904 | } |
3905 | } |
3906 | if (new_r->face_winding != cache_r->face_winding) { |
3907 | cache_r->face_winding = new_r->face_winding; |
3908 | GLenum gl_winding = (SG_FACEWINDING_CW == new_r->face_winding) ? GL_CW : GL_CCW; |
3909 | glFrontFace(gl_winding); |
3910 | } |
3911 | if (new_r->alpha_to_coverage_enabled != cache_r->alpha_to_coverage_enabled) { |
3912 | cache_r->alpha_to_coverage_enabled = new_r->alpha_to_coverage_enabled; |
3913 | if (new_r->alpha_to_coverage_enabled) glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE); |
3914 | else glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); |
3915 | } |
3916 | #ifdef SOKOL_GLCORE33 |
3917 | if (new_r->sample_count != cache_r->sample_count) { |
3918 | cache_r->sample_count = new_r->sample_count; |
3919 | if (new_r->sample_count > 1) glEnable(GL_MULTISAMPLE); |
3920 | else glDisable(GL_MULTISAMPLE); |
3921 | } |
3922 | #endif |
3923 | if (!_sg_fequal(new_r->depth_bias, cache_r->depth_bias, 0.000001f) || |
3924 | !_sg_fequal(new_r->depth_bias_slope_scale, cache_r->depth_bias_slope_scale, 0.000001f)) |
3925 | { |
3926 | /* according to ANGLE's D3D11 backend: |
3927 | D3D11 SlopeScaledDepthBias ==> GL polygonOffsetFactor |
3928 | D3D11 DepthBias ==> GL polygonOffsetUnits |
3929 | DepthBiasClamp has no meaning on GL |
3930 | */ |
3931 | cache_r->depth_bias = new_r->depth_bias; |
3932 | cache_r->depth_bias_slope_scale = new_r->depth_bias_slope_scale; |
3933 | glPolygonOffset(new_r->depth_bias_slope_scale, new_r->depth_bias); |
3934 | bool po_enabled = true; |
3935 | if (_sg_fequal(new_r->depth_bias, 0.0f, 0.000001f) && |
3936 | _sg_fequal(new_r->depth_bias_slope_scale, 0.0f, 0.000001f)) |
3937 | { |
3938 | po_enabled = false; |
3939 | } |
3940 | if (po_enabled != _sg_gl.cache.polygon_offset_enabled) { |
3941 | _sg_gl.cache.polygon_offset_enabled = po_enabled; |
3942 | if (po_enabled) glEnable(GL_POLYGON_OFFSET_FILL); |
3943 | else glDisable(GL_POLYGON_OFFSET_FILL); |
3944 | } |
3945 | } |
3946 | |
3947 | /* bind shader program */ |
3948 | glUseProgram(pip->shader->gl_prog); |
3949 | } |
3950 | |
3951 | /* bind textures */ |
3952 | _SG_GL_CHECK_ERROR(); |
3953 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
3954 | const _sg_shader_stage* stage = &pip->shader->stage[stage_index]; |
3955 | _sg_image** imgs = (stage_index == SG_SHADERSTAGE_VS)? vs_imgs : fs_imgs; |
3956 | SOKOL_ASSERT(((stage_index == SG_SHADERSTAGE_VS)? num_vs_imgs : num_fs_imgs) == stage->num_images); |
3957 | for (int img_index = 0; img_index < stage->num_images; img_index++) { |
3958 | const _sg_shader_image* shd_img = &stage->images[img_index]; |
3959 | if (shd_img->gl_loc != -1) { |
3960 | _sg_image* img = imgs[img_index]; |
SOKOL_ASSERT(img && img->gl_target);
const GLuint gl_tex = img->gl_tex[img->active_slot];
3963 | SOKOL_ASSERT((shd_img->gl_tex_slot != -1) && gl_tex); |
3964 | glUniform1i(shd_img->gl_loc, shd_img->gl_tex_slot); |
3965 | glActiveTexture(GL_TEXTURE0+shd_img->gl_tex_slot); |
3966 | glBindTexture(img->gl_target, gl_tex); |
3967 | } |
3968 | } |
3969 | } |
3970 | _SG_GL_CHECK_ERROR(); |
3971 | |
3972 | /* index buffer (can be 0) */ |
3973 | const GLuint gl_ib = ib ? ib->gl_buf[ib->active_slot] : 0; |
3974 | _sg_gl_bind_buffer(GL_ELEMENT_ARRAY_BUFFER, gl_ib, &_sg_gl.cache); |
3975 | _sg_gl.cache.cur_ib_offset = ib_offset; |
3976 | |
3977 | /* vertex attributes */ |
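/* only re-apply vertex attribute state that differs from the cached state, to avoid redundant glVertexAttribPointer() calls */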
3978 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
3979 | _sg_gl_attr* attr = &pip->gl_attrs[attr_index]; |
3980 | _sg_gl_cache_attr* cache_attr = &_sg_gl.cache.attrs[attr_index]; |
3981 | bool cache_attr_dirty = false; |
3982 | int vb_offset = 0; |
3983 | GLuint gl_vb = 0; |
3984 | if (attr->vb_index >= 0) { |
3985 | /* attribute is enabled */ |
3986 | SOKOL_ASSERT(attr->vb_index < num_vbs); |
_sg_buffer* vb = vbs[attr->vb_index];
SOKOL_ASSERT(vb);
gl_vb = vb->gl_buf[vb->active_slot];
3990 | vb_offset = vb_offsets[attr->vb_index] + attr->offset; |
3991 | if ((gl_vb != cache_attr->gl_vbuf) || |
3992 | (attr->size != cache_attr->gl_attr.size) || |
3993 | (attr->type != cache_attr->gl_attr.type) || |
3994 | (attr->normalized != cache_attr->gl_attr.normalized) || |
3995 | (attr->stride != cache_attr->gl_attr.stride) || |
3996 | (vb_offset != cache_attr->gl_attr.offset) || |
3997 | (cache_attr->gl_attr.divisor != attr->divisor)) |
3998 | { |
3999 | _sg_gl_bind_buffer(GL_ARRAY_BUFFER, gl_vb, &_sg_gl.cache); |
4000 | glVertexAttribPointer(attr_index, attr->size, attr->type, |
4001 | attr->normalized, attr->stride, |
4002 | (const GLvoid*)(GLintptr)vb_offset); |
4003 | #ifdef SOKOL_INSTANCING_ENABLED |
4004 | if (_sg_gl.features[SG_FEATURE_INSTANCING]) { |
4005 | glVertexAttribDivisor(attr_index, attr->divisor); |
4006 | } |
4007 | #endif |
4008 | cache_attr_dirty = true; |
4009 | } |
4010 | if (cache_attr->gl_attr.vb_index == -1) { |
4011 | glEnableVertexAttribArray(attr_index); |
4012 | cache_attr_dirty = true; |
4013 | } |
4014 | } |
4015 | else { |
4016 | /* attribute is disabled */ |
4017 | if (cache_attr->gl_attr.vb_index != -1) { |
4018 | glDisableVertexAttribArray(attr_index); |
4019 | cache_attr_dirty = true; |
4020 | } |
4021 | } |
4022 | if (cache_attr_dirty) { |
4023 | cache_attr->gl_attr = *attr; |
4024 | cache_attr->gl_attr.offset = vb_offset; |
4025 | cache_attr->gl_vbuf = gl_vb; |
4026 | } |
4027 | } |
4028 | _SG_GL_CHECK_ERROR(); |
4029 | } |
4030 | |
4031 | _SOKOL_PRIVATE void _sg_apply_uniform_block(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { |
4032 | _SOKOL_UNUSED(num_bytes); |
4033 | SOKOL_ASSERT(data && (num_bytes > 0)); |
4034 | SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES)); |
4035 | SOKOL_ASSERT(_sg_gl.cache.cur_pipeline); |
4036 | SOKOL_ASSERT(_sg_gl.cache.cur_pipeline->slot.id == _sg_gl.cache.cur_pipeline_id.id); |
4037 | SOKOL_ASSERT(_sg_gl.cache.cur_pipeline->shader->slot.id == _sg_gl.cache.cur_pipeline->shader_id.id); |
4038 | _sg_shader_stage* stage = &_sg_gl.cache.cur_pipeline->shader->stage[stage_index]; |
4039 | SOKOL_ASSERT(ub_index < stage->num_uniform_blocks); |
4040 | _sg_uniform_block* ub = &stage->uniform_blocks[ub_index]; |
4041 | SOKOL_ASSERT(ub->size == num_bytes); |
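/* uniform block members are uploaded with individual glUniform*() calls */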
4042 | for (int u_index = 0; u_index < ub->num_uniforms; u_index++) { |
4043 | _sg_uniform* u = &ub->uniforms[u_index]; |
4044 | SOKOL_ASSERT(u->type != SG_UNIFORMTYPE_INVALID); |
4045 | if (u->gl_loc == -1) { |
4046 | continue; |
4047 | } |
4048 | GLfloat* ptr = (GLfloat*) (((uint8_t*)data) + u->offset); |
4049 | switch (u->type) { |
4050 | case SG_UNIFORMTYPE_INVALID: |
4051 | break; |
4052 | case SG_UNIFORMTYPE_FLOAT: |
4053 | glUniform1fv(u->gl_loc, u->count, ptr); |
4054 | break; |
4055 | case SG_UNIFORMTYPE_FLOAT2: |
4056 | glUniform2fv(u->gl_loc, u->count, ptr); |
4057 | break; |
4058 | case SG_UNIFORMTYPE_FLOAT3: |
4059 | glUniform3fv(u->gl_loc, u->count, ptr); |
4060 | break; |
4061 | case SG_UNIFORMTYPE_FLOAT4: |
4062 | glUniform4fv(u->gl_loc, u->count, ptr); |
4063 | break; |
4064 | case SG_UNIFORMTYPE_MAT4: |
4065 | glUniformMatrix4fv(u->gl_loc, u->count, GL_FALSE, ptr); |
4066 | break; |
4067 | default: |
4068 | SOKOL_UNREACHABLE; |
4069 | break; |
4070 | } |
4071 | } |
4072 | } |
4073 | |
4074 | _SOKOL_PRIVATE void _sg_draw(int base_element, int num_elements, int num_instances) { |
4075 | const GLenum i_type = _sg_gl.cache.cur_index_type; |
4076 | const GLenum p_type = _sg_gl.cache.cur_primitive_type; |
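/* note: if instancing is not supported, instanced draw calls are silently dropped */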
4077 | if (0 != i_type) { |
4078 | /* indexed rendering */ |
4079 | const int i_size = (i_type == GL_UNSIGNED_SHORT) ? 2 : 4; |
4080 | const int ib_offset = _sg_gl.cache.cur_ib_offset; |
4081 | const GLvoid* indices = (const GLvoid*)(GLintptr)(base_element*i_size+ib_offset); |
4082 | if (num_instances == 1) { |
4083 | glDrawElements(p_type, num_elements, i_type, indices); |
4084 | } |
4085 | else { |
4086 | if (_sg_gl.features[SG_FEATURE_INSTANCING]) { |
4087 | glDrawElementsInstanced(p_type, num_elements, i_type, indices, num_instances); |
4088 | } |
4089 | } |
4090 | } |
4091 | else { |
4092 | /* non-indexed rendering */ |
4093 | if (num_instances == 1) { |
4094 | glDrawArrays(p_type, base_element, num_elements); |
4095 | } |
4096 | else { |
4097 | if (_sg_gl.features[SG_FEATURE_INSTANCING]) { |
4098 | glDrawArraysInstanced(p_type, base_element, num_elements, num_instances); |
4099 | } |
4100 | } |
4101 | } |
4102 | } |
4103 | |
4104 | _SOKOL_PRIVATE void _sg_commit() { |
4105 | SOKOL_ASSERT(!_sg_gl.in_pass); |
4106 | } |
4107 | |
4108 | _SOKOL_PRIVATE void _sg_update_buffer(_sg_buffer* buf, const void* data_ptr, int data_size) { |
4109 | SOKOL_ASSERT(buf && data_ptr && (data_size > 0)); |
4110 | /* only one update per buffer per frame allowed */ |
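/* cycle to the next buffer slot (dynamic/stream buffers keep one GL buffer per in-flight frame) */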
4111 | if (++buf->active_slot >= buf->num_slots) { |
4112 | buf->active_slot = 0; |
4113 | } |
4114 | GLenum gl_tgt = _sg_gl_buffer_target(buf->type); |
4115 | SOKOL_ASSERT(buf->active_slot < SG_NUM_INFLIGHT_FRAMES); |
4116 | GLuint gl_buf = buf->gl_buf[buf->active_slot]; |
4117 | SOKOL_ASSERT(gl_buf); |
4118 | _SG_GL_CHECK_ERROR(); |
4119 | _sg_gl_bind_buffer(gl_tgt, gl_buf, &_sg_gl.cache); |
4120 | glBufferSubData(gl_tgt, 0, data_size, data_ptr); |
4121 | _SG_GL_CHECK_ERROR(); |
4122 | } |
4123 | |
4124 | _SOKOL_PRIVATE void _sg_append_buffer(_sg_buffer* buf, const void* data_ptr, int data_size, bool new_frame) { |
4125 | SOKOL_ASSERT(buf && data_ptr && (data_size > 0)); |
4126 | if (new_frame) { |
4127 | if (++buf->active_slot >= buf->num_slots) { |
4128 | buf->active_slot = 0; |
4129 | } |
4130 | } |
4131 | GLenum gl_tgt = _sg_gl_buffer_target(buf->type); |
4132 | SOKOL_ASSERT(buf->active_slot < SG_NUM_INFLIGHT_FRAMES); |
4133 | GLuint gl_buf = buf->gl_buf[buf->active_slot]; |
4134 | SOKOL_ASSERT(gl_buf); |
4135 | _SG_GL_CHECK_ERROR(); |
4136 | _sg_gl_bind_buffer(gl_tgt, gl_buf, &_sg_gl.cache); |
4137 | glBufferSubData(gl_tgt, buf->append_pos, data_size, data_ptr); |
4138 | _SG_GL_CHECK_ERROR(); |
4139 | } |
4140 | |
4141 | _SOKOL_PRIVATE void _sg_update_image(_sg_image* img, const sg_image_content* data) { |
4142 | SOKOL_ASSERT(img && data); |
4143 | /* only one update per image per frame allowed */ |
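/* cycle to the next texture slot (dynamic/stream images keep one GL texture per in-flight frame) */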
4144 | if (++img->active_slot >= img->num_slots) { |
4145 | img->active_slot = 0; |
4146 | } |
4147 | SOKOL_ASSERT(img->active_slot < SG_NUM_INFLIGHT_FRAMES); |
4148 | SOKOL_ASSERT(0 != img->gl_tex[img->active_slot]); |
4149 | glBindTexture(img->gl_target, img->gl_tex[img->active_slot]); |
4150 | const GLenum gl_img_format = _sg_gl_teximage_format(img->pixel_format); |
4151 | const GLenum gl_img_type = _sg_gl_teximage_type(img->pixel_format); |
4152 | const int num_faces = img->type == SG_IMAGETYPE_CUBE ? 6 : 1; |
4153 | const int num_mips = img->num_mipmaps; |
4154 | for (int face_index = 0; face_index < num_faces; face_index++) { |
4155 | for (int mip_index = 0; mip_index < num_mips; mip_index++) { |
4156 | GLenum gl_img_target = img->gl_target; |
4157 | if (SG_IMAGETYPE_CUBE == img->type) { |
4158 | gl_img_target = _sg_gl_cubeface_target(face_index); |
4159 | } |
4160 | const GLvoid* data_ptr = data->subimage[face_index][mip_index].ptr; |
4161 | int mip_width = img->width >> mip_index; |
4162 | if (mip_width == 0) { |
4163 | mip_width = 1; |
4164 | } |
4165 | int mip_height = img->height >> mip_index; |
4166 | if (mip_height == 0) { |
4167 | mip_height = 1; |
4168 | } |
4169 | if ((SG_IMAGETYPE_2D == img->type) || (SG_IMAGETYPE_CUBE == img->type)) { |
4170 | glTexSubImage2D(gl_img_target, mip_index, |
4171 | 0, 0, |
4172 | mip_width, mip_height, |
4173 | gl_img_format, gl_img_type, |
4174 | data_ptr); |
4175 | } |
4176 | #if !defined(SOKOL_GLES2) |
4177 | else if (!_sg_gl_gles2 && ((SG_IMAGETYPE_3D == img->type) || (SG_IMAGETYPE_ARRAY == img->type))) { |
4178 | int mip_depth = img->depth >> mip_index; |
4179 | if (mip_depth == 0) { |
4180 | mip_depth = 1; |
4181 | } |
4182 | glTexSubImage3D(gl_img_target, mip_index, |
4183 | 0, 0, 0, |
4184 | mip_width, mip_height, mip_depth, |
4185 | gl_img_format, gl_img_type, |
4186 | data_ptr); |
4187 | |
4188 | } |
4189 | #endif |
4190 | } |
4191 | } |
4192 | } |
4193 | |
4194 | /*== D3D11 BACKEND ===========================================================*/ |
4195 | #elif defined(SOKOL_D3D11) |
4196 | |
4197 | #ifndef D3D11_NO_HELPERS |
4198 | #define D3D11_NO_HELPERS |
4199 | #endif |
4200 | #ifndef CINTERFACE |
4201 | #define CINTERFACE |
4202 | #endif |
4203 | #ifndef COBJMACROS |
4204 | #define COBJMACROS |
4205 | #endif |
4206 | #ifndef WIN32_LEAN_AND_MEAN |
4207 | #define WIN32_LEAN_AND_MEAN |
4208 | #endif |
4209 | #include <windows.h> |
4210 | #include <d3d11.h> |
4211 | #if (defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) |
4212 | #pragma comment (lib, "WindowsApp.lib") |
4213 | #else |
4214 | #pragma comment (lib, "user32.lib") |
4215 | #pragma comment (lib, "dxgi.lib") |
4216 | #pragma comment (lib, "d3d11.lib") |
4217 | #pragma comment (lib, "dxguid.lib") |
4218 | #endif |
4219 | #if defined(SOKOL_D3D11_SHADER_COMPILER) |
4220 | #include <d3dcompiler.h> |
4221 | #if !(defined(WINAPI_FAMILY_PARTITION) && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)) |
4222 | #pragma comment (lib, "d3dcompiler.lib") |
4223 | #endif |
4224 | #endif |
4225 | |
4226 | /*-- enum translation functions ----------------------------------------------*/ |
4227 | _SOKOL_PRIVATE D3D11_USAGE _sg_d3d11_usage(sg_usage usg) { |
4228 | switch (usg) { |
4229 | case SG_USAGE_IMMUTABLE: |
4230 | return D3D11_USAGE_IMMUTABLE; |
4231 | case SG_USAGE_DYNAMIC: |
4232 | case SG_USAGE_STREAM: |
4233 | return D3D11_USAGE_DYNAMIC; |
4234 | default: |
4235 | SOKOL_UNREACHABLE; |
4236 | return (D3D11_USAGE) 0; |
4237 | } |
4238 | } |
4239 | |
4240 | _SOKOL_PRIVATE UINT _sg_d3d11_cpu_access_flags(sg_usage usg) { |
4241 | switch (usg) { |
4242 | case SG_USAGE_IMMUTABLE: |
4243 | return 0; |
4244 | case SG_USAGE_DYNAMIC: |
4245 | case SG_USAGE_STREAM: |
4246 | return D3D11_CPU_ACCESS_WRITE; |
4247 | default: |
4248 | SOKOL_UNREACHABLE; |
4249 | return 0; |
4250 | } |
4251 | } |
4252 | |
4253 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_texture_format(sg_pixel_format fmt) { |
4254 | /* |
4255 | NOTE: the following pixel formats are only supported on D3D11.1 |
4256 | (we're running on D3D11.0): |
4257 | DXGI_FORMAT_B4G4R4A4_UNORM |
4258 | DXGI_FORMAT_B5G6R5_UNORM |
4259 | DXGI_FORMAT_B5G5R5A1_UNORM |
4260 | */ |
4261 | switch (fmt) { |
4262 | case SG_PIXELFORMAT_RGBA8: return DXGI_FORMAT_R8G8B8A8_UNORM; |
4263 | case SG_PIXELFORMAT_R10G10B10A2: return DXGI_FORMAT_R10G10B10A2_UNORM; |
4264 | case SG_PIXELFORMAT_RGBA32F: return DXGI_FORMAT_R32G32B32A32_FLOAT; |
4265 | case SG_PIXELFORMAT_RGBA16F: return DXGI_FORMAT_R16G16B16A16_FLOAT; |
4266 | case SG_PIXELFORMAT_R32F: return DXGI_FORMAT_R32_FLOAT; |
4267 | case SG_PIXELFORMAT_R16F: return DXGI_FORMAT_R16_FLOAT; |
4268 | case SG_PIXELFORMAT_L8: return DXGI_FORMAT_R8_UNORM; |
4269 | case SG_PIXELFORMAT_DXT1: return DXGI_FORMAT_BC1_UNORM; |
4270 | case SG_PIXELFORMAT_DXT3: return DXGI_FORMAT_BC2_UNORM; |
4271 | case SG_PIXELFORMAT_DXT5: return DXGI_FORMAT_BC3_UNORM; |
4272 | default: return DXGI_FORMAT_UNKNOWN; |
}
4274 | } |
4275 | |
4276 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_rendertarget_color_format(sg_pixel_format fmt) { |
4277 | switch (fmt) { |
4278 | case SG_PIXELFORMAT_RGBA8: return DXGI_FORMAT_R8G8B8A8_UNORM; |
4279 | case SG_PIXELFORMAT_RGBA32F: return DXGI_FORMAT_R32G32B32A32_FLOAT; |
4280 | case SG_PIXELFORMAT_RGBA16F: return DXGI_FORMAT_R16G16B16A16_FLOAT; |
4281 | case SG_PIXELFORMAT_R32F: return DXGI_FORMAT_R32_FLOAT; |
4282 | case SG_PIXELFORMAT_R16F: return DXGI_FORMAT_R16_FLOAT; |
4283 | case SG_PIXELFORMAT_L8: return DXGI_FORMAT_R8_UNORM; |
4284 | default: return DXGI_FORMAT_UNKNOWN; |
4285 | } |
4286 | } |
4287 | |
4288 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_rendertarget_depth_format(sg_pixel_format fmt) { |
4289 | switch (fmt) { |
4290 | case SG_PIXELFORMAT_DEPTH: return DXGI_FORMAT_D16_UNORM; |
4291 | case SG_PIXELFORMAT_DEPTHSTENCIL: return DXGI_FORMAT_D24_UNORM_S8_UINT; |
4292 | default: return DXGI_FORMAT_UNKNOWN; |
4293 | } |
4294 | } |
4295 | |
4296 | _SOKOL_PRIVATE D3D11_PRIMITIVE_TOPOLOGY _sg_d3d11_primitive_topology(sg_primitive_type prim_type) { |
4297 | switch (prim_type) { |
4298 | case SG_PRIMITIVETYPE_POINTS: return D3D11_PRIMITIVE_TOPOLOGY_POINTLIST; |
4299 | case SG_PRIMITIVETYPE_LINES: return D3D11_PRIMITIVE_TOPOLOGY_LINELIST; |
4300 | case SG_PRIMITIVETYPE_LINE_STRIP: return D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP; |
4301 | case SG_PRIMITIVETYPE_TRIANGLES: return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST; |
4302 | case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; |
4303 | default: SOKOL_UNREACHABLE; return (D3D11_PRIMITIVE_TOPOLOGY) 0; |
4304 | } |
4305 | } |
4306 | |
4307 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_index_format(sg_index_type index_type) { |
4308 | switch (index_type) { |
4309 | case SG_INDEXTYPE_NONE: return DXGI_FORMAT_UNKNOWN; |
4310 | case SG_INDEXTYPE_UINT16: return DXGI_FORMAT_R16_UINT; |
4311 | case SG_INDEXTYPE_UINT32: return DXGI_FORMAT_R32_UINT; |
4312 | default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0; |
4313 | } |
4314 | } |
4315 | |
4316 | _SOKOL_PRIVATE D3D11_FILTER _sg_d3d11_filter(sg_filter min_f, sg_filter mag_f, uint32_t max_anisotropy) { |
4317 | if (max_anisotropy > 1) { |
4318 | return D3D11_FILTER_ANISOTROPIC; |
4319 | } |
4320 | else if (mag_f == SG_FILTER_NEAREST) { |
4321 | switch (min_f) { |
4322 | case SG_FILTER_NEAREST: |
4323 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: |
4324 | return D3D11_FILTER_MIN_MAG_MIP_POINT; |
4325 | case SG_FILTER_LINEAR: |
4326 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: |
4327 | return D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT; |
4328 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: |
4329 | return D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR; |
4330 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: |
4331 | return D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR; |
4332 | default: |
4333 | SOKOL_UNREACHABLE; break; |
4334 | } |
4335 | } |
4336 | else if (mag_f == SG_FILTER_LINEAR) { |
4337 | switch (min_f) { |
4338 | case SG_FILTER_NEAREST: |
4339 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: |
4340 | return D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT; |
4341 | case SG_FILTER_LINEAR: |
4342 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: |
4343 | return D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT; |
4344 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: |
4345 | return D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR; |
4346 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: |
4347 | return D3D11_FILTER_MIN_MAG_MIP_LINEAR; |
4348 | default: |
4349 | SOKOL_UNREACHABLE; break; |
4350 | } |
4351 | } |
4352 | /* invalid value for mag filter */ |
4353 | SOKOL_UNREACHABLE; |
4354 | return D3D11_FILTER_MIN_MAG_MIP_POINT; |
4355 | } |
4356 | |
4357 | _SOKOL_PRIVATE D3D11_TEXTURE_ADDRESS_MODE _sg_d3d11_address_mode(sg_wrap m) { |
4358 | switch (m) { |
4359 | case SG_WRAP_REPEAT: return D3D11_TEXTURE_ADDRESS_WRAP; |
4360 | case SG_WRAP_CLAMP_TO_EDGE: return D3D11_TEXTURE_ADDRESS_CLAMP; |
4361 | case SG_WRAP_MIRRORED_REPEAT: return D3D11_TEXTURE_ADDRESS_MIRROR; |
4362 | default: SOKOL_UNREACHABLE; return (D3D11_TEXTURE_ADDRESS_MODE) 0; |
4363 | } |
4364 | } |
4365 | |
4366 | _SOKOL_PRIVATE DXGI_FORMAT _sg_d3d11_vertex_format(sg_vertex_format fmt) { |
4367 | switch (fmt) { |
4368 | case SG_VERTEXFORMAT_FLOAT: return DXGI_FORMAT_R32_FLOAT; |
4369 | case SG_VERTEXFORMAT_FLOAT2: return DXGI_FORMAT_R32G32_FLOAT; |
4370 | case SG_VERTEXFORMAT_FLOAT3: return DXGI_FORMAT_R32G32B32_FLOAT; |
4371 | case SG_VERTEXFORMAT_FLOAT4: return DXGI_FORMAT_R32G32B32A32_FLOAT; |
4372 | case SG_VERTEXFORMAT_BYTE4: return DXGI_FORMAT_R8G8B8A8_SINT; |
4373 | case SG_VERTEXFORMAT_BYTE4N: return DXGI_FORMAT_R8G8B8A8_SNORM; |
4374 | case SG_VERTEXFORMAT_UBYTE4: return DXGI_FORMAT_R8G8B8A8_UINT; |
4375 | case SG_VERTEXFORMAT_UBYTE4N: return DXGI_FORMAT_R8G8B8A8_UNORM; |
4376 | case SG_VERTEXFORMAT_SHORT2: return DXGI_FORMAT_R16G16_SINT; |
4377 | case SG_VERTEXFORMAT_SHORT2N: return DXGI_FORMAT_R16G16_SNORM; |
4378 | case SG_VERTEXFORMAT_SHORT4: return DXGI_FORMAT_R16G16B16A16_SINT; |
4379 | case SG_VERTEXFORMAT_SHORT4N: return DXGI_FORMAT_R16G16B16A16_SNORM; |
4380 | /* FIXME: signed 10-10-10-2 vertex format not supported on d3d11 (only unsigned) */ |
4381 | default: SOKOL_UNREACHABLE; return (DXGI_FORMAT) 0; |
4382 | } |
4383 | } |
4384 | |
4385 | _SOKOL_PRIVATE D3D11_INPUT_CLASSIFICATION _sg_d3d11_input_classification(sg_vertex_step step) { |
4386 | switch (step) { |
4387 | case SG_VERTEXSTEP_PER_VERTEX: return D3D11_INPUT_PER_VERTEX_DATA; |
4388 | case SG_VERTEXSTEP_PER_INSTANCE: return D3D11_INPUT_PER_INSTANCE_DATA; |
4389 | default: SOKOL_UNREACHABLE; return (D3D11_INPUT_CLASSIFICATION) 0; |
4390 | } |
4391 | } |
4392 | |
4393 | _SOKOL_PRIVATE D3D11_CULL_MODE _sg_d3d11_cull_mode(sg_cull_mode m) { |
4394 | switch (m) { |
4395 | case SG_CULLMODE_NONE: return D3D11_CULL_NONE; |
4396 | case SG_CULLMODE_FRONT: return D3D11_CULL_FRONT; |
4397 | case SG_CULLMODE_BACK: return D3D11_CULL_BACK; |
4398 | default: SOKOL_UNREACHABLE; return (D3D11_CULL_MODE) 0; |
4399 | } |
4400 | } |
4401 | |
4402 | _SOKOL_PRIVATE D3D11_COMPARISON_FUNC _sg_d3d11_compare_func(sg_compare_func f) { |
4403 | switch (f) { |
4404 | case SG_COMPAREFUNC_NEVER: return D3D11_COMPARISON_NEVER; |
4405 | case SG_COMPAREFUNC_LESS: return D3D11_COMPARISON_LESS; |
4406 | case SG_COMPAREFUNC_EQUAL: return D3D11_COMPARISON_EQUAL; |
4407 | case SG_COMPAREFUNC_LESS_EQUAL: return D3D11_COMPARISON_LESS_EQUAL; |
4408 | case SG_COMPAREFUNC_GREATER: return D3D11_COMPARISON_GREATER; |
4409 | case SG_COMPAREFUNC_NOT_EQUAL: return D3D11_COMPARISON_NOT_EQUAL; |
4410 | case SG_COMPAREFUNC_GREATER_EQUAL: return D3D11_COMPARISON_GREATER_EQUAL; |
4411 | case SG_COMPAREFUNC_ALWAYS: return D3D11_COMPARISON_ALWAYS; |
4412 | default: SOKOL_UNREACHABLE; return (D3D11_COMPARISON_FUNC) 0; |
4413 | } |
4414 | } |
4415 | |
4416 | _SOKOL_PRIVATE D3D11_STENCIL_OP _sg_d3d11_stencil_op(sg_stencil_op op) { |
4417 | switch (op) { |
4418 | case SG_STENCILOP_KEEP: return D3D11_STENCIL_OP_KEEP; |
4419 | case SG_STENCILOP_ZERO: return D3D11_STENCIL_OP_ZERO; |
4420 | case SG_STENCILOP_REPLACE: return D3D11_STENCIL_OP_REPLACE; |
4421 | case SG_STENCILOP_INCR_CLAMP: return D3D11_STENCIL_OP_INCR_SAT; |
4422 | case SG_STENCILOP_DECR_CLAMP: return D3D11_STENCIL_OP_DECR_SAT; |
4423 | case SG_STENCILOP_INVERT: return D3D11_STENCIL_OP_INVERT; |
4424 | case SG_STENCILOP_INCR_WRAP: return D3D11_STENCIL_OP_INCR; |
4425 | case SG_STENCILOP_DECR_WRAP: return D3D11_STENCIL_OP_DECR; |
4426 | default: SOKOL_UNREACHABLE; return (D3D11_STENCIL_OP) 0; |
4427 | } |
4428 | } |
4429 | |
4430 | _SOKOL_PRIVATE D3D11_BLEND _sg_d3d11_blend_factor(sg_blend_factor f) { |
4431 | switch (f) { |
4432 | case SG_BLENDFACTOR_ZERO: return D3D11_BLEND_ZERO; |
4433 | case SG_BLENDFACTOR_ONE: return D3D11_BLEND_ONE; |
4434 | case SG_BLENDFACTOR_SRC_COLOR: return D3D11_BLEND_SRC_COLOR; |
4435 | case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return D3D11_BLEND_INV_SRC_COLOR; |
4436 | case SG_BLENDFACTOR_SRC_ALPHA: return D3D11_BLEND_SRC_ALPHA; |
4437 | case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return D3D11_BLEND_INV_SRC_ALPHA; |
4438 | case SG_BLENDFACTOR_DST_COLOR: return D3D11_BLEND_DEST_COLOR; |
4439 | case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return D3D11_BLEND_INV_DEST_COLOR; |
4440 | case SG_BLENDFACTOR_DST_ALPHA: return D3D11_BLEND_DEST_ALPHA; |
4441 | case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return D3D11_BLEND_INV_DEST_ALPHA; |
4442 | case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return D3D11_BLEND_SRC_ALPHA_SAT; |
4443 | case SG_BLENDFACTOR_BLEND_COLOR: return D3D11_BLEND_BLEND_FACTOR; |
4444 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return D3D11_BLEND_INV_BLEND_FACTOR; |
4445 | case SG_BLENDFACTOR_BLEND_ALPHA: return D3D11_BLEND_BLEND_FACTOR; |
4446 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return D3D11_BLEND_INV_BLEND_FACTOR; |
4447 | default: SOKOL_UNREACHABLE; return (D3D11_BLEND) 0; |
4448 | } |
4449 | } |
4450 | |
4451 | _SOKOL_PRIVATE D3D11_BLEND_OP _sg_d3d11_blend_op(sg_blend_op op) { |
4452 | switch (op) { |
4453 | case SG_BLENDOP_ADD: return D3D11_BLEND_OP_ADD; |
4454 | case SG_BLENDOP_SUBTRACT: return D3D11_BLEND_OP_SUBTRACT; |
4455 | case SG_BLENDOP_REVERSE_SUBTRACT: return D3D11_BLEND_OP_REV_SUBTRACT; |
4456 | default: SOKOL_UNREACHABLE; return (D3D11_BLEND_OP) 0; |
4457 | } |
4458 | } |
4459 | |
4460 | _SOKOL_PRIVATE UINT8 _sg_d3d11_color_write_mask(sg_color_mask m) { |
4461 | UINT8 res = 0; |
4462 | if (m & SG_COLORMASK_R) { |
4463 | res |= D3D11_COLOR_WRITE_ENABLE_RED; |
4464 | } |
4465 | if (m & SG_COLORMASK_G) { |
4466 | res |= D3D11_COLOR_WRITE_ENABLE_GREEN; |
4467 | } |
4468 | if (m & SG_COLORMASK_B) { |
4469 | res |= D3D11_COLOR_WRITE_ENABLE_BLUE; |
4470 | } |
4471 | if (m & SG_COLORMASK_A) { |
4472 | res |= D3D11_COLOR_WRITE_ENABLE_ALPHA; |
4473 | } |
4474 | return res; |
4475 | } |
4476 | |
4477 | /*-- backend resource structures ---------------------------------------------*/ |
4478 | typedef struct { |
4479 | _sg_slot slot; |
4480 | int size; |
4481 | int append_pos; |
4482 | bool append_overflow; |
4483 | sg_buffer_type type; |
4484 | sg_usage usage; |
4485 | uint32_t update_frame_index; |
4486 | uint32_t append_frame_index; |
4487 | ID3D11Buffer* d3d11_buf; |
4488 | } _sg_buffer; |
4489 | |
4490 | _SOKOL_PRIVATE void _sg_init_buffer_slot(_sg_buffer* buf) { |
4491 | SOKOL_ASSERT(buf); |
4492 | memset(buf, 0, sizeof(_sg_buffer)); |
4493 | } |
4494 | |
4495 | typedef struct { |
4496 | _sg_slot slot; |
4497 | sg_image_type type; |
4498 | bool render_target; |
4499 | int width; |
4500 | int height; |
4501 | int depth; |
4502 | int num_mipmaps; |
4503 | sg_usage usage; |
4504 | sg_pixel_format pixel_format; |
4505 | int sample_count; |
4506 | sg_filter min_filter; |
4507 | sg_filter mag_filter; |
4508 | sg_wrap wrap_u; |
4509 | sg_wrap wrap_v; |
4510 | sg_wrap wrap_w; |
4511 | uint32_t max_anisotropy; |
4512 | uint32_t upd_frame_index; |
4513 | DXGI_FORMAT d3d11_format; |
4514 | ID3D11Texture2D* d3d11_tex2d; |
4515 | ID3D11Texture3D* d3d11_tex3d; |
4516 | ID3D11Texture2D* d3d11_texds; |
4517 | ID3D11Texture2D* d3d11_texmsaa; |
4518 | ID3D11ShaderResourceView* d3d11_srv; |
4519 | ID3D11SamplerState* d3d11_smp; |
4520 | } _sg_image; |
4521 | |
4522 | _SOKOL_PRIVATE void _sg_init_image_slot(_sg_image* img) { |
4523 | SOKOL_ASSERT(img); |
4524 | memset(img, 0, sizeof(_sg_image)); |
4525 | } |
4526 | |
4527 | typedef struct { |
4528 | int size; |
4529 | } _sg_uniform_block; |
4530 | |
4531 | typedef struct { |
4532 | sg_image_type type; |
4533 | } _sg_shader_image; |
4534 | |
4535 | typedef struct { |
4536 | int num_uniform_blocks; |
4537 | int num_images; |
4538 | _sg_uniform_block uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; |
4539 | _sg_shader_image images[SG_MAX_SHADERSTAGE_IMAGES]; |
4540 | ID3D11Buffer* d3d11_cbs[SG_MAX_SHADERSTAGE_UBS]; |
4541 | } _sg_shader_stage; |
4542 | |
4543 | typedef struct { |
4544 | _sg_slot slot; |
4545 | _sg_shader_stage stage[SG_NUM_SHADER_STAGES]; |
4546 | ID3D11VertexShader* d3d11_vs; |
4547 | ID3D11PixelShader* d3d11_fs; |
4548 | void* d3d11_vs_blob; |
4549 | int d3d11_vs_blob_length; |
4550 | } _sg_shader; |
4551 | |
4552 | _SOKOL_PRIVATE void _sg_init_shader_slot(_sg_shader* shd) { |
4553 | SOKOL_ASSERT(shd); |
4554 | memset(shd, 0, sizeof(_sg_shader)); |
4555 | } |
4556 | |
4557 | typedef struct { |
4558 | _sg_slot slot; |
4559 | _sg_shader* shader; |
4560 | sg_shader shader_id; |
4561 | sg_index_type index_type; |
4562 | bool vertex_layout_valid[SG_MAX_SHADERSTAGE_BUFFERS]; |
4563 | int color_attachment_count; |
4564 | sg_pixel_format color_format; |
4565 | sg_pixel_format depth_format; |
4566 | int sample_count; |
4567 | float blend_color[4]; |
4568 | UINT d3d11_stencil_ref; |
4569 | UINT d3d11_vb_strides[SG_MAX_SHADERSTAGE_BUFFERS]; |
4570 | D3D_PRIMITIVE_TOPOLOGY d3d11_topology; |
4571 | DXGI_FORMAT d3d11_index_format; |
4572 | ID3D11InputLayout* d3d11_il; |
4573 | ID3D11RasterizerState* d3d11_rs; |
4574 | ID3D11DepthStencilState* d3d11_dss; |
4575 | ID3D11BlendState* d3d11_bs; |
4576 | } _sg_pipeline; |
4577 | |
4578 | _SOKOL_PRIVATE void _sg_init_pipeline_slot(_sg_pipeline* pip) { |
4579 | SOKOL_ASSERT(pip); |
4580 | memset(pip, 0, sizeof(_sg_pipeline)); |
4581 | } |
4582 | |
4583 | typedef struct { |
4584 | _sg_image* image; |
4585 | sg_image image_id; |
4586 | int mip_level; |
4587 | int slice; |
4588 | } _sg_attachment; |
4589 | |
4590 | typedef struct { |
4591 | _sg_slot slot; |
4592 | int num_color_atts; |
4593 | _sg_attachment color_atts[SG_MAX_COLOR_ATTACHMENTS]; |
4594 | _sg_attachment ds_att; |
4595 | ID3D11RenderTargetView* d3d11_rtvs[SG_MAX_COLOR_ATTACHMENTS]; |
4596 | ID3D11DepthStencilView* d3d11_dsv; |
4597 | } _sg_pass; |
4598 | |
4599 | _SOKOL_PRIVATE void _sg_init_pass_slot(_sg_pass* pass) { |
4600 | SOKOL_ASSERT(pass); |
4601 | memset(pass, 0, sizeof(_sg_pass)); |
4602 | } |
4603 | |
4604 | typedef struct { |
4605 | _sg_slot slot; |
4606 | } _sg_context; |
4607 | |
4608 | _SOKOL_PRIVATE void _sg_init_context_slot(_sg_context* context) { |
4609 | SOKOL_ASSERT(context); |
4610 | memset(context, 0, sizeof(_sg_context)); |
4611 | } |
4612 | |
4613 | /*-- main D3D11 backend state and functions ----------------------------------*/ |
4614 | typedef struct { |
4615 | bool valid; |
4616 | ID3D11Device* dev; |
4617 | ID3D11DeviceContext* ctx; |
4618 | const void* (*rtv_cb)(void); |
4619 | const void* (*dsv_cb)(void); |
4620 | bool in_pass; |
4621 | bool use_indexed_draw; |
4622 | int cur_width; |
4623 | int cur_height; |
4624 | int num_rtvs; |
4625 | _sg_pass* cur_pass; |
4626 | sg_pass cur_pass_id; |
4627 | _sg_pipeline* cur_pipeline; |
4628 | sg_pipeline cur_pipeline_id; |
4629 | ID3D11RenderTargetView* cur_rtvs[SG_MAX_COLOR_ATTACHMENTS]; |
4630 | ID3D11DepthStencilView* cur_dsv; |
4631 | /* the following arrays are used for unbinding resources, they will always contain zeroes */ |
4632 | ID3D11RenderTargetView* zero_rtvs[SG_MAX_COLOR_ATTACHMENTS]; |
4633 | ID3D11Buffer* zero_vbs[SG_MAX_SHADERSTAGE_BUFFERS]; |
4634 | UINT zero_vb_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; |
4635 | UINT zero_vb_strides[SG_MAX_SHADERSTAGE_BUFFERS]; |
4636 | ID3D11Buffer* zero_cbs[SG_MAX_SHADERSTAGE_UBS]; |
4637 | ID3D11ShaderResourceView* zero_srvs[SG_MAX_SHADERSTAGE_IMAGES]; |
4638 | ID3D11SamplerState* zero_smps[SG_MAX_SHADERSTAGE_IMAGES]; |
/* global D3D11_SUBRESOURCE_DATA array for providing texture content */
4640 | D3D11_SUBRESOURCE_DATA subres_data[SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS]; |
4641 | } _sg_backend; |
4642 | static _sg_backend _sg_d3d11; |
4643 | |
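/*
    Backend setup: sokol_gfx does not create the D3D11 device and device-context
    itself, the application must pass them (plus two callbacks which return the
    current default render-target- and depth-stencil-view) in the sg_desc struct.
    A rough sketch of the application side (the 'my_*' names are placeholders):

        sg_desc desc = {0};
        desc.d3d11_device = my_device;                      // ID3D11Device*
        desc.d3d11_device_context = my_device_context;      // ID3D11DeviceContext*
        desc.d3d11_render_target_view_cb = my_get_rtv;      // returns ID3D11RenderTargetView* as const void*
        desc.d3d11_depth_stencil_view_cb = my_get_dsv;      // returns ID3D11DepthStencilView* as const void*
        sg_setup(&desc);
*/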
4644 | _SOKOL_PRIVATE void _sg_setup_backend(const sg_desc* desc) { |
4645 | SOKOL_ASSERT(desc); |
4646 | SOKOL_ASSERT(desc->d3d11_device); |
4647 | SOKOL_ASSERT(desc->d3d11_device_context); |
4648 | SOKOL_ASSERT(desc->d3d11_render_target_view_cb); |
4649 | SOKOL_ASSERT(desc->d3d11_depth_stencil_view_cb); |
4650 | SOKOL_ASSERT(desc->d3d11_render_target_view_cb != desc->d3d11_depth_stencil_view_cb); |
4651 | memset(&_sg_d3d11, 0, sizeof(_sg_d3d11)); |
4652 | _sg_d3d11.valid = true; |
4653 | _sg_d3d11.dev = (ID3D11Device*) desc->d3d11_device; |
4654 | _sg_d3d11.ctx = (ID3D11DeviceContext*) desc->d3d11_device_context; |
4655 | _sg_d3d11.rtv_cb = desc->d3d11_render_target_view_cb; |
4656 | _sg_d3d11.dsv_cb = desc->d3d11_depth_stencil_view_cb; |
4657 | } |
4658 | |
4659 | _SOKOL_PRIVATE void _sg_discard_backend() { |
4660 | SOKOL_ASSERT(_sg_d3d11.valid); |
4661 | memset(&_sg_d3d11, 0, sizeof(_sg_d3d11)); |
4662 | } |
4663 | |
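/* feature queries: the features listed in the switch below are supported by the D3D11 backend, everything else returns false */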
4664 | _SOKOL_PRIVATE bool _sg_query_feature(sg_feature f) { |
4665 | switch (f) { |
4666 | case SG_FEATURE_INSTANCING: |
4667 | case SG_FEATURE_TEXTURE_COMPRESSION_DXT: |
4668 | case SG_FEATURE_TEXTURE_FLOAT: |
4669 | case SG_FEATURE_TEXTURE_HALF_FLOAT: |
4670 | case SG_FEATURE_ORIGIN_TOP_LEFT: |
4671 | case SG_FEATURE_MSAA_RENDER_TARGETS: |
4672 | case SG_FEATURE_MULTIPLE_RENDER_TARGET: |
4673 | case SG_FEATURE_IMAGETYPE_3D: |
4674 | case SG_FEATURE_IMAGETYPE_ARRAY: |
4675 | return true; |
4676 | default: |
4677 | return false; |
4678 | } |
4679 | } |
4680 | |
4681 | _SOKOL_PRIVATE void _sg_d3d11_clear_state() { |
/* clear all device-context state, so that resource references don't remain stuck in the D3D11 device context */
4683 | ID3D11DeviceContext_OMSetRenderTargets(_sg_d3d11.ctx, SG_MAX_COLOR_ATTACHMENTS, _sg_d3d11.zero_rtvs, NULL); |
4684 | ID3D11DeviceContext_RSSetState(_sg_d3d11.ctx, NULL); |
4685 | ID3D11DeviceContext_OMSetDepthStencilState(_sg_d3d11.ctx, NULL, 0); |
4686 | ID3D11DeviceContext_OMSetBlendState(_sg_d3d11.ctx, NULL, NULL, 0xFFFFFFFF); |
4687 | ID3D11DeviceContext_IASetVertexBuffers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, _sg_d3d11.zero_vbs, _sg_d3d11.zero_vb_strides, _sg_d3d11.zero_vb_offsets); |
4688 | ID3D11DeviceContext_IASetIndexBuffer(_sg_d3d11.ctx, NULL, DXGI_FORMAT_UNKNOWN, 0); |
4689 | ID3D11DeviceContext_IASetInputLayout(_sg_d3d11.ctx, NULL); |
4690 | ID3D11DeviceContext_VSSetShader(_sg_d3d11.ctx, NULL, NULL, 0); |
4691 | ID3D11DeviceContext_PSSetShader(_sg_d3d11.ctx, NULL, NULL, 0); |
4692 | ID3D11DeviceContext_VSSetConstantBuffers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, _sg_d3d11.zero_cbs); |
4693 | ID3D11DeviceContext_PSSetConstantBuffers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, _sg_d3d11.zero_cbs); |
4694 | ID3D11DeviceContext_VSSetShaderResources(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg_d3d11.zero_srvs); |
4695 | ID3D11DeviceContext_PSSetShaderResources(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg_d3d11.zero_srvs); |
4696 | ID3D11DeviceContext_VSSetSamplers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg_d3d11.zero_smps); |
4697 | ID3D11DeviceContext_PSSetSamplers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, _sg_d3d11.zero_smps); |
4698 | } |
4699 | |
4700 | _SOKOL_PRIVATE void _sg_reset_state_cache() { |
4701 | /* just clear the d3d11 device context state */ |
4702 | _sg_d3d11_clear_state(); |
4703 | } |
4704 | |
4705 | _SOKOL_PRIVATE void _sg_activate_context(_sg_context* ctx) { |
4706 | _SOKOL_UNUSED(ctx); |
4707 | _sg_reset_state_cache(); |
4708 | } |
4709 | |
4710 | _SOKOL_PRIVATE void _sg_create_context(_sg_context* ctx) { |
4711 | SOKOL_ASSERT(ctx); |
4712 | SOKOL_ASSERT(ctx->slot.state == SG_RESOURCESTATE_ALLOC); |
4713 | ctx->slot.state = SG_RESOURCESTATE_VALID; |
4714 | } |
4715 | |
4716 | _SOKOL_PRIVATE void _sg_destroy_context(_sg_context* ctx) { |
4717 | SOKOL_ASSERT(ctx); |
4718 | _sg_init_context_slot(ctx); |
4719 | } |
4720 | |
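/* buffer creation: either create a new ID3D11Buffer, or 'inject' an existing native buffer provided in sg_buffer_desc.d3d11_buffer */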
4721 | _SOKOL_PRIVATE void _sg_create_buffer(_sg_buffer* buf, const sg_buffer_desc* desc) { |
4722 | SOKOL_ASSERT(buf && desc); |
4723 | SOKOL_ASSERT(buf->slot.state == SG_RESOURCESTATE_ALLOC); |
4724 | SOKOL_ASSERT(!buf->d3d11_buf); |
4725 | buf->size = desc->size; |
4726 | buf->append_pos = 0; |
4727 | buf->append_overflow = false; |
4728 | buf->type = _sg_def(desc->type, SG_BUFFERTYPE_VERTEXBUFFER); |
4729 | buf->usage = _sg_def(desc->usage, SG_USAGE_IMMUTABLE); |
4730 | buf->update_frame_index = 0; |
4731 | buf->append_frame_index = 0; |
4732 | const bool injected = (0 != desc->d3d11_buffer); |
4733 | if (injected) { |
4734 | buf->d3d11_buf = (ID3D11Buffer*) desc->d3d11_buffer; |
4735 | ID3D11Buffer_AddRef(buf->d3d11_buf); |
4736 | } |
4737 | else { |
4738 | D3D11_BUFFER_DESC d3d11_desc; |
4739 | memset(&d3d11_desc, 0, sizeof(d3d11_desc)); |
4740 | d3d11_desc.ByteWidth = buf->size; |
4741 | d3d11_desc.Usage = _sg_d3d11_usage(buf->usage); |
4742 | d3d11_desc.BindFlags = buf->type == SG_BUFFERTYPE_VERTEXBUFFER ? D3D11_BIND_VERTEX_BUFFER : D3D11_BIND_INDEX_BUFFER; |
4743 | d3d11_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(buf->usage); |
4744 | D3D11_SUBRESOURCE_DATA* init_data_ptr = 0; |
4745 | D3D11_SUBRESOURCE_DATA init_data; |
4746 | memset(&init_data, 0, sizeof(init_data)); |
4747 | if (buf->usage == SG_USAGE_IMMUTABLE) { |
4748 | SOKOL_ASSERT(desc->content); |
4749 | init_data.pSysMem = desc->content; |
4750 | init_data_ptr = &init_data; |
4751 | } |
4752 | HRESULT hr = ID3D11Device_CreateBuffer(_sg_d3d11.dev, &d3d11_desc, init_data_ptr, &buf->d3d11_buf); |
4753 | _SOKOL_UNUSED(hr); |
4754 | SOKOL_ASSERT(SUCCEEDED(hr) && buf->d3d11_buf); |
4755 | } |
4756 | buf->slot.state = SG_RESOURCESTATE_VALID; |
4757 | } |
4758 | |
4759 | _SOKOL_PRIVATE void _sg_destroy_buffer(_sg_buffer* buf) { |
4760 | SOKOL_ASSERT(buf); |
4761 | if (buf->d3d11_buf) { |
4762 | ID3D11Buffer_Release(buf->d3d11_buf); |
4763 | } |
4764 | _sg_init_buffer_slot(buf); |
4765 | } |
4766 | |
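/* populate the global subres_data array with initial-content pointers and row-/slice-pitches
   for every face, array-slice and mip-level of an image (consumed by CreateTexture2D/3D during image creation) */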
4767 | _SOKOL_PRIVATE void _sg_d3d11_fill_subres_data(const _sg_image* img, const sg_image_content* content) { |
4768 | const int num_faces = (img->type == SG_IMAGETYPE_CUBE) ? 6:1; |
4769 | const int num_slices = (img->type == SG_IMAGETYPE_ARRAY) ? img->depth:1; |
4770 | int subres_index = 0; |
4771 | for (int face_index = 0; face_index < num_faces; face_index++) { |
4772 | for (int slice_index = 0; slice_index < num_slices; slice_index++) { |
4773 | for (int mip_index = 0; mip_index < img->num_mipmaps; mip_index++, subres_index++) { |
4774 | SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS)); |
4775 | D3D11_SUBRESOURCE_DATA* subres_data = &_sg_d3d11.subres_data[subres_index]; |
4776 | const int mip_width = ((img->width>>mip_index)>0) ? img->width>>mip_index : 1; |
4777 | const int mip_height = ((img->height>>mip_index)>0) ? img->height>>mip_index : 1; |
4778 | const sg_subimage_content* subimg_content = &(content->subimage[face_index][mip_index]); |
4779 | const int slice_size = subimg_content->size / num_slices; |
4780 | const int slice_offset = slice_size * slice_index; |
4781 | const uint8_t* ptr = (const uint8_t*) subimg_content->ptr; |
4782 | subres_data->pSysMem = ptr + slice_offset; |
4783 | subres_data->SysMemPitch = _sg_row_pitch(img->pixel_format, mip_width); |
4784 | if (img->type == SG_IMAGETYPE_3D) { |
4785 | /* FIXME? const int mip_depth = ((img->depth>>mip_index)>0) ? img->depth>>mip_index : 1; */ |
4786 | subres_data->SysMemSlicePitch = _sg_surface_pitch(img->pixel_format, mip_width, mip_height); |
4787 | } |
4788 | else { |
4789 | subres_data->SysMemSlicePitch = 0; |
4790 | } |
4791 | } |
4792 | } |
4793 | } |
4794 | } |
4795 | |
4796 | _SOKOL_PRIVATE void _sg_create_image(_sg_image* img, const sg_image_desc* desc) { |
4797 | SOKOL_ASSERT(img && desc); |
4798 | SOKOL_ASSERT(img->slot.state == SG_RESOURCESTATE_ALLOC); |
4799 | SOKOL_ASSERT(!img->d3d11_tex2d && !img->d3d11_tex3d && !img->d3d11_texds && !img->d3d11_texmsaa); |
4800 | SOKOL_ASSERT(!img->d3d11_srv && !img->d3d11_smp); |
4801 | HRESULT hr; |
4802 | |
4803 | img->type = _sg_def(desc->type, SG_IMAGETYPE_2D); |
4804 | img->render_target = desc->render_target; |
4805 | img->width = desc->width; |
4806 | img->height = desc->height; |
4807 | img->depth = _sg_def(desc->depth, 1); |
4808 | img->num_mipmaps = _sg_def(desc->num_mipmaps, 1); |
4809 | img->usage = _sg_def(desc->usage, SG_USAGE_IMMUTABLE); |
4810 | img->pixel_format = _sg_def(desc->pixel_format, SG_PIXELFORMAT_RGBA8); |
4811 | img->sample_count = _sg_def(desc->sample_count, 1); |
4812 | img->min_filter = _sg_def(desc->min_filter, SG_FILTER_NEAREST); |
4813 | img->mag_filter = _sg_def(desc->mag_filter, SG_FILTER_NEAREST); |
4814 | img->wrap_u = _sg_def(desc->wrap_u, SG_WRAP_REPEAT); |
4815 | img->wrap_v = _sg_def(desc->wrap_v, SG_WRAP_REPEAT); |
4816 | img->wrap_w = _sg_def(desc->wrap_w, SG_WRAP_REPEAT); |
4817 | img->max_anisotropy = _sg_def(desc->max_anisotropy, 1); |
4818 | img->upd_frame_index = 0; |
4819 | const bool injected = (0 != desc->d3d11_texture); |
4820 | |
4821 | /* special case depth-stencil buffer? */ |
4822 | if (_sg_is_valid_rendertarget_depth_format(img->pixel_format)) { |
4823 | /* create only a depth-texture */ |
4824 | SOKOL_ASSERT(!injected); |
4825 | img->d3d11_format = _sg_d3d11_rendertarget_depth_format(img->pixel_format); |
4826 | if (img->d3d11_format == DXGI_FORMAT_UNKNOWN) { |
4827 | /* trying to create a texture format that's not supported by D3D */ |
SOKOL_LOG("trying to create a D3D11 depth-texture with unsupported pixel format\n");
4829 | img->slot.state = SG_RESOURCESTATE_FAILED; |
4830 | return; |
4831 | } |
4832 | D3D11_TEXTURE2D_DESC d3d11_desc; |
4833 | memset(&d3d11_desc, 0, sizeof(d3d11_desc)); |
4834 | d3d11_desc.Width = img->width; |
4835 | d3d11_desc.Height = img->height; |
4836 | d3d11_desc.MipLevels = 1; |
4837 | d3d11_desc.ArraySize = 1; |
4838 | d3d11_desc.Format = img->d3d11_format; |
4839 | d3d11_desc.Usage = D3D11_USAGE_DEFAULT; |
4840 | d3d11_desc.BindFlags = D3D11_BIND_DEPTH_STENCIL; |
4841 | d3d11_desc.SampleDesc.Count = img->sample_count; |
4842 | d3d11_desc.SampleDesc.Quality = (img->sample_count > 1) ? D3D11_STANDARD_MULTISAMPLE_PATTERN : 0; |
4843 | hr = ID3D11Device_CreateTexture2D(_sg_d3d11.dev, &d3d11_desc, NULL, &img->d3d11_texds); |
4844 | SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11_texds); |
4845 | } |
4846 | else { |
4847 | /* create (or inject) color texture */ |
4848 | |
4849 | /* prepare initial content pointers */ |
4850 | D3D11_SUBRESOURCE_DATA* init_data = 0; |
4851 | if (!injected && (img->usage == SG_USAGE_IMMUTABLE) && !img->render_target) { |
4852 | _sg_d3d11_fill_subres_data(img, &desc->content); |
4853 | init_data = _sg_d3d11.subres_data; |
4854 | } |
4855 | if (img->type != SG_IMAGETYPE_3D) { |
4856 | /* 2D-, cube- or array-texture */ |
4857 | /* if this is an MSAA render target, the following texture will be the 'resolve-texture' */ |
4858 | D3D11_TEXTURE2D_DESC d3d11_tex_desc; |
4859 | memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); |
4860 | d3d11_tex_desc.Width = img->width; |
4861 | d3d11_tex_desc.Height = img->height; |
4862 | d3d11_tex_desc.MipLevels = img->num_mipmaps; |
4863 | switch (img->type) { |
4864 | case SG_IMAGETYPE_ARRAY: d3d11_tex_desc.ArraySize = img->depth; break; |
4865 | case SG_IMAGETYPE_CUBE: d3d11_tex_desc.ArraySize = 6; break; |
4866 | default: d3d11_tex_desc.ArraySize = 1; break; |
4867 | } |
4868 | d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; |
4869 | if (img->render_target) { |
4870 | img->d3d11_format = _sg_d3d11_rendertarget_color_format(img->pixel_format); |
4871 | d3d11_tex_desc.Format = img->d3d11_format; |
4872 | d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; |
4873 | if (img->sample_count == 1) { |
4874 | d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET; |
4875 | } |
4876 | d3d11_tex_desc.CPUAccessFlags = 0; |
4877 | } |
4878 | else { |
4879 | img->d3d11_format = _sg_d3d11_texture_format(img->pixel_format); |
4880 | d3d11_tex_desc.Format = img->d3d11_format; |
4881 | d3d11_tex_desc.Usage = _sg_d3d11_usage(img->usage); |
4882 | d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->usage); |
4883 | } |
4884 | if (img->d3d11_format == DXGI_FORMAT_UNKNOWN) { |
4885 | /* trying to create a texture format that's not supported by D3D */ |
SOKOL_LOG("trying to create a D3D11 texture with unsupported pixel format\n");
4887 | img->slot.state = SG_RESOURCESTATE_FAILED; |
4888 | return; |
4889 | } |
4890 | d3d11_tex_desc.SampleDesc.Count = 1; |
4891 | d3d11_tex_desc.SampleDesc.Quality = 0; |
4892 | d3d11_tex_desc.MiscFlags = (img->type == SG_IMAGETYPE_CUBE) ? D3D11_RESOURCE_MISC_TEXTURECUBE : 0; |
4893 | if (injected) { |
4894 | img->d3d11_tex2d = (ID3D11Texture2D*) desc->d3d11_texture; |
4895 | ID3D11Texture2D_AddRef(img->d3d11_tex2d); |
4896 | } |
4897 | else { |
4898 | hr = ID3D11Device_CreateTexture2D(_sg_d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11_tex2d); |
4899 | SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11_tex2d); |
4900 | } |
4901 | |
4902 | /* also need to create a separate MSAA render target texture? */ |
4903 | if (img->sample_count > 1) { |
4904 | d3d11_tex_desc.BindFlags |= D3D11_BIND_RENDER_TARGET; |
4905 | d3d11_tex_desc.SampleDesc.Count = img->sample_count; |
4906 | d3d11_tex_desc.SampleDesc.Quality = (UINT)D3D11_STANDARD_MULTISAMPLE_PATTERN; |
4907 | hr = ID3D11Device_CreateTexture2D(_sg_d3d11.dev, &d3d11_tex_desc, NULL, &img->d3d11_texmsaa); |
4908 | SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11_texmsaa); |
4909 | } |
4910 | |
4911 | /* shader-resource-view */ |
4912 | D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc; |
4913 | memset(&d3d11_srv_desc, 0, sizeof(d3d11_srv_desc)); |
4914 | d3d11_srv_desc.Format = d3d11_tex_desc.Format; |
4915 | switch (img->type) { |
4916 | case SG_IMAGETYPE_2D: |
4917 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D; |
4918 | d3d11_srv_desc.Texture2D.MipLevels = img->num_mipmaps; |
4919 | break; |
4920 | case SG_IMAGETYPE_CUBE: |
4921 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURECUBE; |
4922 | d3d11_srv_desc.TextureCube.MipLevels = img->num_mipmaps; |
4923 | break; |
4924 | case SG_IMAGETYPE_ARRAY: |
4925 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2DARRAY; |
4926 | d3d11_srv_desc.Texture2DArray.MipLevels = img->num_mipmaps; |
4927 | d3d11_srv_desc.Texture2DArray.ArraySize = img->depth; |
4928 | break; |
4929 | default: |
4930 | SOKOL_UNREACHABLE; break; |
4931 | } |
4932 | hr = ID3D11Device_CreateShaderResourceView(_sg_d3d11.dev, (ID3D11Resource*)img->d3d11_tex2d, &d3d11_srv_desc, &img->d3d11_srv); |
4933 | SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11_srv); |
4934 | } |
4935 | else { |
4936 | /* 3D texture */ |
4937 | D3D11_TEXTURE3D_DESC d3d11_tex_desc; |
4938 | memset(&d3d11_tex_desc, 0, sizeof(d3d11_tex_desc)); |
4939 | d3d11_tex_desc.Width = img->width; |
4940 | d3d11_tex_desc.Height = img->height; |
4941 | d3d11_tex_desc.Depth = img->depth; |
4942 | d3d11_tex_desc.MipLevels = img->num_mipmaps; |
4943 | if (img->render_target) { |
4944 | img->d3d11_format = _sg_d3d11_rendertarget_color_format(img->pixel_format); |
4945 | d3d11_tex_desc.Format = img->d3d11_format; |
4946 | d3d11_tex_desc.Usage = D3D11_USAGE_DEFAULT; |
4947 | d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE|D3D11_BIND_RENDER_TARGET; |
4948 | d3d11_tex_desc.CPUAccessFlags = 0; |
4949 | } |
4950 | else { |
4951 | img->d3d11_format = _sg_d3d11_texture_format(img->pixel_format); |
4952 | d3d11_tex_desc.Format = img->d3d11_format; |
4953 | d3d11_tex_desc.Usage = _sg_d3d11_usage(img->usage); |
4954 | d3d11_tex_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; |
4955 | d3d11_tex_desc.CPUAccessFlags = _sg_d3d11_cpu_access_flags(img->usage); |
4956 | } |
4957 | if (img->d3d11_format == DXGI_FORMAT_UNKNOWN) { |
4958 | /* trying to create a texture format that's not supported by D3D */ |
SOKOL_LOG("trying to create a D3D11 texture with unsupported pixel format\n");
4960 | img->slot.state = SG_RESOURCESTATE_FAILED; |
4961 | return; |
4962 | } |
4963 | if (injected) { |
4964 | img->d3d11_tex3d = (ID3D11Texture3D*) desc->d3d11_texture; |
4965 | ID3D11Texture3D_AddRef(img->d3d11_tex3d); |
4966 | } |
4967 | else { |
4968 | hr = ID3D11Device_CreateTexture3D(_sg_d3d11.dev, &d3d11_tex_desc, init_data, &img->d3d11_tex3d); |
4969 | SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11_tex3d); |
4970 | } |
4971 | |
4972 | /* shader resource view for 3d texture */ |
4973 | D3D11_SHADER_RESOURCE_VIEW_DESC d3d11_srv_desc; |
4974 | memset(&d3d11_srv_desc, 0, sizeof(d3d11_srv_desc)); |
4975 | d3d11_srv_desc.Format = d3d11_tex_desc.Format; |
4976 | d3d11_srv_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE3D; |
4977 | d3d11_srv_desc.Texture3D.MipLevels = img->num_mipmaps; |
4978 | hr = ID3D11Device_CreateShaderResourceView(_sg_d3d11.dev, (ID3D11Resource*)img->d3d11_tex3d, &d3d11_srv_desc, &img->d3d11_srv); |
4979 | SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11_srv); |
4980 | } |
4981 | |
4982 | /* sampler state object, note D3D11 implements an internal shared-pool for sampler objects */ |
4983 | D3D11_SAMPLER_DESC d3d11_smp_desc; |
4984 | memset(&d3d11_smp_desc, 0, sizeof(d3d11_smp_desc)); |
4985 | d3d11_smp_desc.Filter = _sg_d3d11_filter(img->min_filter, img->mag_filter, img->max_anisotropy); |
4986 | d3d11_smp_desc.AddressU = _sg_d3d11_address_mode(img->wrap_u); |
4987 | d3d11_smp_desc.AddressV = _sg_d3d11_address_mode(img->wrap_v); |
4988 | d3d11_smp_desc.AddressW = _sg_d3d11_address_mode(img->wrap_w); |
4989 | d3d11_smp_desc.MaxAnisotropy = img->max_anisotropy; |
4990 | d3d11_smp_desc.ComparisonFunc = D3D11_COMPARISON_NEVER; |
4991 | d3d11_smp_desc.MinLOD = desc->min_lod; |
4992 | d3d11_smp_desc.MaxLOD = _sg_def_flt(desc->max_lod, D3D11_FLOAT32_MAX); |
4993 | hr = ID3D11Device_CreateSamplerState(_sg_d3d11.dev, &d3d11_smp_desc, &img->d3d11_smp); |
4994 | SOKOL_ASSERT(SUCCEEDED(hr) && img->d3d11_smp); |
4995 | } |
4996 | img->slot.state = SG_RESOURCESTATE_VALID; |
4997 | } |
4998 | |
4999 | _SOKOL_PRIVATE void _sg_destroy_image(_sg_image* img) { |
5000 | SOKOL_ASSERT(img); |
5001 | if (img->d3d11_tex2d) { |
5002 | ID3D11Texture2D_Release(img->d3d11_tex2d); |
5003 | } |
5004 | if (img->d3d11_tex3d) { |
5005 | ID3D11Texture3D_Release(img->d3d11_tex3d); |
5006 | } |
5007 | if (img->d3d11_texds) { |
5008 | ID3D11Texture2D_Release(img->d3d11_texds); |
5009 | } |
5010 | if (img->d3d11_texmsaa) { |
5011 | ID3D11Texture2D_Release(img->d3d11_texmsaa); |
5012 | } |
5013 | if (img->d3d11_srv) { |
5014 | ID3D11ShaderResourceView_Release(img->d3d11_srv); |
5015 | } |
5016 | if (img->d3d11_smp) { |
5017 | ID3D11SamplerState_Release(img->d3d11_smp); |
5018 | } |
5019 | _sg_init_image_slot(img); |
5020 | } |
5021 | |
5022 | #if defined(SOKOL_D3D11_SHADER_COMPILER) |
5023 | _SOKOL_PRIVATE ID3DBlob* _sg_d3d11_compile_shader(const sg_shader_stage_desc* stage_desc, const char* target) { |
5024 | ID3DBlob* output = NULL; |
5025 | ID3DBlob* errors = NULL; |
5026 | D3DCompile( |
5027 | stage_desc->source, /* pSrcData */ |
5028 | strlen(stage_desc->source), /* SrcDataSize */ |
5029 | NULL, /* pSourceName */ |
5030 | NULL, /* pDefines */ |
5031 | NULL, /* pInclude */ |
stage_desc->entry ? stage_desc->entry : "main",    /* pEntryPoint */
5033 | target, /* pTarget (vs_5_0 or ps_5_0) */ |
5034 | D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR | D3DCOMPILE_OPTIMIZATION_LEVEL3, /* Flags1 */ |
5035 | 0, /* Flags2 */ |
5036 | &output, /* ppCode */ |
5037 | &errors); /* ppErrorMsgs */ |
5038 | if (errors) { |
5039 | SOKOL_LOG((LPCSTR)ID3D10Blob_GetBufferPointer(errors)); |
5040 | ID3D10Blob_Release(errors); errors = NULL; |
5041 | } |
5042 | return output; |
5043 | } |
5044 | #endif |
5045 | |
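/* round 'val' up to the next multiple of 'round_to' (a power of two); used below to pad
   constant-buffer sizes to the 16-byte multiples required by D3D11 */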
5046 | #define _sg_d3d11_roundup(val, round_to) (((val)+((round_to)-1))&~((round_to)-1)) |
5047 | |
5048 | _SOKOL_PRIVATE void _sg_create_shader(_sg_shader* shd, const sg_shader_desc* desc) { |
5049 | SOKOL_ASSERT(shd && desc); |
5050 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_ALLOC); |
5051 | SOKOL_ASSERT(!shd->d3d11_vs && !shd->d3d11_fs && !shd->d3d11_vs_blob); |
5052 | HRESULT hr; |
5053 | |
5054 | /* shader stage uniform blocks and image slots */ |
5055 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
5056 | const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs; |
5057 | _sg_shader_stage* stage = &shd->stage[stage_index]; |
5058 | SOKOL_ASSERT(stage->num_uniform_blocks == 0); |
5059 | for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { |
5060 | const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; |
5061 | if (0 == ub_desc->size) { |
5062 | break; |
5063 | } |
5064 | _sg_uniform_block* ub = &stage->uniform_blocks[ub_index]; |
5065 | ub->size = ub_desc->size; |
5066 | |
5067 | /* create a D3D constant buffer */ |
5068 | SOKOL_ASSERT(!stage->d3d11_cbs[ub_index]); |
5069 | D3D11_BUFFER_DESC cb_desc; |
5070 | memset(&cb_desc, 0, sizeof(cb_desc)); |
5071 | cb_desc.ByteWidth = _sg_d3d11_roundup(ub->size, 16); |
5072 | cb_desc.Usage = D3D11_USAGE_DEFAULT; |
5073 | cb_desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER; |
5074 | hr = ID3D11Device_CreateBuffer(_sg_d3d11.dev, &cb_desc, NULL, &stage->d3d11_cbs[ub_index]); |
5075 | SOKOL_ASSERT(SUCCEEDED(hr) && stage->d3d11_cbs[ub_index]); |
5076 | |
5077 | stage->num_uniform_blocks++; |
5078 | } |
5079 | SOKOL_ASSERT(stage->num_images == 0); |
5080 | for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { |
5081 | const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; |
5082 | if (img_desc->type == _SG_IMAGETYPE_DEFAULT) { |
5083 | break; |
5084 | } |
5085 | stage->images[img_index].type = img_desc->type; |
5086 | stage->num_images++; |
5087 | } |
5088 | } |
5089 | |
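/* create the shader stages either from precompiled byte code, or by compiling HLSL source
   at runtime (the latter requires SOKOL_D3D11_SHADER_COMPILER) */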
5090 | const void* vs_ptr = 0, *fs_ptr = 0; |
5091 | SIZE_T vs_length = 0, fs_length = 0; |
5092 | #if defined(SOKOL_D3D11_SHADER_COMPILER) |
5093 | ID3DBlob* vs_blob = 0, *fs_blob = 0; |
5094 | #endif |
5095 | if (desc->vs.byte_code && desc->fs.byte_code) { |
5096 | /* create from byte code */ |
5097 | vs_ptr = desc->vs.byte_code; |
5098 | fs_ptr = desc->fs.byte_code; |
5099 | vs_length = desc->vs.byte_code_size; |
5100 | fs_length = desc->fs.byte_code_size; |
5101 | } |
5102 | else { |
5103 | /* compile shader code */ |
5104 | #if defined(SOKOL_D3D11_SHADER_COMPILER) |
vs_blob = _sg_d3d11_compile_shader(&desc->vs, "vs_5_0");
fs_blob = _sg_d3d11_compile_shader(&desc->fs, "ps_5_0");
5107 | if (vs_blob && fs_blob) { |
5108 | vs_ptr = ID3D10Blob_GetBufferPointer(vs_blob); |
5109 | vs_length = ID3D10Blob_GetBufferSize(vs_blob); |
5110 | fs_ptr = ID3D10Blob_GetBufferPointer(fs_blob); |
5111 | fs_length = ID3D10Blob_GetBufferSize(fs_blob); |
5112 | } |
5113 | #endif |
5114 | } |
5115 | if (vs_ptr && fs_ptr && (vs_length > 0) && (fs_length > 0)) { |
5116 | /* create the D3D vertex- and pixel-shader objects */ |
5117 | hr = ID3D11Device_CreateVertexShader(_sg_d3d11.dev, vs_ptr, vs_length, NULL, &shd->d3d11_vs); |
5118 | SOKOL_ASSERT(SUCCEEDED(hr) && shd->d3d11_vs); |
5119 | hr = ID3D11Device_CreatePixelShader(_sg_d3d11.dev, fs_ptr, fs_length, NULL, &shd->d3d11_fs); |
5120 | SOKOL_ASSERT(SUCCEEDED(hr) && shd->d3d11_fs); |
5121 | |
/* the vertex-shader byte code must be kept around, it's needed later in _sg_create_pipeline to create the input layout object */
5123 | shd->d3d11_vs_blob_length = (int)vs_length; |
5124 | shd->d3d11_vs_blob = SOKOL_MALLOC((int)vs_length); |
5125 | SOKOL_ASSERT(shd->d3d11_vs_blob); |
5126 | memcpy(shd->d3d11_vs_blob, vs_ptr, vs_length); |
5127 | |
5128 | shd->slot.state = SG_RESOURCESTATE_VALID; |
5129 | } |
5130 | else { |
5131 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
5132 | } |
5133 | #if defined(SOKOL_D3D11_SHADER_COMPILER) |
5134 | if (vs_blob) { |
5135 | ID3D10Blob_Release(vs_blob); vs_blob = 0; |
5136 | } |
5137 | if (fs_blob) { |
5138 | ID3D10Blob_Release(fs_blob); fs_blob = 0; |
5139 | } |
5140 | #endif |
5141 | } |
5142 | |
5143 | _SOKOL_PRIVATE void _sg_destroy_shader(_sg_shader* shd) { |
5144 | SOKOL_ASSERT(shd); |
5145 | if (shd->d3d11_vs) { |
5146 | ID3D11VertexShader_Release(shd->d3d11_vs); |
5147 | } |
5148 | if (shd->d3d11_fs) { |
5149 | ID3D11PixelShader_Release(shd->d3d11_fs); |
5150 | } |
5151 | if (shd->d3d11_vs_blob) { |
5152 | SOKOL_FREE(shd->d3d11_vs_blob); |
5153 | } |
5154 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
5155 | _sg_shader_stage* stage = &shd->stage[stage_index]; |
5156 | for (int ub_index = 0; ub_index < stage->num_uniform_blocks; ub_index++) { |
5157 | if (stage->d3d11_cbs[ub_index]) { |
5158 | ID3D11Buffer_Release(stage->d3d11_cbs[ub_index]); |
5159 | } |
5160 | } |
5161 | } |
5162 | _sg_init_shader_slot(shd); |
5163 | } |
5164 | |
5165 | _SOKOL_PRIVATE void _sg_create_pipeline(_sg_pipeline* pip, _sg_shader* shd, const sg_pipeline_desc* desc) { |
5166 | SOKOL_ASSERT(pip && shd && desc); |
5167 | SOKOL_ASSERT(pip->slot.state == SG_RESOURCESTATE_ALLOC); |
5168 | SOKOL_ASSERT(desc->shader.id == shd->slot.id); |
5169 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_VALID); |
5170 | SOKOL_ASSERT(shd->d3d11_vs_blob && shd->d3d11_vs_blob_length > 0); |
5171 | SOKOL_ASSERT(!pip->d3d11_il && !pip->d3d11_rs && !pip->d3d11_dss && !pip->d3d11_bs); |
5172 | HRESULT hr; |
5173 | |
5174 | pip->shader = shd; |
5175 | pip->shader_id = desc->shader; |
5176 | pip->index_type = _sg_def(desc->index_type, SG_INDEXTYPE_NONE); |
5177 | pip->color_attachment_count = _sg_def(desc->blend.color_attachment_count, 1); |
5178 | pip->color_format = _sg_def(desc->blend.color_format, SG_PIXELFORMAT_RGBA8); |
5179 | pip->depth_format = _sg_def(desc->blend.depth_format, SG_PIXELFORMAT_DEPTHSTENCIL); |
5180 | pip->sample_count = _sg_def(desc->rasterizer.sample_count, 1); |
5181 | pip->d3d11_index_format = _sg_d3d11_index_format(pip->index_type); |
5182 | pip->d3d11_topology = _sg_d3d11_primitive_topology(_sg_def(desc->primitive_type, SG_PRIMITIVETYPE_TRIANGLES)); |
5183 | for (int i = 0; i < 4; i++) { |
5184 | pip->blend_color[i] = desc->blend.blend_color[i]; |
5185 | } |
5186 | pip->d3d11_stencil_ref = desc->depth_stencil.stencil_ref; |
5187 | |
5188 | /* create input layout object */ |
5189 | int auto_offset[SG_MAX_SHADERSTAGE_BUFFERS]; |
5190 | for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { |
5191 | auto_offset[layout_index] = 0; |
5192 | } |
5193 | bool use_auto_offset = true; |
5194 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
5195 | /* to use computed offsets, all attr offsets must be 0 */ |
5196 | if (desc->layout.attrs[attr_index].offset != 0) { |
5197 | use_auto_offset = false; |
5198 | } |
5199 | } |
5200 | D3D11_INPUT_ELEMENT_DESC d3d11_comps[SG_MAX_VERTEX_ATTRIBUTES]; |
5201 | memset(d3d11_comps, 0, sizeof(d3d11_comps)); |
5202 | int attr_index = 0; |
5203 | for (; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
5204 | const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; |
5205 | if (a_desc->format == SG_VERTEXFORMAT_INVALID) { |
5206 | break; |
5207 | } |
5208 | SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); |
5209 | const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[a_desc->buffer_index]; |
5210 | const sg_vertex_step step_func = _sg_def(l_desc->step_func, SG_VERTEXSTEP_PER_VERTEX); |
5211 | const int step_rate = _sg_def(l_desc->step_rate, 1); |
5212 | D3D11_INPUT_ELEMENT_DESC* d3d11_comp = &d3d11_comps[attr_index]; |
5213 | d3d11_comp->SemanticName = a_desc->sem_name; |
5214 | d3d11_comp->SemanticIndex = a_desc->sem_index; |
5215 | d3d11_comp->Format = _sg_d3d11_vertex_format(a_desc->format); |
5216 | d3d11_comp->InputSlot = a_desc->buffer_index; |
5217 | d3d11_comp->AlignedByteOffset = use_auto_offset ? auto_offset[a_desc->buffer_index] : a_desc->offset; |
5218 | d3d11_comp->InputSlotClass = _sg_d3d11_input_classification(step_func); |
5219 | if (SG_VERTEXSTEP_PER_INSTANCE == step_func) { |
5220 | d3d11_comp->InstanceDataStepRate = step_rate; |
5221 | } |
5222 | auto_offset[a_desc->buffer_index] += _sg_vertexformat_bytesize(a_desc->format); |
5223 | pip->vertex_layout_valid[a_desc->buffer_index] = true; |
5224 | } |
5225 | for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { |
5226 | if (pip->vertex_layout_valid[layout_index]) { |
5227 | const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index]; |
5228 | const int stride = l_desc->stride ? l_desc->stride : auto_offset[layout_index]; |
5229 | SOKOL_ASSERT(stride > 0); |
5230 | pip->d3d11_vb_strides[layout_index] = stride; |
5231 | } |
5232 | else { |
5233 | pip->d3d11_vb_strides[layout_index] = 0; |
5234 | } |
5235 | } |
5236 | hr = ID3D11Device_CreateInputLayout(_sg_d3d11.dev, |
5237 | d3d11_comps, /* pInputElementDesc */ |
5238 | attr_index, /* NumElements */ |
5239 | shd->d3d11_vs_blob, /* pShaderByteCodeWithInputSignature */ |
5240 | shd->d3d11_vs_blob_length, /* BytecodeLength */ |
5241 | &pip->d3d11_il); |
5242 | SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11_il); |
5243 | |
5244 | /* create rasterizer state */ |
5245 | D3D11_RASTERIZER_DESC rs_desc; |
5246 | memset(&rs_desc, 0, sizeof(rs_desc)); |
5247 | rs_desc.FillMode = D3D11_FILL_SOLID; |
5248 | rs_desc.CullMode = _sg_d3d11_cull_mode(_sg_def(desc->rasterizer.cull_mode, SG_CULLMODE_NONE)); |
5249 | rs_desc.FrontCounterClockwise = _sg_def(desc->rasterizer.face_winding, SG_FACEWINDING_CW) == SG_FACEWINDING_CCW; |
5250 | rs_desc.DepthBias = (INT) desc->rasterizer.depth_bias; |
5251 | rs_desc.DepthBiasClamp = desc->rasterizer.depth_bias_clamp; |
5252 | rs_desc.SlopeScaledDepthBias = desc->rasterizer.depth_bias_slope_scale; |
5253 | rs_desc.DepthClipEnable = TRUE; |
5254 | rs_desc.ScissorEnable = TRUE; |
5255 | rs_desc.MultisampleEnable = _sg_def(desc->rasterizer.sample_count, 1) > 1; |
5256 | rs_desc.AntialiasedLineEnable = FALSE; |
5257 | hr = ID3D11Device_CreateRasterizerState(_sg_d3d11.dev, &rs_desc, &pip->d3d11_rs); |
5258 | SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11_rs); |
5259 | |
5260 | /* create depth-stencil state */ |
5261 | D3D11_DEPTH_STENCIL_DESC dss_desc; |
5262 | memset(&dss_desc, 0, sizeof(dss_desc)); |
5263 | dss_desc.DepthEnable = TRUE; |
5264 | dss_desc.DepthWriteMask = desc->depth_stencil.depth_write_enabled ? D3D11_DEPTH_WRITE_MASK_ALL : D3D11_DEPTH_WRITE_MASK_ZERO; |
5265 | dss_desc.DepthFunc = _sg_d3d11_compare_func(_sg_def(desc->depth_stencil.depth_compare_func, SG_COMPAREFUNC_ALWAYS)); |
5266 | dss_desc.StencilEnable = desc->depth_stencil.stencil_enabled; |
5267 | dss_desc.StencilReadMask = desc->depth_stencil.stencil_read_mask; |
5268 | dss_desc.StencilWriteMask = desc->depth_stencil.stencil_write_mask; |
5269 | const sg_stencil_state* sf = &desc->depth_stencil.stencil_front; |
5270 | dss_desc.FrontFace.StencilFailOp = _sg_d3d11_stencil_op(_sg_def(sf->fail_op, SG_STENCILOP_KEEP)); |
5271 | dss_desc.FrontFace.StencilDepthFailOp = _sg_d3d11_stencil_op(_sg_def(sf->depth_fail_op, SG_STENCILOP_KEEP)); |
5272 | dss_desc.FrontFace.StencilPassOp = _sg_d3d11_stencil_op(_sg_def(sf->pass_op, SG_STENCILOP_KEEP)); |
5273 | dss_desc.FrontFace.StencilFunc = _sg_d3d11_compare_func(_sg_def(sf->compare_func, SG_COMPAREFUNC_ALWAYS)); |
5274 | const sg_stencil_state* sb = &desc->depth_stencil.stencil_back; |
5275 | dss_desc.BackFace.StencilFailOp = _sg_d3d11_stencil_op(_sg_def(sb->fail_op, SG_STENCILOP_KEEP)); |
5276 | dss_desc.BackFace.StencilDepthFailOp = _sg_d3d11_stencil_op(_sg_def(sb->depth_fail_op, SG_STENCILOP_KEEP)); |
5277 | dss_desc.BackFace.StencilPassOp = _sg_d3d11_stencil_op(_sg_def(sb->pass_op, SG_STENCILOP_KEEP)); |
5278 | dss_desc.BackFace.StencilFunc = _sg_d3d11_compare_func(_sg_def(sb->compare_func, SG_COMPAREFUNC_ALWAYS)); |
5279 | hr = ID3D11Device_CreateDepthStencilState(_sg_d3d11.dev, &dss_desc, &pip->d3d11_dss); |
5280 | SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11_dss); |
5281 | |
5282 | /* create blend state */ |
5283 | D3D11_BLEND_DESC bs_desc; |
5284 | memset(&bs_desc, 0, sizeof(bs_desc)); |
5285 | bs_desc.AlphaToCoverageEnable = desc->rasterizer.alpha_to_coverage_enabled; |
5286 | bs_desc.IndependentBlendEnable = FALSE; |
5287 | bs_desc.RenderTarget[0].BlendEnable = desc->blend.enabled; |
5288 | bs_desc.RenderTarget[0].SrcBlend = _sg_d3d11_blend_factor(_sg_def(desc->blend.src_factor_rgb, SG_BLENDFACTOR_ONE)); |
5289 | bs_desc.RenderTarget[0].DestBlend = _sg_d3d11_blend_factor(_sg_def(desc->blend.dst_factor_rgb, SG_BLENDFACTOR_ZERO)); |
5290 | bs_desc.RenderTarget[0].BlendOp = _sg_d3d11_blend_op(_sg_def(desc->blend.op_rgb, SG_BLENDOP_ADD)); |
5291 | bs_desc.RenderTarget[0].SrcBlendAlpha = _sg_d3d11_blend_factor(_sg_def(desc->blend.src_factor_alpha, SG_BLENDFACTOR_ONE)); |
5292 | bs_desc.RenderTarget[0].DestBlendAlpha = _sg_d3d11_blend_factor(_sg_def(desc->blend.dst_factor_alpha, SG_BLENDFACTOR_ZERO)); |
5293 | bs_desc.RenderTarget[0].BlendOpAlpha = _sg_d3d11_blend_op(_sg_def(desc->blend.op_alpha, SG_BLENDOP_ADD)); |
5294 | bs_desc.RenderTarget[0].RenderTargetWriteMask = _sg_d3d11_color_write_mask(_sg_def((sg_color_mask)desc->blend.color_write_mask, SG_COLORMASK_RGBA)); |
5295 | hr = ID3D11Device_CreateBlendState(_sg_d3d11.dev, &bs_desc, &pip->d3d11_bs); |
5296 | SOKOL_ASSERT(SUCCEEDED(hr) && pip->d3d11_bs); |
5297 | |
5298 | pip->slot.state = SG_RESOURCESTATE_VALID; |
5299 | } |
5300 | |
5301 | _SOKOL_PRIVATE void _sg_destroy_pipeline(_sg_pipeline* pip) { |
5302 | SOKOL_ASSERT(pip); |
5303 | if (pip->d3d11_il) { |
5304 | ID3D11InputLayout_Release(pip->d3d11_il); |
5305 | } |
5306 | if (pip->d3d11_rs) { |
5307 | ID3D11RasterizerState_Release(pip->d3d11_rs); |
5308 | } |
5309 | if (pip->d3d11_dss) { |
5310 | ID3D11DepthStencilState_Release(pip->d3d11_dss); |
5311 | } |
5312 | if (pip->d3d11_bs) { |
5313 | ID3D11BlendState_Release(pip->d3d11_bs); |
5314 | } |
5315 | _sg_init_pipeline_slot(pip); |
5316 | } |
5317 | |
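/* pass creation: wrap the pass attachment images into D3D11 render-target- and depth-stencil-views */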
5318 | _SOKOL_PRIVATE void _sg_create_pass(_sg_pass* pass, _sg_image** att_images, const sg_pass_desc* desc) { |
5319 | SOKOL_ASSERT(pass && desc); |
5320 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_ALLOC); |
5321 | SOKOL_ASSERT(att_images && att_images[0]); |
5322 | SOKOL_ASSERT(_sg_d3d11.dev); |
5323 | |
5324 | const sg_attachment_desc* att_desc; |
5325 | _sg_attachment* att; |
5326 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
5327 | SOKOL_ASSERT(0 == pass->color_atts[i].image); |
5328 | SOKOL_ASSERT(pass->d3d11_rtvs[i] == 0); |
5329 | att_desc = &desc->color_attachments[i]; |
5330 | if (att_desc->image.id != SG_INVALID_ID) { |
5331 | pass->num_color_atts++; |
5332 | SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id)); |
5333 | SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->pixel_format)); |
5334 | att = &pass->color_atts[i]; |
5335 | SOKOL_ASSERT((att->image == 0) && (att->image_id.id == SG_INVALID_ID)); |
5336 | att->image = att_images[i]; |
5337 | att->image_id = att_desc->image; |
5338 | att->mip_level = att_desc->mip_level; |
5339 | att->slice = att_desc->slice; |
5340 | |
5341 | /* create D3D11 render-target-view */ |
5342 | ID3D11Resource* d3d11_res = 0; |
5343 | const bool is_msaa = att->image->sample_count > 1; |
5344 | D3D11_RENDER_TARGET_VIEW_DESC d3d11_rtv_desc; |
5345 | memset(&d3d11_rtv_desc, 0, sizeof(d3d11_rtv_desc)); |
5346 | d3d11_rtv_desc.Format = att->image->d3d11_format; |
5347 | switch (att->image->type) { |
5348 | case SG_IMAGETYPE_2D: |
5349 | if (is_msaa) { |
5350 | d3d11_res = (ID3D11Resource*) att->image->d3d11_texmsaa; |
5351 | d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DMS; |
5352 | } |
5353 | else { |
5354 | d3d11_res = (ID3D11Resource*) att->image->d3d11_tex2d; |
5355 | d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D; |
5356 | d3d11_rtv_desc.Texture2D.MipSlice = att->mip_level; |
5357 | } |
5358 | break; |
5359 | case SG_IMAGETYPE_CUBE: |
5360 | case SG_IMAGETYPE_ARRAY: |
5361 | if (is_msaa) { |
5362 | d3d11_res = (ID3D11Resource*) att->image->d3d11_texmsaa; |
5363 | d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY; |
5364 | d3d11_rtv_desc.Texture2DMSArray.FirstArraySlice = att->slice; |
5365 | d3d11_rtv_desc.Texture2DMSArray.ArraySize = 1; |
5366 | } |
5367 | else { |
5368 | d3d11_res = (ID3D11Resource*) att->image->d3d11_tex2d; |
5369 | d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2DARRAY; |
5370 | d3d11_rtv_desc.Texture2DArray.MipSlice = att->mip_level; |
5371 | d3d11_rtv_desc.Texture2DArray.FirstArraySlice = att->slice; |
5372 | d3d11_rtv_desc.Texture2DArray.ArraySize = 1; |
5373 | } |
5374 | break; |
5375 | case SG_IMAGETYPE_3D: |
5376 | SOKOL_ASSERT(!is_msaa); |
5377 | d3d11_res = (ID3D11Resource*) att->image->d3d11_tex3d; |
5378 | d3d11_rtv_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE3D; |
5379 | d3d11_rtv_desc.Texture3D.MipSlice = att->mip_level; |
5380 | d3d11_rtv_desc.Texture3D.FirstWSlice = att->slice; |
5381 | d3d11_rtv_desc.Texture3D.WSize = 1; |
5382 | break; |
5383 | default: |
5384 | SOKOL_UNREACHABLE; break; |
5385 | } |
5386 | SOKOL_ASSERT(d3d11_res); |
5387 | HRESULT hr = ID3D11Device_CreateRenderTargetView(_sg_d3d11.dev, d3d11_res, &d3d11_rtv_desc, &pass->d3d11_rtvs[i]); |
5388 | _SOKOL_UNUSED(hr); |
5389 | SOKOL_ASSERT(SUCCEEDED(hr) && pass->d3d11_rtvs[i]); |
5390 | } |
5391 | } |
5392 | |
5393 | /* optional depth-stencil image */ |
5394 | SOKOL_ASSERT(0 == pass->ds_att.image); |
5395 | SOKOL_ASSERT(pass->d3d11_dsv == 0); |
5396 | att_desc = &desc->depth_stencil_attachment; |
5397 | const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; |
5398 | if (att_desc->image.id != SG_INVALID_ID) { |
5399 | SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); |
5400 | SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->pixel_format)); |
5401 | att = &pass->ds_att; |
5402 | SOKOL_ASSERT((att->image == 0) && (att->image_id.id == SG_INVALID_ID)); |
5403 | att->image = att_images[ds_img_index]; |
5404 | att->image_id = att_desc->image; |
5405 | att->mip_level = att_desc->mip_level; |
5406 | att->slice = att_desc->slice; |
5407 | |
5408 | /* create D3D11 depth-stencil-view */ |
5409 | D3D11_DEPTH_STENCIL_VIEW_DESC d3d11_dsv_desc; |
5410 | memset(&d3d11_dsv_desc, 0, sizeof(d3d11_dsv_desc)); |
5411 | d3d11_dsv_desc.Format = att->image->d3d11_format; |
5412 | const bool is_msaa = att->image->sample_count > 1; |
5413 | if (is_msaa) { |
5414 | d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS; |
5415 | } |
5416 | else { |
5417 | d3d11_dsv_desc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D; |
5418 | } |
5419 | ID3D11Resource* d3d11_res = (ID3D11Resource*) att->image->d3d11_texds; |
5420 | SOKOL_ASSERT(d3d11_res); |
5421 | HRESULT hr = ID3D11Device_CreateDepthStencilView(_sg_d3d11.dev, d3d11_res, &d3d11_dsv_desc, &pass->d3d11_dsv); |
5422 | _SOKOL_UNUSED(hr); |
5423 | SOKOL_ASSERT(SUCCEEDED(hr) && pass->d3d11_dsv); |
5424 | } |
5425 | pass->slot.state = SG_RESOURCESTATE_VALID; |
5426 | } |
5427 | |
5428 | _SOKOL_PRIVATE void _sg_destroy_pass(_sg_pass* pass) { |
5429 | SOKOL_ASSERT(pass); |
5430 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
5431 | if (pass->d3d11_rtvs[i]) { |
5432 | ID3D11RenderTargetView_Release(pass->d3d11_rtvs[i]); |
5433 | } |
5434 | } |
5435 | if (pass->d3d11_dsv) { |
5436 | ID3D11DepthStencilView_Release(pass->d3d11_dsv); |
5437 | } |
5438 | _sg_init_pass_slot(pass); |
5439 | } |
5440 | |
5441 | _SOKOL_PRIVATE void _sg_begin_pass(_sg_pass* pass, const sg_pass_action* action, int w, int h) { |
5442 | SOKOL_ASSERT(action); |
5443 | SOKOL_ASSERT(!_sg_d3d11.in_pass); |
5444 | _sg_d3d11.in_pass = true; |
5445 | _sg_d3d11.cur_width = w; |
5446 | _sg_d3d11.cur_height = h; |
5447 | if (pass) { |
5448 | _sg_d3d11.cur_pass = pass; |
5449 | _sg_d3d11.cur_pass_id.id = pass->slot.id; |
5450 | _sg_d3d11.num_rtvs = 0; |
5451 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
5452 | _sg_d3d11.cur_rtvs[i] = pass->d3d11_rtvs[i]; |
5453 | if (_sg_d3d11.cur_rtvs[i]) { |
5454 | _sg_d3d11.num_rtvs++; |
5455 | } |
5456 | } |
5457 | _sg_d3d11.cur_dsv = pass->d3d11_dsv; |
5458 | } |
5459 | else { |
5460 | /* render to default frame buffer */ |
5461 | _sg_d3d11.cur_pass = 0; |
5462 | _sg_d3d11.cur_pass_id.id = SG_INVALID_ID; |
5463 | _sg_d3d11.num_rtvs = 1; |
5464 | _sg_d3d11.cur_rtvs[0] = (ID3D11RenderTargetView*) _sg_d3d11.rtv_cb(); |
5465 | for (int i = 1; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
5466 | _sg_d3d11.cur_rtvs[i] = 0; |
5467 | } |
5468 | _sg_d3d11.cur_dsv = (ID3D11DepthStencilView*) _sg_d3d11.dsv_cb(); |
5469 | SOKOL_ASSERT(_sg_d3d11.cur_rtvs[0] && _sg_d3d11.cur_dsv); |
5470 | } |
5471 | /* apply the render-target- and depth-stencil-views */ |
5472 | ID3D11DeviceContext_OMSetRenderTargets(_sg_d3d11.ctx, SG_MAX_COLOR_ATTACHMENTS, _sg_d3d11.cur_rtvs, _sg_d3d11.cur_dsv); |
5473 | |
/* set viewport and scissor rect to cover the whole render target */
5475 | D3D11_VIEWPORT vp; |
5476 | memset(&vp, 0, sizeof(vp)); |
5477 | vp.Width = (FLOAT) w; |
5478 | vp.Height = (FLOAT) h; |
5479 | vp.MaxDepth = 1.0f; |
5480 | ID3D11DeviceContext_RSSetViewports(_sg_d3d11.ctx, 1, &vp); |
5481 | D3D11_RECT rect; |
5482 | rect.left = 0; |
5483 | rect.top = 0; |
5484 | rect.right = w; |
5485 | rect.bottom = h; |
5486 | ID3D11DeviceContext_RSSetScissorRects(_sg_d3d11.ctx, 1, &rect); |
5487 | |
5488 | /* perform clear action */ |
5489 | for (int i = 0; i < _sg_d3d11.num_rtvs; i++) { |
5490 | if (action->colors[i].action == SG_ACTION_CLEAR) { |
5491 | ID3D11DeviceContext_ClearRenderTargetView(_sg_d3d11.ctx, _sg_d3d11.cur_rtvs[i], action->colors[i].val); |
5492 | } |
5493 | } |
5494 | UINT ds_flags = 0; |
5495 | if (action->depth.action == SG_ACTION_CLEAR) { |
5496 | ds_flags |= D3D11_CLEAR_DEPTH; |
5497 | } |
5498 | if (action->stencil.action == SG_ACTION_CLEAR) { |
5499 | ds_flags |= D3D11_CLEAR_STENCIL; |
5500 | } |
5501 | if ((0 != ds_flags) && _sg_d3d11.cur_dsv) { |
5502 | ID3D11DeviceContext_ClearDepthStencilView(_sg_d3d11.ctx, _sg_d3d11.cur_dsv, ds_flags, action->depth.val, action->stencil.val); |
5503 | } |
5504 | } |
5505 | |
5506 | /* D3D11CalcSubresource only exists for C++ */ |
5507 | _SOKOL_PRIVATE UINT _sg_d3d11_calcsubresource(UINT mip_slice, UINT array_slice, UINT mip_levels) { |
5508 | return mip_slice + array_slice * mip_levels; |
5509 | } |
5510 | |
5511 | _SOKOL_PRIVATE void _sg_end_pass() { |
5512 | SOKOL_ASSERT(_sg_d3d11.in_pass && _sg_d3d11.ctx); |
5513 | _sg_d3d11.in_pass = false; |
5514 | |
5515 | /* need to resolve MSAA render target into texture? */ |
5516 | if (_sg_d3d11.cur_pass) { |
5517 | SOKOL_ASSERT(_sg_d3d11.cur_pass->slot.id == _sg_d3d11.cur_pass_id.id); |
5518 | for (int i = 0; i < _sg_d3d11.num_rtvs; i++) { |
5519 | _sg_attachment* att = &_sg_d3d11.cur_pass->color_atts[i]; |
5520 | SOKOL_ASSERT(att->image && (att->image->slot.id == att->image_id.id)); |
5521 | if (att->image->sample_count > 1) { |
5522 | SOKOL_ASSERT(att->image->d3d11_tex2d && att->image->d3d11_texmsaa && !att->image->d3d11_tex3d); |
5523 | SOKOL_ASSERT(DXGI_FORMAT_UNKNOWN != att->image->d3d11_format); |
5524 | const _sg_image* img = att->image; |
5525 | UINT subres = _sg_d3d11_calcsubresource(att->mip_level, att->slice, img->num_mipmaps); |
5526 | ID3D11DeviceContext_ResolveSubresource(_sg_d3d11.ctx, |
5527 | (ID3D11Resource*) img->d3d11_tex2d, /* pDstResource */ |
5528 | subres, /* DstSubresource */ |
5529 | (ID3D11Resource*) img->d3d11_texmsaa, /* pSrcResource */ |
5530 | subres, /* SrcSubresource */ |
5531 | img->d3d11_format); |
5532 | } |
5533 | } |
5534 | } |
5535 | _sg_d3d11.cur_pass = 0; |
5536 | _sg_d3d11.cur_pass_id.id = SG_INVALID_ID; |
5537 | _sg_d3d11.cur_pipeline = 0; |
5538 | _sg_d3d11.cur_pipeline_id.id = SG_INVALID_ID; |
5539 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
5540 | _sg_d3d11.cur_rtvs[i] = 0; |
5541 | } |
5542 | _sg_d3d11.cur_dsv = 0; |
5543 | _sg_d3d11_clear_state(); |
5544 | } |
5545 | |
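/* viewport and scissor rects use D3D11's top-left-origin convention, flip the
   y-coordinate if the caller uses a bottom-left origin (origin_top_left == false) */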
5546 | _SOKOL_PRIVATE void _sg_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { |
5547 | SOKOL_ASSERT(_sg_d3d11.ctx); |
5548 | SOKOL_ASSERT(_sg_d3d11.in_pass); |
5549 | D3D11_VIEWPORT vp; |
5550 | vp.TopLeftX = (FLOAT) x; |
5551 | vp.TopLeftY = (FLOAT) (origin_top_left ? y : (_sg_d3d11.cur_height - (y + h))); |
5552 | vp.Width = (FLOAT) w; |
5553 | vp.Height = (FLOAT) h; |
5554 | vp.MinDepth = 0.0f; |
5555 | vp.MaxDepth = 1.0f; |
5556 | ID3D11DeviceContext_RSSetViewports(_sg_d3d11.ctx, 1, &vp); |
5557 | } |
5558 | |
5559 | _SOKOL_PRIVATE void _sg_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { |
5560 | SOKOL_ASSERT(_sg_d3d11.ctx); |
5561 | SOKOL_ASSERT(_sg_d3d11.in_pass); |
5562 | D3D11_RECT rect; |
5563 | rect.left = x; |
5564 | rect.top = (origin_top_left ? y : (_sg_d3d11.cur_height - (y + h))); |
5565 | rect.right = x + w; |
5566 | rect.bottom = origin_top_left ? (y + h) : (_sg_d3d11.cur_height - y); |
5567 | ID3D11DeviceContext_RSSetScissorRects(_sg_d3d11.ctx, 1, &rect); |
5568 | } |
5569 | |
5570 | _SOKOL_PRIVATE void _sg_apply_draw_state( |
5571 | _sg_pipeline* pip, |
5572 | _sg_buffer** vbs, const int* vb_offsets, int num_vbs, |
5573 | _sg_buffer* ib, int ib_offset, |
5574 | _sg_image** vs_imgs, int num_vs_imgs, |
5575 | _sg_image** fs_imgs, int num_fs_imgs) |
5576 | { |
5577 | SOKOL_ASSERT(pip); |
5578 | SOKOL_ASSERT(pip->shader); |
5579 | SOKOL_ASSERT(_sg_d3d11.ctx); |
5580 | SOKOL_ASSERT(_sg_d3d11.in_pass); |
5581 | SOKOL_ASSERT(pip->d3d11_rs && pip->d3d11_bs && pip->d3d11_dss && pip->d3d11_il); |
5582 | |
5583 | _sg_d3d11.cur_pipeline = pip; |
5584 | _sg_d3d11.cur_pipeline_id.id = pip->slot.id; |
5585 | _sg_d3d11.use_indexed_draw = (pip->d3d11_index_format != DXGI_FORMAT_UNKNOWN); |
5586 | |
5587 | /* gather all the D3D11 resources into arrays */ |
5588 | ID3D11Buffer* d3d11_ib = ib ? ib->d3d11_buf : 0; |
5589 | ID3D11Buffer* d3d11_vbs[SG_MAX_SHADERSTAGE_BUFFERS]; |
5590 | UINT d3d11_vb_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; |
5591 | ID3D11ShaderResourceView* d3d11_vs_srvs[SG_MAX_SHADERSTAGE_IMAGES]; |
5592 | ID3D11SamplerState* d3d11_vs_smps[SG_MAX_SHADERSTAGE_IMAGES]; |
5593 | ID3D11ShaderResourceView* d3d11_fs_srvs[SG_MAX_SHADERSTAGE_IMAGES]; |
5594 | ID3D11SamplerState* d3d11_fs_smps[SG_MAX_SHADERSTAGE_IMAGES]; |
5595 | int i; |
5596 | for (i = 0; i < num_vbs; i++) { |
5597 | SOKOL_ASSERT(vbs[i]->d3d11_buf); |
5598 | d3d11_vbs[i] = vbs[i]->d3d11_buf; |
5599 | d3d11_vb_offsets[i] = vb_offsets[i]; |
5600 | } |
5601 | for (; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { |
5602 | d3d11_vbs[i] = 0; |
5603 | d3d11_vb_offsets[i] = 0; |
5604 | } |
5605 | for (i = 0; i < num_vs_imgs; i++) { |
5606 | SOKOL_ASSERT(vs_imgs[i]->d3d11_srv); |
5607 | SOKOL_ASSERT(vs_imgs[i]->d3d11_smp); |
5608 | d3d11_vs_srvs[i] = vs_imgs[i]->d3d11_srv; |
5609 | d3d11_vs_smps[i] = vs_imgs[i]->d3d11_smp; |
5610 | } |
5611 | for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
5612 | d3d11_vs_srvs[i] = 0; |
5613 | d3d11_vs_smps[i] = 0; |
5614 | } |
5615 | for (i = 0; i < num_fs_imgs; i++) { |
5616 | SOKOL_ASSERT(fs_imgs[i]->d3d11_srv); |
5617 | SOKOL_ASSERT(fs_imgs[i]->d3d11_smp); |
5618 | d3d11_fs_srvs[i] = fs_imgs[i]->d3d11_srv; |
5619 | d3d11_fs_smps[i] = fs_imgs[i]->d3d11_smp; |
5620 | } |
5621 | for (; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
5622 | d3d11_fs_srvs[i] = 0; |
5623 | d3d11_fs_smps[i] = 0; |
5624 | } |
5625 | |
5626 | /* FIXME: is it worth it to implement a state cache here? measure! */ |
5627 | ID3D11DeviceContext_RSSetState(_sg_d3d11.ctx, pip->d3d11_rs); |
5628 | ID3D11DeviceContext_OMSetDepthStencilState(_sg_d3d11.ctx, pip->d3d11_dss, pip->d3d11_stencil_ref); |
5629 | ID3D11DeviceContext_OMSetBlendState(_sg_d3d11.ctx, pip->d3d11_bs, pip->blend_color, 0xFFFFFFFF); |
5630 | |
5631 | ID3D11DeviceContext_IASetVertexBuffers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_BUFFERS, d3d11_vbs, pip->d3d11_vb_strides, d3d11_vb_offsets); |
5632 | ID3D11DeviceContext_IASetPrimitiveTopology(_sg_d3d11.ctx, pip->d3d11_topology); |
5633 | ID3D11DeviceContext_IASetIndexBuffer(_sg_d3d11.ctx, d3d11_ib, pip->d3d11_index_format, ib_offset); |
5634 | ID3D11DeviceContext_IASetInputLayout(_sg_d3d11.ctx, pip->d3d11_il); |
5635 | |
5636 | ID3D11DeviceContext_VSSetShader(_sg_d3d11.ctx, pip->shader->d3d11_vs, NULL, 0); |
5637 | ID3D11DeviceContext_VSSetConstantBuffers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->stage[SG_SHADERSTAGE_VS].d3d11_cbs); |
5638 | ID3D11DeviceContext_VSSetShaderResources(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_srvs); |
5639 | ID3D11DeviceContext_VSSetSamplers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_vs_smps); |
5640 | |
5641 | ID3D11DeviceContext_PSSetShader(_sg_d3d11.ctx, pip->shader->d3d11_fs, NULL, 0); |
5642 | ID3D11DeviceContext_PSSetConstantBuffers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_UBS, pip->shader->stage[SG_SHADERSTAGE_FS].d3d11_cbs); |
5643 | ID3D11DeviceContext_PSSetShaderResources(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_srvs); |
5644 | ID3D11DeviceContext_PSSetSamplers(_sg_d3d11.ctx, 0, SG_MAX_SHADERSTAGE_IMAGES, d3d11_fs_smps); |
5645 | } |
5646 | |
5647 | _SOKOL_PRIVATE void _sg_apply_uniform_block(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { |
5648 | _SOKOL_UNUSED(num_bytes); |
5649 | SOKOL_ASSERT(_sg_d3d11.ctx && _sg_d3d11.in_pass); |
5650 | SOKOL_ASSERT(data && (num_bytes > 0)); |
5651 | SOKOL_ASSERT((stage_index >= 0) && (stage_index < SG_NUM_SHADER_STAGES)); |
5652 | SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); |
5653 | SOKOL_ASSERT(_sg_d3d11.cur_pipeline && _sg_d3d11.cur_pipeline->slot.id == _sg_d3d11.cur_pipeline_id.id); |
5654 | SOKOL_ASSERT(_sg_d3d11.cur_pipeline->shader && _sg_d3d11.cur_pipeline->shader->slot.id == _sg_d3d11.cur_pipeline->shader_id.id); |
5655 | SOKOL_ASSERT(ub_index < _sg_d3d11.cur_pipeline->shader->stage[stage_index].num_uniform_blocks); |
5656 | SOKOL_ASSERT(num_bytes == _sg_d3d11.cur_pipeline->shader->stage[stage_index].uniform_blocks[ub_index].size); |
5657 | ID3D11Buffer* cb = _sg_d3d11.cur_pipeline->shader->stage[stage_index].d3d11_cbs[ub_index]; |
5658 | SOKOL_ASSERT(cb); |
5659 | ID3D11DeviceContext_UpdateSubresource(_sg_d3d11.ctx, (ID3D11Resource*)cb, 0, NULL, data, 0, 0); |
5660 | } |
5661 | |
5662 | _SOKOL_PRIVATE void _sg_draw(int base_element, int num_elements, int num_instances) { |
5663 | SOKOL_ASSERT(_sg_d3d11.in_pass); |
5664 | if (_sg_d3d11.use_indexed_draw) { |
5665 | if (1 == num_instances) { |
5666 | ID3D11DeviceContext_DrawIndexed(_sg_d3d11.ctx, num_elements, base_element, 0); |
5667 | } |
5668 | else { |
5669 | ID3D11DeviceContext_DrawIndexedInstanced(_sg_d3d11.ctx, num_elements, num_instances, base_element, 0, 0); |
5670 | } |
5671 | } |
5672 | else { |
5673 | if (1 == num_instances) { |
5674 | ID3D11DeviceContext_Draw(_sg_d3d11.ctx, num_elements, base_element); |
5675 | } |
5676 | else { |
5677 | ID3D11DeviceContext_DrawInstanced(_sg_d3d11.ctx, num_elements, num_instances, base_element, 0); |
5678 | } |
5679 | } |
5680 | } |
5681 | |
5682 | _SOKOL_PRIVATE void _sg_commit() { |
5683 | SOKOL_ASSERT(!_sg_d3d11.in_pass); |
5684 | } |
5685 | |
5686 | _SOKOL_PRIVATE void _sg_update_buffer(_sg_buffer* buf, const void* data_ptr, int data_size) { |
5687 | SOKOL_ASSERT(buf && data_ptr && (data_size > 0)); |
5688 | SOKOL_ASSERT(_sg_d3d11.ctx); |
5689 | SOKOL_ASSERT(buf->d3d11_buf); |
5690 | D3D11_MAPPED_SUBRESOURCE d3d11_msr; |
5691 | HRESULT hr = ID3D11DeviceContext_Map(_sg_d3d11.ctx, (ID3D11Resource*)buf->d3d11_buf, 0, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr); |
5692 | _SOKOL_UNUSED(hr); |
5693 | SOKOL_ASSERT(SUCCEEDED(hr)); |
5694 | memcpy(d3d11_msr.pData, data_ptr, data_size); |
5695 | ID3D11DeviceContext_Unmap(_sg_d3d11.ctx, (ID3D11Resource*)buf->d3d11_buf, 0); |
5696 | } |
5697 | |
5698 | _SOKOL_PRIVATE void _sg_append_buffer(_sg_buffer* buf, const void* data_ptr, int data_size, bool new_frame) { |
5699 | SOKOL_ASSERT(buf && data_ptr && (data_size > 0)); |
5700 | SOKOL_ASSERT(_sg_d3d11.ctx); |
5701 | SOKOL_ASSERT(buf->d3d11_buf); |
5702 | D3D11_MAP map_type = new_frame ? D3D11_MAP_WRITE_DISCARD : D3D11_MAP_WRITE_NO_OVERWRITE; |
5703 | D3D11_MAPPED_SUBRESOURCE d3d11_msr; |
5704 | HRESULT hr = ID3D11DeviceContext_Map(_sg_d3d11.ctx, (ID3D11Resource*)buf->d3d11_buf, 0, map_type, 0, &d3d11_msr); |
5705 | _SOKOL_UNUSED(hr); |
5706 | SOKOL_ASSERT(SUCCEEDED(hr)); |
5707 | uint8_t* dst_ptr = (uint8_t*)d3d11_msr.pData + buf->append_pos; |
5708 | memcpy(dst_ptr, data_ptr, data_size); |
5709 | ID3D11DeviceContext_Unmap(_sg_d3d11.ctx, (ID3D11Resource*)buf->d3d11_buf, 0); |
5710 | } |
5711 | |
5712 | _SOKOL_PRIVATE void _sg_update_image(_sg_image* img, const sg_image_content* data) { |
5713 | SOKOL_ASSERT(img && data); |
5714 | SOKOL_ASSERT(_sg_d3d11.ctx); |
5715 | SOKOL_ASSERT(img->d3d11_tex2d || img->d3d11_tex3d); |
5716 | ID3D11Resource* d3d11_res = 0; |
5717 | if (img->d3d11_tex3d) { |
5718 | d3d11_res = (ID3D11Resource*) img->d3d11_tex3d; |
5719 | } |
5720 | else { |
5721 | d3d11_res = (ID3D11Resource*) img->d3d11_tex2d; |
5722 | } |
5723 | SOKOL_ASSERT(d3d11_res); |
5724 | const int num_faces = (img->type == SG_IMAGETYPE_CUBE) ? 6:1; |
5725 | const int num_slices = (img->type == SG_IMAGETYPE_ARRAY) ? img->depth:1; |
5726 | int subres_index = 0; |
5727 | HRESULT hr; |
5728 | D3D11_MAPPED_SUBRESOURCE d3d11_msr; |
5729 | for (int face_index = 0; face_index < num_faces; face_index++) { |
5730 | for (int slice_index = 0; slice_index < num_slices; slice_index++) { |
5731 | for (int mip_index = 0; mip_index < img->num_mipmaps; mip_index++, subres_index++) { |
5732 | SOKOL_ASSERT(subres_index < (SG_MAX_MIPMAPS * SG_MAX_TEXTUREARRAY_LAYERS)); |
5733 | const int mip_width = ((img->width>>mip_index)>0) ? img->width>>mip_index : 1; |
5734 | const int mip_height = ((img->height>>mip_index)>0) ? img->height>>mip_index : 1; |
5735 | const int src_pitch = _sg_row_pitch(img->pixel_format, mip_width); |
5736 | const sg_subimage_content* subimg_content = &(data->subimage[face_index][mip_index]); |
5737 | const int slice_size = subimg_content->size / num_slices; |
5738 | const int slice_offset = slice_size * slice_index; |
5739 | const uint8_t* slice_ptr = ((const uint8_t*)subimg_content->ptr) + slice_offset; |
5740 | hr = ID3D11DeviceContext_Map(_sg_d3d11.ctx, d3d11_res, subres_index, D3D11_MAP_WRITE_DISCARD, 0, &d3d11_msr); |
5741 | SOKOL_ASSERT(SUCCEEDED(hr)); |
5742 | /* FIXME: need to handle difference in depth-pitch for 3D textures as well! */ |
5743 | if (src_pitch == (int)d3d11_msr.RowPitch) { |
5744 | memcpy(d3d11_msr.pData, slice_ptr, slice_size); |
5745 | } |
5746 | else { |
5747 | SOKOL_ASSERT(src_pitch < (int)d3d11_msr.RowPitch); |
5748 | const uint8_t* src_ptr = slice_ptr; |
5749 | uint8_t* dst_ptr = (uint8_t*) d3d11_msr.pData; |
5750 | for (int row_index = 0; row_index < mip_height; row_index++) { |
5751 | memcpy(dst_ptr, src_ptr, src_pitch); |
5752 | src_ptr += src_pitch; |
5753 | dst_ptr += d3d11_msr.RowPitch; |
5754 | } |
5755 | } |
5756 | ID3D11DeviceContext_Unmap(_sg_d3d11.ctx, d3d11_res, subres_index); |
5757 | } |
5758 | } |
5759 | } |
5760 | } |
5761 | |
5762 | /*== METAL BACKEND ===========================================================*/ |
5763 | #elif defined(SOKOL_METAL) |
5764 | |
5765 | #if !__has_feature(objc_arc) |
5766 | #error "Please enable ARC when using the Metal backend" |
5767 | #endif |
5768 | |
5769 | /* memset() */ |
5770 | #include <string.h> |
5771 | #include <TargetConditionals.h> |
5772 | #import <Metal/Metal.h> |
5773 | |
5774 | enum { |
5775 | _SG_MTL_DEFAULT_UB_SIZE = 4 * 1024 * 1024, |
5776 | #if !TARGET_OS_IPHONE |
5777 | _SG_MTL_UB_ALIGN = 256, |
5778 | #else |
5779 | _SG_MTL_UB_ALIGN = 16, |
5780 | #endif |
5781 | _SG_MTL_DEFAULT_SAMPLER_CACHE_CAPACITY = 64, |
5782 | _SG_MTL_INVALID_POOL_INDEX = 0xFFFFFFFF |
5783 | }; |
5784 | |
5785 | /*-- enum translation functions ----------------------------------------------*/ |
5786 | _SOKOL_PRIVATE MTLLoadAction _sg_mtl_load_action(sg_action a) { |
5787 | switch (a) { |
5788 | case SG_ACTION_CLEAR: return MTLLoadActionClear; |
5789 | case SG_ACTION_LOAD: return MTLLoadActionLoad; |
5790 | case SG_ACTION_DONTCARE: return MTLLoadActionDontCare; |
5791 | default: SOKOL_UNREACHABLE; return (MTLLoadAction)0; |
5792 | } |
5793 | } |
5794 | |
5795 | _SOKOL_PRIVATE MTLResourceOptions _sg_mtl_buffer_resource_options(sg_usage usg) { |
5796 | switch (usg) { |
5797 | case SG_USAGE_IMMUTABLE: |
5798 | return MTLResourceStorageModeShared; |
5799 | case SG_USAGE_DYNAMIC: |
5800 | case SG_USAGE_STREAM: |
5801 | #if !TARGET_OS_IPHONE |
5802 | return MTLCPUCacheModeWriteCombined|MTLResourceStorageModeManaged; |
5803 | #else |
5804 | return MTLCPUCacheModeWriteCombined; |
5805 | #endif |
5806 | default: |
5807 | SOKOL_UNREACHABLE; |
5808 | return 0; |
5809 | } |
5810 | } |
5811 | |
5812 | _SOKOL_PRIVATE MTLVertexStepFunction _sg_mtl_step_function(sg_vertex_step step) { |
5813 | switch (step) { |
5814 | case SG_VERTEXSTEP_PER_VERTEX: return MTLVertexStepFunctionPerVertex; |
5815 | case SG_VERTEXSTEP_PER_INSTANCE: return MTLVertexStepFunctionPerInstance; |
5816 | default: SOKOL_UNREACHABLE; return (MTLVertexStepFunction)0; |
5817 | } |
5818 | } |
5819 | |
5820 | _SOKOL_PRIVATE MTLVertexFormat _sg_mtl_vertex_format(sg_vertex_format fmt) { |
5821 | switch (fmt) { |
5822 | case SG_VERTEXFORMAT_FLOAT: return MTLVertexFormatFloat; |
5823 | case SG_VERTEXFORMAT_FLOAT2: return MTLVertexFormatFloat2; |
5824 | case SG_VERTEXFORMAT_FLOAT3: return MTLVertexFormatFloat3; |
5825 | case SG_VERTEXFORMAT_FLOAT4: return MTLVertexFormatFloat4; |
5826 | case SG_VERTEXFORMAT_BYTE4: return MTLVertexFormatChar4; |
5827 | case SG_VERTEXFORMAT_BYTE4N: return MTLVertexFormatChar4Normalized; |
5828 | case SG_VERTEXFORMAT_UBYTE4: return MTLVertexFormatUChar4; |
5829 | case SG_VERTEXFORMAT_UBYTE4N: return MTLVertexFormatUChar4Normalized; |
5830 | case SG_VERTEXFORMAT_SHORT2: return MTLVertexFormatShort2; |
5831 | case SG_VERTEXFORMAT_SHORT2N: return MTLVertexFormatShort2Normalized; |
5832 | case SG_VERTEXFORMAT_SHORT4: return MTLVertexFormatShort4; |
5833 | case SG_VERTEXFORMAT_SHORT4N: return MTLVertexFormatShort4Normalized; |
5834 | case SG_VERTEXFORMAT_UINT10_N2: return MTLVertexFormatUInt1010102Normalized; |
5835 | default: SOKOL_UNREACHABLE; return (MTLVertexFormat)0; |
5836 | } |
5837 | } |
5838 | |
5839 | _SOKOL_PRIVATE MTLPrimitiveType _sg_mtl_primitive_type(sg_primitive_type t) { |
5840 | switch (t) { |
5841 | case SG_PRIMITIVETYPE_POINTS: return MTLPrimitiveTypePoint; |
5842 | case SG_PRIMITIVETYPE_LINES: return MTLPrimitiveTypeLine; |
5843 | case SG_PRIMITIVETYPE_LINE_STRIP: return MTLPrimitiveTypeLineStrip; |
5844 | case SG_PRIMITIVETYPE_TRIANGLES: return MTLPrimitiveTypeTriangle; |
5845 | case SG_PRIMITIVETYPE_TRIANGLE_STRIP: return MTLPrimitiveTypeTriangleStrip; |
5846 | default: SOKOL_UNREACHABLE; return (MTLPrimitiveType)0; |
5847 | } |
5848 | } |
5849 | |
5850 | _SOKOL_PRIVATE MTLPixelFormat _sg_mtl_texture_format(sg_pixel_format fmt) { |
5851 | switch (fmt) { |
5852 | case SG_PIXELFORMAT_RGBA8: return MTLPixelFormatRGBA8Unorm; |
5853 | case SG_PIXELFORMAT_R10G10B10A2: return MTLPixelFormatRGB10A2Unorm; |
5854 | case SG_PIXELFORMAT_RGBA32F: return MTLPixelFormatRGBA32Float; |
5855 | case SG_PIXELFORMAT_RGBA16F: return MTLPixelFormatRGBA16Float; |
5856 | case SG_PIXELFORMAT_R32F: return MTLPixelFormatR32Float; |
5857 | case SG_PIXELFORMAT_R16F: return MTLPixelFormatR16Float; |
5858 | case SG_PIXELFORMAT_L8: return MTLPixelFormatR8Unorm; |
5859 | #if !TARGET_OS_IPHONE |
5860 | case SG_PIXELFORMAT_DXT1: return MTLPixelFormatBC1_RGBA; |
5861 | case SG_PIXELFORMAT_DXT3: return MTLPixelFormatBC2_RGBA; |
5862 | case SG_PIXELFORMAT_DXT5: return MTLPixelFormatBC3_RGBA; |
5863 | #else |
5864 | case SG_PIXELFORMAT_PVRTC2_RGB: return MTLPixelFormatPVRTC_RGB_2BPP; |
5865 | case SG_PIXELFORMAT_PVRTC4_RGB: return MTLPixelFormatPVRTC_RGB_4BPP; |
5866 | case SG_PIXELFORMAT_PVRTC2_RGBA: return MTLPixelFormatPVRTC_RGBA_2BPP; |
5867 | case SG_PIXELFORMAT_PVRTC4_RGBA: return MTLPixelFormatPVRTC_RGBA_4BPP; |
5868 | case SG_PIXELFORMAT_ETC2_RGB8: return MTLPixelFormatETC2_RGB8; |
5869 | case SG_PIXELFORMAT_ETC2_SRGB8: return MTLPixelFormatETC2_RGB8_sRGB; |
5870 | #endif |
5871 | default: return MTLPixelFormatInvalid; |
5872 | } |
5873 | } |
5874 | |
5875 | _SOKOL_PRIVATE MTLPixelFormat _sg_mtl_rendertarget_color_format(sg_pixel_format fmt) { |
5876 | switch (fmt) { |
case SG_PIXELFORMAT_RGBA8: return MTLPixelFormatBGRA8Unorm; /* not a bug: the default (CAMetalLayer) framebuffer is BGRA8 */
5878 | case SG_PIXELFORMAT_RGBA32F: return MTLPixelFormatRGBA32Float; |
5879 | case SG_PIXELFORMAT_RGBA16F: return MTLPixelFormatRGBA16Float; |
5880 | case SG_PIXELFORMAT_R10G10B10A2: return MTLPixelFormatRGB10A2Unorm; |
5881 | default: return MTLPixelFormatInvalid; |
5882 | } |
5883 | } |
5884 | |
5885 | _SOKOL_PRIVATE MTLPixelFormat _sg_mtl_rendertarget_depth_format(sg_pixel_format fmt) { |
5886 | switch (fmt) { |
5887 | case SG_PIXELFORMAT_DEPTH: |
5888 | return MTLPixelFormatDepth32Float; |
5889 | case SG_PIXELFORMAT_DEPTHSTENCIL: |
5890 | /* NOTE: Depth24_Stencil8 isn't universally supported! */ |
5891 | return MTLPixelFormatDepth32Float_Stencil8; |
5892 | default: |
5893 | return MTLPixelFormatInvalid; |
5894 | } |
5895 | } |
5896 | |
5897 | _SOKOL_PRIVATE MTLPixelFormat _sg_mtl_rendertarget_stencil_format(sg_pixel_format fmt) { |
5898 | switch (fmt) { |
5899 | case SG_PIXELFORMAT_DEPTHSTENCIL: |
5900 | return MTLPixelFormatDepth32Float_Stencil8; |
5901 | default: |
5902 | return MTLPixelFormatInvalid; |
5903 | } |
5904 | } |
5905 | |
5906 | _SOKOL_PRIVATE MTLColorWriteMask _sg_mtl_color_write_mask(sg_color_mask m) { |
5907 | MTLColorWriteMask mtl_mask = MTLColorWriteMaskNone; |
5908 | if (m & SG_COLORMASK_R) { |
5909 | mtl_mask |= MTLColorWriteMaskRed; |
5910 | } |
5911 | if (m & SG_COLORMASK_G) { |
5912 | mtl_mask |= MTLColorWriteMaskGreen; |
5913 | } |
5914 | if (m & SG_COLORMASK_B) { |
5915 | mtl_mask |= MTLColorWriteMaskBlue; |
5916 | } |
5917 | if (m & SG_COLORMASK_A) { |
5918 | mtl_mask |= MTLColorWriteMaskAlpha; |
5919 | } |
5920 | return mtl_mask; |
5921 | } |
5922 | |
5923 | _SOKOL_PRIVATE MTLBlendOperation _sg_mtl_blend_op(sg_blend_op op) { |
5924 | switch (op) { |
5925 | case SG_BLENDOP_ADD: return MTLBlendOperationAdd; |
5926 | case SG_BLENDOP_SUBTRACT: return MTLBlendOperationSubtract; |
5927 | case SG_BLENDOP_REVERSE_SUBTRACT: return MTLBlendOperationReverseSubtract; |
5928 | default: SOKOL_UNREACHABLE; return (MTLBlendOperation)0; |
5929 | } |
5930 | } |
5931 | |
5932 | _SOKOL_PRIVATE MTLBlendFactor _sg_mtl_blend_factor(sg_blend_factor f) { |
5933 | switch (f) { |
5934 | case SG_BLENDFACTOR_ZERO: return MTLBlendFactorZero; |
5935 | case SG_BLENDFACTOR_ONE: return MTLBlendFactorOne; |
5936 | case SG_BLENDFACTOR_SRC_COLOR: return MTLBlendFactorSourceColor; |
5937 | case SG_BLENDFACTOR_ONE_MINUS_SRC_COLOR: return MTLBlendFactorOneMinusSourceColor; |
5938 | case SG_BLENDFACTOR_SRC_ALPHA: return MTLBlendFactorSourceAlpha; |
5939 | case SG_BLENDFACTOR_ONE_MINUS_SRC_ALPHA: return MTLBlendFactorOneMinusSourceAlpha; |
5940 | case SG_BLENDFACTOR_DST_COLOR: return MTLBlendFactorDestinationColor; |
5941 | case SG_BLENDFACTOR_ONE_MINUS_DST_COLOR: return MTLBlendFactorOneMinusDestinationColor; |
5942 | case SG_BLENDFACTOR_DST_ALPHA: return MTLBlendFactorDestinationAlpha; |
5943 | case SG_BLENDFACTOR_ONE_MINUS_DST_ALPHA: return MTLBlendFactorOneMinusDestinationAlpha; |
5944 | case SG_BLENDFACTOR_SRC_ALPHA_SATURATED: return MTLBlendFactorSourceAlphaSaturated; |
5945 | case SG_BLENDFACTOR_BLEND_COLOR: return MTLBlendFactorBlendColor; |
5946 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_COLOR: return MTLBlendFactorOneMinusBlendColor; |
5947 | case SG_BLENDFACTOR_BLEND_ALPHA: return MTLBlendFactorBlendAlpha; |
5948 | case SG_BLENDFACTOR_ONE_MINUS_BLEND_ALPHA: return MTLBlendFactorOneMinusBlendAlpha; |
5949 | default: SOKOL_UNREACHABLE; return (MTLBlendFactor)0; |
5950 | } |
5951 | } |
5952 | |
5953 | _SOKOL_PRIVATE MTLCompareFunction _sg_mtl_compare_func(sg_compare_func f) { |
5954 | switch (f) { |
5955 | case SG_COMPAREFUNC_NEVER: return MTLCompareFunctionNever; |
5956 | case SG_COMPAREFUNC_LESS: return MTLCompareFunctionLess; |
5957 | case SG_COMPAREFUNC_EQUAL: return MTLCompareFunctionEqual; |
5958 | case SG_COMPAREFUNC_LESS_EQUAL: return MTLCompareFunctionLessEqual; |
5959 | case SG_COMPAREFUNC_GREATER: return MTLCompareFunctionGreater; |
5960 | case SG_COMPAREFUNC_NOT_EQUAL: return MTLCompareFunctionNotEqual; |
5961 | case SG_COMPAREFUNC_GREATER_EQUAL: return MTLCompareFunctionGreaterEqual; |
5962 | case SG_COMPAREFUNC_ALWAYS: return MTLCompareFunctionAlways; |
5963 | default: SOKOL_UNREACHABLE; return (MTLCompareFunction)0; |
5964 | } |
5965 | } |
5966 | |
5967 | _SOKOL_PRIVATE MTLStencilOperation _sg_mtl_stencil_op(sg_stencil_op op) { |
5968 | switch (op) { |
5969 | case SG_STENCILOP_KEEP: return MTLStencilOperationKeep; |
5970 | case SG_STENCILOP_ZERO: return MTLStencilOperationZero; |
5971 | case SG_STENCILOP_REPLACE: return MTLStencilOperationReplace; |
5972 | case SG_STENCILOP_INCR_CLAMP: return MTLStencilOperationIncrementClamp; |
5973 | case SG_STENCILOP_DECR_CLAMP: return MTLStencilOperationDecrementClamp; |
5974 | case SG_STENCILOP_INVERT: return MTLStencilOperationInvert; |
5975 | case SG_STENCILOP_INCR_WRAP: return MTLStencilOperationIncrementWrap; |
5976 | case SG_STENCILOP_DECR_WRAP: return MTLStencilOperationDecrementWrap; |
5977 | default: SOKOL_UNREACHABLE; return (MTLStencilOperation)0; |
5978 | } |
5979 | } |
5980 | |
5981 | _SOKOL_PRIVATE MTLCullMode _sg_mtl_cull_mode(sg_cull_mode m) { |
5982 | switch (m) { |
5983 | case SG_CULLMODE_NONE: return MTLCullModeNone; |
5984 | case SG_CULLMODE_FRONT: return MTLCullModeFront; |
5985 | case SG_CULLMODE_BACK: return MTLCullModeBack; |
5986 | default: SOKOL_UNREACHABLE; return (MTLCullMode)0; |
5987 | } |
5988 | } |
5989 | |
5990 | _SOKOL_PRIVATE MTLWinding _sg_mtl_winding(sg_face_winding w) { |
5991 | switch (w) { |
5992 | case SG_FACEWINDING_CW: return MTLWindingClockwise; |
5993 | case SG_FACEWINDING_CCW: return MTLWindingCounterClockwise; |
5994 | default: SOKOL_UNREACHABLE; return (MTLWinding)0; |
5995 | } |
5996 | } |
5997 | |
5998 | _SOKOL_PRIVATE MTLIndexType _sg_mtl_index_type(sg_index_type t) { |
5999 | switch (t) { |
6000 | case SG_INDEXTYPE_UINT16: return MTLIndexTypeUInt16; |
6001 | case SG_INDEXTYPE_UINT32: return MTLIndexTypeUInt32; |
6002 | default: SOKOL_UNREACHABLE; return (MTLIndexType)0; |
6003 | } |
6004 | } |
6005 | |
6006 | _SOKOL_PRIVATE NSUInteger _sg_mtl_index_size(sg_index_type t) { |
6007 | switch (t) { |
6008 | case SG_INDEXTYPE_NONE: return 0; |
6009 | case SG_INDEXTYPE_UINT16: return 2; |
6010 | case SG_INDEXTYPE_UINT32: return 4; |
6011 | default: SOKOL_UNREACHABLE; return 0; |
6012 | } |
6013 | } |
6014 | |
6015 | _SOKOL_PRIVATE MTLTextureType _sg_mtl_texture_type(sg_image_type t) { |
6016 | switch (t) { |
6017 | case SG_IMAGETYPE_2D: return MTLTextureType2D; |
6018 | case SG_IMAGETYPE_CUBE: return MTLTextureTypeCube; |
6019 | case SG_IMAGETYPE_3D: return MTLTextureType3D; |
6020 | case SG_IMAGETYPE_ARRAY: return MTLTextureType2DArray; |
6021 | default: SOKOL_UNREACHABLE; return (MTLTextureType)0; |
6022 | } |
6023 | } |
6024 | |
6025 | _SOKOL_PRIVATE bool _sg_mtl_is_pvrtc(sg_pixel_format fmt) { |
6026 | switch (fmt) { |
6027 | case SG_PIXELFORMAT_PVRTC2_RGB: |
6028 | case SG_PIXELFORMAT_PVRTC2_RGBA: |
6029 | case SG_PIXELFORMAT_PVRTC4_RGB: |
6030 | case SG_PIXELFORMAT_PVRTC4_RGBA: |
6031 | return true; |
6032 | default: |
6033 | return false; |
6034 | } |
6035 | } |
6036 | |
6037 | _SOKOL_PRIVATE MTLSamplerAddressMode _sg_mtl_address_mode(sg_wrap w) { |
6038 | switch (w) { |
6039 | case SG_WRAP_REPEAT: return MTLSamplerAddressModeRepeat; |
6040 | case SG_WRAP_CLAMP_TO_EDGE: return MTLSamplerAddressModeClampToEdge; |
6041 | case SG_WRAP_MIRRORED_REPEAT: return MTLSamplerAddressModeMirrorRepeat; |
6042 | default: SOKOL_UNREACHABLE; return (MTLSamplerAddressMode)0; |
6043 | } |
6044 | } |
6045 | |
6046 | _SOKOL_PRIVATE MTLSamplerMinMagFilter _sg_mtl_minmag_filter(sg_filter f) { |
6047 | switch (f) { |
6048 | case SG_FILTER_NEAREST: |
6049 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: |
6050 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: |
6051 | return MTLSamplerMinMagFilterNearest; |
6052 | case SG_FILTER_LINEAR: |
6053 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: |
6054 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: |
6055 | return MTLSamplerMinMagFilterLinear; |
6056 | default: |
6057 | SOKOL_UNREACHABLE; return (MTLSamplerMinMagFilter)0; |
6058 | } |
6059 | } |
6060 | |
6061 | _SOKOL_PRIVATE MTLSamplerMipFilter _sg_mtl_mip_filter(sg_filter f) { |
6062 | switch (f) { |
6063 | case SG_FILTER_NEAREST: |
6064 | case SG_FILTER_LINEAR: |
6065 | return MTLSamplerMipFilterNotMipmapped; |
6066 | case SG_FILTER_NEAREST_MIPMAP_NEAREST: |
6067 | case SG_FILTER_LINEAR_MIPMAP_NEAREST: |
6068 | return MTLSamplerMipFilterNearest; |
6069 | case SG_FILTER_NEAREST_MIPMAP_LINEAR: |
6070 | case SG_FILTER_LINEAR_MIPMAP_LINEAR: |
6071 | return MTLSamplerMipFilterLinear; |
6072 | default: |
6073 | SOKOL_UNREACHABLE; return (MTLSamplerMipFilter)0; |
6074 | } |
6075 | } |
6076 | |
6077 | /*-- a pool for all Metal resource objects, with deferred release queue -------*/ |
6078 | static uint32_t _sg_mtl_pool_size; |
6079 | static NSMutableArray* _sg_mtl_pool; |
6080 | static uint32_t _sg_mtl_free_queue_top; |
6081 | static uint32_t* _sg_mtl_free_queue; |
6082 | static uint32_t _sg_mtl_release_queue_front; |
6083 | static uint32_t _sg_mtl_release_queue_back; |
6084 | typedef struct { |
6085 | uint32_t frame_index; /* frame index at which it is safe to release this resource */ |
6086 | uint32_t pool_index; |
6087 | } _sg_mtl_release_item; |
6088 | static _sg_mtl_release_item* _sg_mtl_release_queue; |
6089 | |
6090 | _SOKOL_PRIVATE void _sg_mtl_init_pool(const sg_desc* desc) { |
6091 | _sg_mtl_pool_size = 2 * |
6092 | 2 * _sg_def(desc->buffer_pool_size, _SG_DEFAULT_BUFFER_POOL_SIZE) + |
6093 | 5 * _sg_def(desc->image_pool_size, _SG_DEFAULT_IMAGE_POOL_SIZE) + |
6094 | 4 * _sg_def(desc->shader_pool_size, _SG_DEFAULT_SHADER_POOL_SIZE) + |
6095 | 2 * _sg_def(desc->pipeline_pool_size, _SG_DEFAULT_PIPELINE_POOL_SIZE) + |
6096 | _sg_def(desc->pass_pool_size, _SG_DEFAULT_PASS_POOL_SIZE); |
6097 | _sg_mtl_pool = [NSMutableArray arrayWithCapacity:_sg_mtl_pool_size]; |
6098 | NSNull* null = [NSNull null]; |
6099 | for (uint32_t i = 0; i < _sg_mtl_pool_size; i++) { |
6100 | [_sg_mtl_pool addObject:null]; |
6101 | } |
6102 | SOKOL_ASSERT([_sg_mtl_pool count] == _sg_mtl_pool_size); |
6103 | /* a queue of currently free slot indices */ |
6104 | _sg_mtl_free_queue_top = 0; |
6105 | _sg_mtl_free_queue = (uint32_t*)SOKOL_MALLOC(_sg_mtl_pool_size * sizeof(uint32_t)); |
6106 | for (int i = _sg_mtl_pool_size-1; i >= 0; i--) { |
6107 | _sg_mtl_free_queue[_sg_mtl_free_queue_top++] = (uint32_t)i; |
6108 | } |
/* a circular queue which holds release items (the frame index
when a resource is to be released, and the resource's
pool index)
*/
6113 | _sg_mtl_release_queue_front = 0; |
6114 | _sg_mtl_release_queue_back = 0; |
6115 | _sg_mtl_release_queue = (_sg_mtl_release_item*)SOKOL_MALLOC(_sg_mtl_pool_size * sizeof(_sg_mtl_release_item)); |
6116 | for (uint32_t i = 0; i < _sg_mtl_pool_size; i++) { |
6117 | _sg_mtl_release_queue[i].frame_index = 0; |
6118 | _sg_mtl_release_queue[i].pool_index = _SG_MTL_INVALID_POOL_INDEX; |
6119 | } |
6120 | } |
6121 | |
6122 | _SOKOL_PRIVATE void _sg_mtl_destroy_pool() { |
6123 | SOKOL_FREE(_sg_mtl_release_queue); _sg_mtl_release_queue = 0; |
6124 | SOKOL_FREE(_sg_mtl_free_queue); _sg_mtl_free_queue = 0; |
6125 | _sg_mtl_pool = nil; |
6126 | } |
6127 | |
6128 | /* get a new free resource pool slot */ |
6129 | _SOKOL_PRIVATE uint32_t _sg_mtl_alloc_pool_slot() { |
6130 | SOKOL_ASSERT(_sg_mtl_free_queue_top > 0); |
6131 | const uint32_t pool_index = _sg_mtl_free_queue[--_sg_mtl_free_queue_top]; |
6132 | return pool_index; |
6133 | } |
6134 | |
6135 | /* put a free resource pool slot back into the free-queue */ |
6136 | _SOKOL_PRIVATE void _sg_mtl_free_pool_slot(uint32_t pool_index) { |
6137 | SOKOL_ASSERT(_sg_mtl_free_queue_top < _sg_mtl_pool_size); |
6138 | _sg_mtl_free_queue[_sg_mtl_free_queue_top++] = pool_index; |
6139 | } |
6140 | |
6141 | /* add an MTLResource to the pool, return pool index or 0xFFFFFFFF if input was 'nil' */ |
6142 | _SOKOL_PRIVATE uint32_t _sg_mtl_add_resource(id res) { |
6143 | if (nil == res) { |
6144 | return _SG_MTL_INVALID_POOL_INDEX; |
6145 | } |
6146 | const uint32_t pool_index = _sg_mtl_alloc_pool_slot(); |
6147 | SOKOL_ASSERT([NSNull null] == _sg_mtl_pool[pool_index]); |
6148 | _sg_mtl_pool[pool_index] = res; |
6149 | return pool_index; |
6150 | } |
6151 | |
/* mark an MTLResource for release; this puts the resource into the
deferred-release queue, and the resource will then be released N frames later;
the special pool index 0xFFFFFFFF will be ignored (this means that a nil
value was provided to _sg_mtl_add_resource())
*/
6157 | _SOKOL_PRIVATE void _sg_mtl_release_resource(uint32_t frame_index, uint32_t pool_index) { |
6158 | if (pool_index == _SG_MTL_INVALID_POOL_INDEX) { |
6159 | return; |
6160 | } |
SOKOL_ASSERT(pool_index < _sg_mtl_pool_size);
6162 | SOKOL_ASSERT([NSNull null] != _sg_mtl_pool[pool_index]); |
6163 | int slot_index = _sg_mtl_release_queue_front++; |
6164 | if (_sg_mtl_release_queue_front >= _sg_mtl_pool_size) { |
6165 | /* wrap-around */ |
6166 | _sg_mtl_release_queue_front = 0; |
6167 | } |
6168 | /* release queue full? */ |
6169 | SOKOL_ASSERT(_sg_mtl_release_queue_front != _sg_mtl_release_queue_back); |
6170 | SOKOL_ASSERT(0 == _sg_mtl_release_queue[slot_index].frame_index); |
6171 | const uint32_t safe_to_release_frame_index = frame_index + SG_NUM_INFLIGHT_FRAMES + 1; |
6172 | _sg_mtl_release_queue[slot_index].frame_index = safe_to_release_frame_index; |
6173 | _sg_mtl_release_queue[slot_index].pool_index = pool_index; |
6174 | } |
6175 | |
6176 | /* run garbage-collection pass on all resources in the release-queue */ |
6177 | _SOKOL_PRIVATE void _sg_mtl_garbage_collect(uint32_t frame_index) { |
6178 | while (_sg_mtl_release_queue_back != _sg_mtl_release_queue_front) { |
6179 | if (frame_index < _sg_mtl_release_queue[_sg_mtl_release_queue_back].frame_index) { |
6180 | /* don't need to check further, release-items past this are too young */ |
6181 | break; |
6182 | } |
6183 | /* safe to release this resource */ |
6184 | const uint32_t pool_index = _sg_mtl_release_queue[_sg_mtl_release_queue_back].pool_index; |
6185 | SOKOL_ASSERT(pool_index < _sg_mtl_pool_size); |
6186 | SOKOL_ASSERT(_sg_mtl_pool[pool_index] != [NSNull null]); |
6187 | _sg_mtl_pool[pool_index] = [NSNull null]; |
6188 | /* put the now free pool index back on the free queue */ |
6189 | _sg_mtl_free_pool_slot(pool_index); |
6190 | /* reset the release queue slot and advance the back index */ |
6191 | _sg_mtl_release_queue[_sg_mtl_release_queue_back].frame_index = 0; |
6192 | _sg_mtl_release_queue[_sg_mtl_release_queue_back].pool_index = _SG_MTL_INVALID_POOL_INDEX; |
6193 | _sg_mtl_release_queue_back++; |
6194 | if (_sg_mtl_release_queue_back >= _sg_mtl_pool_size) { |
6195 | /* wrap-around */ |
6196 | _sg_mtl_release_queue_back = 0; |
6197 | } |
6198 | } |
6199 | } |
6200 | |
/*-- a very simple sampler cache -----------------------------------------------

since there is only a small number of different samplers, sampler objects
will never be deleted (except on shutdown), and searching for an identical
sampler is a simple linear search
*/
6207 | typedef struct { |
6208 | sg_filter min_filter; |
6209 | sg_filter mag_filter; |
6210 | sg_wrap wrap_u; |
6211 | sg_wrap wrap_v; |
6212 | sg_wrap wrap_w; |
6213 | uint32_t max_anisotropy; |
6214 | int min_lod; /* orig min/max_lod is float, this is int(min/max_lod*1000.0) */ |
6215 | int max_lod; |
6216 | uint32_t mtl_sampler_state; |
6217 | } _sg_mtl_sampler_cache_item; |
6218 | static int _sg_mtl_sampler_cache_capacity; |
6219 | static int _sg_mtl_sampler_cache_size; |
6220 | static _sg_mtl_sampler_cache_item* _sg_mtl_sampler_cache; |
6221 | |
6222 | /* initialize the sampler cache */ |
6223 | _SOKOL_PRIVATE void _sg_mtl_init_sampler_cache(const sg_desc* desc) { |
6224 | _sg_mtl_sampler_cache_capacity = _sg_def(desc->mtl_sampler_cache_size, _SG_MTL_DEFAULT_SAMPLER_CACHE_CAPACITY); |
6225 | _sg_mtl_sampler_cache_size = 0; |
6226 | const int size = _sg_mtl_sampler_cache_capacity * sizeof(_sg_mtl_sampler_cache_item); |
6227 | _sg_mtl_sampler_cache = (_sg_mtl_sampler_cache_item*)SOKOL_MALLOC(size); |
6228 | memset(_sg_mtl_sampler_cache, 0, size); |
6229 | } |
6230 | |
6231 | /* destroy the sampler cache, and release all sampler objects */ |
6232 | _SOKOL_PRIVATE void _sg_mtl_destroy_sampler_cache(uint32_t frame_index) { |
6233 | SOKOL_ASSERT(_sg_mtl_sampler_cache); |
6234 | SOKOL_ASSERT(_sg_mtl_sampler_cache_size <= _sg_mtl_sampler_cache_capacity); |
6235 | for (int i = 0; i < _sg_mtl_sampler_cache_size; i++) { |
6236 | _sg_mtl_release_resource(frame_index, _sg_mtl_sampler_cache[i].mtl_sampler_state); |
6237 | } |
6238 | SOKOL_FREE(_sg_mtl_sampler_cache); _sg_mtl_sampler_cache = 0; |
6239 | _sg_mtl_sampler_cache_size = 0; |
6240 | _sg_mtl_sampler_cache_capacity = 0; |
6241 | } |
6242 | |
6243 | /* |
6244 | create and add an MTLSamplerStateObject and return its resource pool index, |
6245 | reuse identical sampler state if one exists |
6246 | */ |
6247 | _SOKOL_PRIVATE uint32_t _sg_mtl_create_sampler(id<MTLDevice> mtl_device, const sg_image_desc* img_desc) { |
6248 | SOKOL_ASSERT(img_desc); |
6249 | SOKOL_ASSERT(_sg_mtl_sampler_cache); |
/* resolve default values for the sampler-relevant desc fields before the cache lookup */
6251 | const sg_filter min_filter = _sg_def(img_desc->min_filter, SG_FILTER_NEAREST); |
6252 | const sg_filter mag_filter = _sg_def(img_desc->mag_filter, SG_FILTER_NEAREST); |
6253 | const sg_wrap wrap_u = _sg_def(img_desc->wrap_u, SG_WRAP_REPEAT); |
6254 | const sg_wrap wrap_v = _sg_def(img_desc->wrap_v, SG_WRAP_REPEAT); |
6255 | const sg_wrap wrap_w = _sg_def(img_desc->wrap_w, SG_WRAP_REPEAT); |
6256 | const uint32_t max_anisotropy = _sg_def(img_desc->max_anisotropy, 1); |
6257 | /* convert floats to valid int for proper comparison */ |
6258 | const int min_lod = (int)(img_desc->min_lod * 1000.0f); |
6259 | const int max_lod = (int)(_sg_def_flt(img_desc->max_lod, 1000.0f) * 1000.0f); |
6260 | /* first try to find identical sampler, number of samplers will be small, so linear search is ok */ |
6261 | for (int i = 0; i < _sg_mtl_sampler_cache_size; i++) { |
6262 | _sg_mtl_sampler_cache_item* item = &_sg_mtl_sampler_cache[i]; |
6263 | if ((min_filter == item->min_filter) && |
6264 | (mag_filter == item->mag_filter) && |
6265 | (wrap_u == item->wrap_u) && |
6266 | (wrap_v == item->wrap_v) && |
6267 | (wrap_w == item->wrap_w) && |
6268 | (max_anisotropy == item->max_anisotropy) && |
6269 | (min_lod == item->min_lod) && |
6270 | (max_lod == item->max_lod)) |
6271 | { |
6272 | return item->mtl_sampler_state; |
6273 | } |
6274 | } |
6275 | /* fallthrough: need to create a new MTLSamplerState object */ |
6276 | SOKOL_ASSERT(_sg_mtl_sampler_cache_size < _sg_mtl_sampler_cache_capacity); |
6277 | _sg_mtl_sampler_cache_item* new_item = &_sg_mtl_sampler_cache[_sg_mtl_sampler_cache_size++]; |
6278 | new_item->min_filter = min_filter; |
6279 | new_item->mag_filter = mag_filter; |
6280 | new_item->wrap_u = wrap_u; |
6281 | new_item->wrap_v = wrap_v; |
6282 | new_item->wrap_w = wrap_w; |
6283 | new_item->min_lod = min_lod; |
6284 | new_item->max_lod = max_lod; |
6285 | new_item->max_anisotropy = max_anisotropy; |
6286 | MTLSamplerDescriptor* mtl_desc = [[MTLSamplerDescriptor alloc] init]; |
6287 | mtl_desc.sAddressMode = _sg_mtl_address_mode(wrap_u); |
6288 | mtl_desc.tAddressMode = _sg_mtl_address_mode(wrap_v); |
6289 | if (SG_IMAGETYPE_3D == img_desc->type) { |
6290 | mtl_desc.rAddressMode = _sg_mtl_address_mode(wrap_w); |
6291 | } |
6292 | mtl_desc.minFilter = _sg_mtl_minmag_filter(min_filter); |
6293 | mtl_desc.magFilter = _sg_mtl_minmag_filter(mag_filter); |
6294 | mtl_desc.mipFilter = _sg_mtl_mip_filter(min_filter); |
6295 | mtl_desc.lodMinClamp = img_desc->min_lod; |
6296 | mtl_desc.lodMaxClamp = _sg_def_flt(img_desc->max_lod, FLT_MAX); |
6297 | mtl_desc.maxAnisotropy = max_anisotropy; |
6298 | mtl_desc.normalizedCoordinates = YES; |
6299 | id<MTLSamplerState> mtl_sampler = [mtl_device newSamplerStateWithDescriptor:mtl_desc]; |
6300 | new_item->mtl_sampler_state = _sg_mtl_add_resource(mtl_sampler); |
6301 | return new_item->mtl_sampler_state; |
6302 | } |
6303 | |
6304 | /*-- Metal backend resource structs ------------------------------------------*/ |
6305 | typedef struct { |
6306 | _sg_slot slot; |
6307 | int size; |
6308 | int append_pos; |
6309 | bool append_overflow; |
6310 | sg_buffer_type type; |
6311 | sg_usage usage; |
6312 | uint32_t update_frame_index; |
6313 | uint32_t append_frame_index; |
6314 | int num_slots; |
6315 | int active_slot; |
uint32_t mtl_buf[SG_NUM_INFLIGHT_FRAMES];  /* index into _sg_mtl_pool */
6317 | } _sg_buffer; |
6318 | |
6319 | _SOKOL_PRIVATE void _sg_init_buffer_slot(_sg_buffer* buf) { |
6320 | SOKOL_ASSERT(buf); |
6321 | memset(buf, 0, sizeof(_sg_buffer)); |
6322 | } |
6323 | |
6324 | typedef struct { |
6325 | _sg_slot slot; |
6326 | sg_image_type type; |
6327 | bool render_target; |
6328 | int width; |
6329 | int height; |
6330 | int depth; |
6331 | int num_mipmaps; |
6332 | sg_usage usage; |
6333 | sg_pixel_format pixel_format; |
6334 | int sample_count; |
6335 | sg_filter min_filter; |
6336 | sg_filter mag_filter; |
6337 | sg_wrap wrap_u; |
6338 | sg_wrap wrap_v; |
6339 | sg_wrap wrap_w; |
6340 | uint32_t max_anisotropy; |
6341 | uint32_t upd_frame_index; |
6342 | int num_slots; |
6343 | int active_slot; |
6344 | uint32_t mtl_tex[SG_NUM_INFLIGHT_FRAMES]; |
6345 | uint32_t mtl_depth_tex; |
6346 | uint32_t mtl_msaa_tex; |
6347 | uint32_t mtl_sampler_state; |
6348 | } _sg_image; |
6349 | |
6350 | _SOKOL_PRIVATE void _sg_init_image_slot(_sg_image* img) { |
6351 | SOKOL_ASSERT(img); |
6352 | memset(img, 0, sizeof(_sg_image)); |
6353 | } |
6354 | |
6355 | typedef struct { |
6356 | int size; |
6357 | } _sg_uniform_block; |
6358 | |
6359 | typedef struct { |
6360 | sg_image_type type; |
6361 | } _sg_shader_image; |
6362 | |
6363 | typedef struct { |
6364 | int num_uniform_blocks; |
6365 | int num_images; |
6366 | _sg_uniform_block uniform_blocks[SG_MAX_SHADERSTAGE_UBS]; |
6367 | _sg_shader_image images[SG_MAX_SHADERSTAGE_IMAGES]; |
6368 | uint32_t mtl_lib; |
6369 | uint32_t mtl_func; |
6370 | } _sg_shader_stage; |
6371 | |
6372 | typedef struct { |
6373 | _sg_slot slot; |
6374 | _sg_shader_stage stage[SG_NUM_SHADER_STAGES]; |
6375 | } _sg_shader; |
6376 | |
6377 | _SOKOL_PRIVATE void _sg_init_shader_slot(_sg_shader* shd) { |
6378 | SOKOL_ASSERT(shd); |
6379 | memset(shd, 0, sizeof(_sg_shader)); |
6380 | } |
6381 | |
6382 | typedef struct { |
6383 | _sg_slot slot; |
6384 | _sg_shader* shader; |
6385 | sg_shader shader_id; |
6386 | bool vertex_layout_valid[SG_MAX_SHADERSTAGE_BUFFERS]; |
6387 | int color_attachment_count; |
6388 | sg_pixel_format color_format; |
6389 | sg_pixel_format depth_format; |
6390 | int sample_count; |
6391 | float depth_bias; |
6392 | float depth_bias_slope_scale; |
6393 | float depth_bias_clamp; |
6394 | MTLPrimitiveType mtl_prim_type; |
6395 | sg_index_type index_type; |
6396 | NSUInteger mtl_index_size; |
6397 | MTLIndexType mtl_index_type; |
6398 | MTLCullMode mtl_cull_mode; |
6399 | MTLWinding mtl_winding; |
6400 | float blend_color[4]; |
6401 | uint32_t mtl_stencil_ref; |
6402 | uint32_t mtl_rps; |
6403 | uint32_t mtl_dss; |
6404 | } _sg_pipeline; |
6405 | |
6406 | _SOKOL_PRIVATE void _sg_init_pipeline_slot(_sg_pipeline* pip) { |
6407 | SOKOL_ASSERT(pip); |
6408 | memset(pip, 0, sizeof(_sg_pipeline)); |
6409 | } |
6410 | |
6411 | typedef struct { |
6412 | _sg_image* image; |
6413 | sg_image image_id; |
6414 | int mip_level; |
6415 | int slice; |
6416 | } _sg_attachment; |
6417 | |
6418 | typedef struct { |
6419 | _sg_slot slot; |
6420 | int num_color_atts; |
6421 | _sg_attachment color_atts[SG_MAX_COLOR_ATTACHMENTS]; |
6422 | _sg_attachment ds_att; |
6423 | } _sg_pass; |
6424 | |
6425 | _SOKOL_PRIVATE void _sg_init_pass_slot(_sg_pass* pass) { |
6426 | SOKOL_ASSERT(pass); |
6427 | memset(pass, 0, sizeof(_sg_pass)); |
6428 | } |
6429 | |
6430 | typedef struct { |
6431 | _sg_slot slot; |
6432 | } _sg_context; |
6433 | |
6434 | _SOKOL_PRIVATE void _sg_init_context_slot(_sg_context* ctx) { |
6435 | SOKOL_ASSERT(ctx); |
6436 | memset(ctx, 0, sizeof(_sg_context)); |
6437 | } |
6438 | |
6439 | /*-- a simple state cache for the resource bindings --------------------------*/ |
6440 | static const _sg_pipeline* _sg_mtl_cur_pipeline; |
6441 | static sg_pipeline _sg_mtl_cur_pipeline_id; |
6442 | static const _sg_buffer* _sg_mtl_cur_indexbuffer; |
6443 | static int _sg_mtl_cur_indexbuffer_offset; |
6444 | static sg_buffer _sg_mtl_cur_indexbuffer_id; |
6445 | static const _sg_buffer* _sg_mtl_cur_vertexbuffers[SG_MAX_SHADERSTAGE_BUFFERS]; |
6446 | static int _sg_mtl_cur_vertexbuffer_offsets[SG_MAX_SHADERSTAGE_BUFFERS]; |
6447 | static sg_buffer _sg_mtl_cur_vertexbuffer_ids[SG_MAX_SHADERSTAGE_BUFFERS]; |
6448 | static const _sg_image* _sg_mtl_cur_vs_images[SG_MAX_SHADERSTAGE_IMAGES]; |
6449 | static sg_image _sg_mtl_cur_vs_image_ids[SG_MAX_SHADERSTAGE_IMAGES]; |
6450 | static const _sg_image* _sg_mtl_cur_fs_images[SG_MAX_SHADERSTAGE_IMAGES]; |
6451 | static sg_image _sg_mtl_cur_fs_image_ids[SG_MAX_SHADERSTAGE_IMAGES]; |
6452 | |
6453 | _SOKOL_PRIVATE void _sg_mtl_clear_state_cache() { |
6454 | _sg_mtl_cur_pipeline = 0; |
6455 | _sg_mtl_cur_pipeline_id.id = SG_INVALID_ID; |
6456 | _sg_mtl_cur_indexbuffer = 0; |
6457 | _sg_mtl_cur_indexbuffer_offset = 0; |
6458 | _sg_mtl_cur_indexbuffer_id.id = SG_INVALID_ID; |
6459 | for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { |
6460 | _sg_mtl_cur_vertexbuffers[i] = 0; |
6461 | _sg_mtl_cur_vertexbuffer_offsets[i] = 0; |
6462 | _sg_mtl_cur_vertexbuffer_ids[i].id = SG_INVALID_ID; |
6463 | } |
6464 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
6465 | _sg_mtl_cur_vs_images[i] = 0; |
6466 | _sg_mtl_cur_vs_image_ids[i].id = SG_INVALID_ID; |
6467 | _sg_mtl_cur_fs_images[i] = 0; |
6468 | _sg_mtl_cur_fs_image_ids[i].id = SG_INVALID_ID; |
6469 | } |
6470 | } |
6471 | |
6472 | /*-- main Metal backend state and functions ----------------------------------*/ |
6473 | static bool _sg_mtl_valid; |
6474 | static const void*(*_sg_mtl_renderpass_descriptor_cb)(void); |
6475 | static const void*(*_sg_mtl_drawable_cb)(void); |
6476 | static id<MTLDevice> _sg_mtl_device; |
6477 | static id<MTLCommandQueue> _sg_mtl_cmd_queue; |
6478 | static id<MTLCommandBuffer> _sg_mtl_cmd_buffer; |
6479 | static id<MTLRenderCommandEncoder> _sg_mtl_cmd_encoder; |
6480 | static uint32_t _sg_mtl_frame_index; |
6481 | static uint32_t _sg_mtl_cur_frame_rotate_index; |
6482 | static uint32_t _sg_mtl_ub_size; |
6483 | static uint32_t _sg_mtl_cur_ub_offset; |
6484 | static uint8_t* _sg_mtl_cur_ub_base_ptr; |
6485 | static id<MTLBuffer> _sg_mtl_uniform_buffers[SG_NUM_INFLIGHT_FRAMES]; |
6486 | static dispatch_semaphore_t _sg_mtl_sem; |
6487 | static bool _sg_mtl_in_pass; |
6488 | static bool _sg_mtl_pass_valid; |
6489 | static int _sg_mtl_cur_width; |
6490 | static int _sg_mtl_cur_height; |
6491 | |
6492 | _SOKOL_PRIVATE void _sg_setup_backend(const sg_desc* desc) { |
6493 | SOKOL_ASSERT(desc); |
6494 | SOKOL_ASSERT(desc->mtl_device); |
6495 | SOKOL_ASSERT(desc->mtl_renderpass_descriptor_cb); |
6496 | SOKOL_ASSERT(desc->mtl_drawable_cb); |
6497 | _sg_mtl_init_pool(desc); |
6498 | _sg_mtl_init_sampler_cache(desc); |
6499 | _sg_mtl_clear_state_cache(); |
6500 | _sg_mtl_valid = true; |
6501 | _sg_mtl_renderpass_descriptor_cb = desc->mtl_renderpass_descriptor_cb; |
6502 | _sg_mtl_drawable_cb = desc->mtl_drawable_cb; |
6503 | _sg_mtl_in_pass = false; |
6504 | _sg_mtl_pass_valid = false; |
6505 | _sg_mtl_cur_width = 0; |
6506 | _sg_mtl_cur_height = 0; |
6507 | _sg_mtl_frame_index = 1; |
6508 | _sg_mtl_cur_frame_rotate_index = 0; |
6509 | _sg_mtl_cur_ub_offset = 0; |
6510 | _sg_mtl_cur_ub_base_ptr = 0; |
6511 | _sg_mtl_device = (__bridge id<MTLDevice>) desc->mtl_device; |
6512 | _sg_mtl_sem = dispatch_semaphore_create(SG_NUM_INFLIGHT_FRAMES); |
6513 | _sg_mtl_cmd_queue = [_sg_mtl_device newCommandQueue]; |
6514 | _sg_mtl_ub_size = _sg_def(desc->mtl_global_uniform_buffer_size, _SG_MTL_DEFAULT_UB_SIZE); |
6515 | MTLResourceOptions res_opts = MTLResourceCPUCacheModeWriteCombined; |
6516 | #if !TARGET_OS_IPHONE |
6517 | res_opts |= MTLResourceStorageModeManaged; |
6518 | #endif |
6519 | for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { |
6520 | _sg_mtl_uniform_buffers[i] = [_sg_mtl_device |
6521 | newBufferWithLength:_sg_mtl_ub_size |
6522 | options:res_opts |
6523 | ]; |
6524 | } |
6525 | } |
6526 | |
6527 | _SOKOL_PRIVATE void _sg_discard_backend() { |
6528 | SOKOL_ASSERT(_sg_mtl_valid); |
6529 | /* wait for the last frame to finish */ |
6530 | for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { |
6531 | dispatch_semaphore_wait(_sg_mtl_sem, DISPATCH_TIME_FOREVER); |
6532 | } |
6533 | _sg_mtl_destroy_sampler_cache(_sg_mtl_frame_index); |
6534 | _sg_mtl_garbage_collect(_sg_mtl_frame_index + SG_NUM_INFLIGHT_FRAMES + 2); |
6535 | _sg_mtl_destroy_pool(); |
6536 | _sg_mtl_valid = false; |
6537 | _sg_mtl_cmd_encoder = nil; |
6538 | _sg_mtl_cmd_buffer = nil; |
6539 | _sg_mtl_cmd_queue = nil; |
6540 | for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { |
6541 | _sg_mtl_uniform_buffers[i] = nil; |
6542 | } |
6543 | _sg_mtl_device = nil; |
6544 | } |
6545 | |
6546 | _SOKOL_PRIVATE bool _sg_query_feature(sg_feature f) { |
6547 | switch (f) { |
6548 | case SG_FEATURE_INSTANCING: |
6549 | #if !TARGET_OS_IPHONE |
6550 | case SG_FEATURE_TEXTURE_COMPRESSION_DXT: |
6551 | #else |
6552 | case SG_FEATURE_TEXTURE_COMPRESSION_PVRTC: |
6553 | case SG_FEATURE_TEXTURE_COMPRESSION_ETC2: |
6554 | #endif |
6555 | case SG_FEATURE_TEXTURE_FLOAT: |
6556 | case SG_FEATURE_ORIGIN_TOP_LEFT: |
6557 | case SG_FEATURE_MSAA_RENDER_TARGETS: |
6558 | case SG_FEATURE_PACKED_VERTEX_FORMAT_10_2: |
6559 | case SG_FEATURE_MULTIPLE_RENDER_TARGET: |
6560 | case SG_FEATURE_IMAGETYPE_3D: |
6561 | case SG_FEATURE_IMAGETYPE_ARRAY: |
6562 | return true; |
6563 | default: |
6564 | return false; |
6565 | } |
6566 | } |
6567 | |
6568 | _SOKOL_PRIVATE void _sg_reset_state_cache() { |
6569 | _sg_mtl_clear_state_cache(); |
6570 | } |
6571 | |
6572 | _SOKOL_PRIVATE void _sg_create_context(_sg_context* ctx) { |
6573 | SOKOL_ASSERT(ctx); |
6574 | SOKOL_ASSERT(ctx->slot.state == SG_RESOURCESTATE_ALLOC); |
6575 | ctx->slot.state = SG_RESOURCESTATE_VALID; |
6576 | } |
6577 | |
6578 | _SOKOL_PRIVATE void _sg_destroy_context(_sg_context* ctx) { |
6579 | SOKOL_ASSERT(ctx); |
6580 | _sg_init_context_slot(ctx); |
6581 | } |
6582 | |
6583 | _SOKOL_PRIVATE void _sg_activate_context(_sg_context* ctx) { |
6584 | _sg_reset_state_cache(); |
6585 | } |
6586 | |
6587 | _SOKOL_PRIVATE void _sg_create_buffer(_sg_buffer* buf, const sg_buffer_desc* desc) { |
6588 | SOKOL_ASSERT(buf && desc); |
6589 | SOKOL_ASSERT(buf->slot.state == SG_RESOURCESTATE_ALLOC); |
6590 | buf->size = desc->size; |
6591 | buf->append_pos = 0; |
6592 | buf->append_overflow = false; |
6593 | buf->type = _sg_def(desc->type, SG_BUFFERTYPE_VERTEXBUFFER); |
6594 | buf->usage = _sg_def(desc->usage, SG_USAGE_IMMUTABLE); |
6595 | buf->update_frame_index = 0; |
6596 | buf->append_frame_index = 0; |
6597 | buf->num_slots = (buf->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES; |
6598 | buf->active_slot = 0; |
6599 | const bool injected = (0 != desc->mtl_buffers[0]); |
6600 | MTLResourceOptions mtl_options = _sg_mtl_buffer_resource_options(buf->usage); |
6601 | for (int slot = 0; slot < buf->num_slots; slot++) { |
6602 | id<MTLBuffer> mtl_buf; |
6603 | if (injected) { |
6604 | SOKOL_ASSERT(desc->mtl_buffers[slot]); |
6605 | mtl_buf = (__bridge id<MTLBuffer>) desc->mtl_buffers[slot]; |
6606 | } |
6607 | else { |
6608 | if (buf->usage == SG_USAGE_IMMUTABLE) { |
6609 | SOKOL_ASSERT(desc->content); |
6610 | mtl_buf = [_sg_mtl_device newBufferWithBytes:desc->content length:buf->size options:mtl_options]; |
6611 | } |
6612 | else { |
6613 | mtl_buf = [_sg_mtl_device newBufferWithLength:buf->size options:mtl_options]; |
6614 | } |
6615 | } |
6616 | buf->mtl_buf[slot] = _sg_mtl_add_resource(mtl_buf); |
6617 | } |
6618 | buf->slot.state = SG_RESOURCESTATE_VALID; |
6619 | } |
6620 | |
6621 | _SOKOL_PRIVATE void _sg_destroy_buffer(_sg_buffer* buf) { |
6622 | SOKOL_ASSERT(buf); |
6623 | if (buf->slot.state == SG_RESOURCESTATE_VALID) { |
6624 | for (int slot = 0; slot < buf->num_slots; slot++) { |
6625 | _sg_mtl_release_resource(_sg_mtl_frame_index, buf->mtl_buf[slot]); |
6626 | } |
6627 | } |
6628 | _sg_init_buffer_slot(buf); |
6629 | } |
6630 | |
6631 | _SOKOL_PRIVATE void _sg_mtl_copy_image_content(const _sg_image* img, __unsafe_unretained id<MTLTexture> mtl_tex, const sg_image_content* content) { |
6632 | const int num_faces = (img->type == SG_IMAGETYPE_CUBE) ? 6:1; |
6633 | const int num_slices = (img->type == SG_IMAGETYPE_ARRAY) ? img->depth : 1; |
6634 | for (int face_index = 0; face_index < num_faces; face_index++) { |
6635 | for (int mip_index = 0; mip_index < img->num_mipmaps; mip_index++) { |
6636 | SOKOL_ASSERT(content->subimage[face_index][mip_index].ptr); |
6637 | SOKOL_ASSERT(content->subimage[face_index][mip_index].size > 0); |
6638 | const uint8_t* data_ptr = (const uint8_t*)content->subimage[face_index][mip_index].ptr; |
6639 | const int mip_width = _sg_max(img->width >> mip_index, 1); |
6640 | const int mip_height = _sg_max(img->height >> mip_index, 1); |
6641 | /* special case PVRTC formats: bytePerRow must be 0 */ |
6642 | int bytes_per_row = 0; |
6643 | int bytes_per_slice = _sg_surface_pitch(img->pixel_format, mip_width, mip_height); |
6644 | if (!_sg_mtl_is_pvrtc(img->pixel_format)) { |
6645 | bytes_per_row = _sg_row_pitch(img->pixel_format, mip_width); |
6646 | } |
6647 | MTLRegion region; |
6648 | if (img->type == SG_IMAGETYPE_3D) { |
6649 | const int mip_depth = _sg_max(img->depth >> mip_index, 1); |
6650 | region = MTLRegionMake3D(0, 0, 0, mip_width, mip_height, mip_depth); |
6651 | /* FIXME: apparently the minimal bytes_per_image size for 3D texture |
6652 | is 4 KByte... somehow need to handle this */ |
6653 | } |
6654 | else { |
6655 | region = MTLRegionMake2D(0, 0, mip_width, mip_height); |
6656 | } |
6657 | for (int slice_index = 0; slice_index < num_slices; slice_index++) { |
6658 | const int mtl_slice_index = (img->type == SG_IMAGETYPE_CUBE) ? face_index : slice_index; |
6659 | const int slice_offset = slice_index * bytes_per_slice; |
6660 | SOKOL_ASSERT((slice_offset + bytes_per_slice) <= (int)content->subimage[face_index][mip_index].size); |
6661 | [mtl_tex replaceRegion:region |
6662 | mipmapLevel:mip_index |
6663 | slice:mtl_slice_index |
6664 | withBytes:data_ptr + slice_offset |
6665 | bytesPerRow:bytes_per_row |
6666 | bytesPerImage:bytes_per_slice]; |
6667 | } |
6668 | } |
6669 | } |
6670 | } |
6671 | |
6672 | _SOKOL_PRIVATE void _sg_create_image(_sg_image* img, const sg_image_desc* desc) { |
6673 | SOKOL_ASSERT(img && desc); |
6674 | SOKOL_ASSERT(img->slot.state == SG_RESOURCESTATE_ALLOC); |
6675 | img->type = _sg_def(desc->type, SG_IMAGETYPE_2D); |
6676 | img->render_target = desc->render_target; |
6677 | img->width = desc->width; |
6678 | img->height = desc->height; |
6679 | img->depth = _sg_def(desc->depth, 1); |
6680 | img->num_mipmaps = _sg_def(desc->num_mipmaps, 1); |
6681 | img->usage = _sg_def(desc->usage, SG_USAGE_IMMUTABLE); |
6682 | img->pixel_format = _sg_def(desc->pixel_format, SG_PIXELFORMAT_RGBA8); |
6683 | img->sample_count = _sg_def(desc->sample_count, 1); |
6684 | img->min_filter = _sg_def(desc->min_filter, SG_FILTER_NEAREST); |
6685 | img->mag_filter = _sg_def(desc->mag_filter, SG_FILTER_NEAREST); |
6686 | img->wrap_u = _sg_def(desc->wrap_u, SG_WRAP_REPEAT); |
6687 | img->wrap_v = _sg_def(desc->wrap_v, SG_WRAP_REPEAT); |
6688 | img->wrap_w = _sg_def(desc->wrap_w, SG_WRAP_REPEAT); |
6689 | img->max_anisotropy = _sg_def(desc->max_anisotropy, 1); |
6690 | img->upd_frame_index = 0; |
img->num_slots = (img->usage == SG_USAGE_IMMUTABLE) ? 1 : SG_NUM_INFLIGHT_FRAMES;
6692 | img->active_slot = 0; |
6693 | const bool injected = (0 != desc->mtl_textures[0]); |
6694 | |
6695 | /* first initialize all Metal resource pool slots to 'empty' */ |
6696 | for (int i = 0; i < SG_NUM_INFLIGHT_FRAMES; i++) { |
6697 | img->mtl_tex[i] = _sg_mtl_add_resource(nil); |
6698 | } |
6699 | img->mtl_sampler_state = _sg_mtl_add_resource(nil); |
6700 | img->mtl_depth_tex = _sg_mtl_add_resource(nil); |
6701 | img->mtl_msaa_tex = _sg_mtl_add_resource(nil); |
6702 | |
6703 | /* initialize a Metal texture descriptor with common attributes */ |
6704 | MTLTextureDescriptor* mtl_desc = [[MTLTextureDescriptor alloc] init]; |
6705 | mtl_desc.textureType = _sg_mtl_texture_type(img->type); |
6706 | if (img->render_target) { |
6707 | if (_sg_is_valid_rendertarget_color_format(img->pixel_format)) { |
6708 | mtl_desc.pixelFormat = _sg_mtl_rendertarget_color_format(img->pixel_format); |
6709 | } |
6710 | else { |
6711 | mtl_desc.pixelFormat = _sg_mtl_rendertarget_depth_format(img->pixel_format); |
6712 | } |
6713 | } |
6714 | else { |
6715 | mtl_desc.pixelFormat = _sg_mtl_texture_format(img->pixel_format); |
6716 | } |
6717 | if (MTLPixelFormatInvalid == mtl_desc.pixelFormat) { |
6718 | SOKOL_LOG("Unsupported texture pixel format!\n" ); |
6719 | img->slot.state = SG_RESOURCESTATE_FAILED; |
6720 | return; |
6721 | } |
6722 | mtl_desc.width = img->width; |
6723 | mtl_desc.height = img->height; |
6724 | if (SG_IMAGETYPE_3D == img->type) { |
6725 | mtl_desc.depth = img->depth; |
6726 | } |
6727 | else { |
6728 | mtl_desc.depth = 1; |
6729 | } |
6730 | mtl_desc.mipmapLevelCount = img->num_mipmaps; |
6731 | if (SG_IMAGETYPE_ARRAY == img->type) { |
6732 | mtl_desc.arrayLength = img->depth; |
6733 | } |
6734 | else { |
6735 | mtl_desc.arrayLength = 1; |
6736 | } |
6737 | if (img->render_target) { |
6738 | mtl_desc.resourceOptions = MTLResourceStorageModePrivate; |
6739 | mtl_desc.cpuCacheMode = MTLCPUCacheModeDefaultCache; |
6740 | mtl_desc.storageMode = MTLStorageModePrivate; |
6741 | mtl_desc.usage |= MTLTextureUsageRenderTarget; |
6742 | } |
6743 | |
6744 | /* special case depth-stencil-buffer? */ |
6745 | if (_sg_is_valid_rendertarget_depth_format(img->pixel_format)) { |
6746 | /* create only a depth texture */ |
6747 | SOKOL_ASSERT(img->render_target); |
6748 | SOKOL_ASSERT(img->type == SG_IMAGETYPE_2D); |
6749 | SOKOL_ASSERT(img->num_mipmaps == 1); |
6750 | SOKOL_ASSERT(!injected); |
6751 | if (img->sample_count > 1) { |
6752 | mtl_desc.textureType = MTLTextureType2DMultisample; |
6753 | mtl_desc.sampleCount = img->sample_count; |
6754 | } |
6755 | id<MTLTexture> tex = [_sg_mtl_device newTextureWithDescriptor:mtl_desc]; |
6756 | SOKOL_ASSERT(nil != tex); |
6757 | img->mtl_depth_tex = _sg_mtl_add_resource(tex); |
6758 | } |
6759 | else { |
6760 | /* create the color texture(s) */ |
6761 | for (int slot = 0; slot < img->num_slots; slot++) { |
6762 | id<MTLTexture> tex; |
6763 | if (injected) { |
6764 | SOKOL_ASSERT(desc->mtl_textures[slot]); |
6765 | tex = (__bridge id<MTLTexture>) desc->mtl_textures[slot]; |
6766 | } |
6767 | else { |
6768 | tex = [_sg_mtl_device newTextureWithDescriptor:mtl_desc]; |
6769 | if ((img->usage == SG_USAGE_IMMUTABLE) && !img->render_target) { |
6770 | _sg_mtl_copy_image_content(img, tex, &desc->content); |
6771 | } |
6772 | } |
6773 | img->mtl_tex[slot] = _sg_mtl_add_resource(tex); |
6774 | } |
6775 | |
6776 | /* if MSAA color render target, create an additional MSAA render-surface texture */ |
6777 | if (img->render_target && (img->sample_count > 1)) { |
6778 | mtl_desc.textureType = MTLTextureType2DMultisample; |
6779 | mtl_desc.depth = 1; |
6780 | mtl_desc.arrayLength = 1; |
6781 | mtl_desc.mipmapLevelCount = 1; |
6782 | mtl_desc.sampleCount = img->sample_count; |
6783 | id<MTLTexture> tex = [_sg_mtl_device newTextureWithDescriptor:mtl_desc]; |
6784 | img->mtl_msaa_tex = _sg_mtl_add_resource(tex); |
6785 | } |
6786 | |
6787 | /* create (possibly shared) sampler state */ |
6788 | img->mtl_sampler_state = _sg_mtl_create_sampler(_sg_mtl_device, desc); |
6789 | } |
6790 | img->slot.state = SG_RESOURCESTATE_VALID; |
6791 | } |
6792 | |
6793 | _SOKOL_PRIVATE void _sg_destroy_image(_sg_image* img) { |
6794 | SOKOL_ASSERT(img); |
6795 | if (img->slot.state == SG_RESOURCESTATE_VALID) { |
6796 | for (int slot = 0; slot < img->num_slots; slot++) { |
6797 | _sg_mtl_release_resource(_sg_mtl_frame_index, img->mtl_tex[slot]); |
6798 | } |
6799 | _sg_mtl_release_resource(_sg_mtl_frame_index, img->mtl_depth_tex); |
6800 | _sg_mtl_release_resource(_sg_mtl_frame_index, img->mtl_msaa_tex); |
6801 | /* NOTE: sampler state objects are shared and not released until shutdown */ |
6802 | } |
6803 | _sg_init_image_slot(img); |
6804 | } |
6805 | |
6806 | _SOKOL_PRIVATE id<MTLLibrary> _sg_mtl_compile_library(const char* src) { |
6807 | NSError* err = NULL; |
6808 | id<MTLLibrary> lib = [_sg_mtl_device |
6809 | newLibraryWithSource:[NSString stringWithUTF8String:src] |
6810 | options:nil |
6811 | error:&err |
6812 | ]; |
6813 | if (err) { |
6814 | SOKOL_LOG([err.localizedDescription UTF8String]); |
6815 | } |
6816 | return lib; |
6817 | } |
6818 | |
6819 | _SOKOL_PRIVATE id<MTLLibrary> _sg_mtl_library_from_bytecode(const uint8_t* ptr, int num_bytes) { |
6820 | NSError* err = NULL; |
6821 | dispatch_data_t lib_data = dispatch_data_create(ptr, num_bytes, NULL, DISPATCH_DATA_DESTRUCTOR_DEFAULT); |
6822 | id<MTLLibrary> lib = [_sg_mtl_device newLibraryWithData:lib_data error:&err]; |
6823 | if (err) { |
6824 | SOKOL_LOG([err.localizedDescription UTF8String]); |
6825 | } |
6826 | return lib; |
6827 | } |
6828 | |
6829 | _SOKOL_PRIVATE void _sg_create_shader(_sg_shader* shd, const sg_shader_desc* desc) { |
6830 | SOKOL_ASSERT(shd && desc); |
6831 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_ALLOC); |
6832 | |
6833 | /* uniform block sizes and image types */ |
6834 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
6835 | const sg_shader_stage_desc* stage_desc = (stage_index == SG_SHADERSTAGE_VS) ? &desc->vs : &desc->fs; |
6836 | _sg_shader_stage* stage = &shd->stage[stage_index]; |
6837 | SOKOL_ASSERT(stage->num_uniform_blocks == 0); |
6838 | for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { |
6839 | const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; |
6840 | if (0 == ub_desc->size) { |
6841 | break; |
6842 | } |
6843 | _sg_uniform_block* ub = &stage->uniform_blocks[ub_index]; |
6844 | ub->size = ub_desc->size; |
6845 | stage->num_uniform_blocks++; |
6846 | } |
6847 | SOKOL_ASSERT(stage->num_images == 0); |
6848 | for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { |
6849 | const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; |
6850 | if (img_desc->type == _SG_IMAGETYPE_DEFAULT) { |
6851 | break; |
6852 | } |
6853 | stage->images[img_index].type = img_desc->type; |
6854 | stage->num_images++; |
6855 | } |
6856 | } |
6857 | |
    /* create Metal library objects and look up entry functions */
6859 | id<MTLLibrary> vs_lib; |
6860 | id<MTLLibrary> fs_lib; |
6861 | id<MTLFunction> vs_func; |
6862 | id<MTLFunction> fs_func; |
    const char* vs_entry = _sg_def(desc->vs.entry, "_main");
    const char* fs_entry = _sg_def(desc->fs.entry, "_main");
6865 | if (desc->vs.byte_code && desc->fs.byte_code) { |
6866 | /* separate byte code provided */ |
6867 | vs_lib = _sg_mtl_library_from_bytecode(desc->vs.byte_code, desc->vs.byte_code_size); |
6868 | fs_lib = _sg_mtl_library_from_bytecode(desc->fs.byte_code, desc->fs.byte_code_size); |
6869 | if (nil == vs_lib || nil == fs_lib) { |
6870 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
6871 | return; |
6872 | } |
6873 | vs_func = [vs_lib newFunctionWithName:[NSString stringWithUTF8String:vs_entry]]; |
6874 | fs_func = [fs_lib newFunctionWithName:[NSString stringWithUTF8String:fs_entry]]; |
6875 | } |
6876 | else if (desc->vs.source && desc->fs.source) { |
6877 | /* separate sources provided */ |
6878 | vs_lib = _sg_mtl_compile_library(desc->vs.source); |
6879 | fs_lib = _sg_mtl_compile_library(desc->fs.source); |
6880 | if (nil == vs_lib || nil == fs_lib) { |
6881 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
6882 | return; |
6883 | } |
6884 | vs_func = [vs_lib newFunctionWithName:[NSString stringWithUTF8String:vs_entry]]; |
6885 | fs_func = [fs_lib newFunctionWithName:[NSString stringWithUTF8String:fs_entry]]; |
6886 | } |
6887 | else { |
6888 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
6889 | return; |
6890 | } |
6891 | if (nil == vs_func) { |
6892 | SOKOL_LOG("vertex shader entry function not found\n" ); |
6893 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
6894 | return; |
6895 | } |
6896 | if (nil == fs_func) { |
6897 | SOKOL_LOG("fragment shader entry function not found\n" ); |
6898 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
6899 | return; |
6900 | } |
6901 | /* it is legal to call _sg_mtl_add_resource with a nil value, this will return a special 0xFFFFFFFF index */ |
6902 | shd->stage[SG_SHADERSTAGE_VS].mtl_lib = _sg_mtl_add_resource(vs_lib); |
6903 | shd->stage[SG_SHADERSTAGE_FS].mtl_lib = _sg_mtl_add_resource(fs_lib); |
6904 | shd->stage[SG_SHADERSTAGE_VS].mtl_func = _sg_mtl_add_resource(vs_func); |
6905 | shd->stage[SG_SHADERSTAGE_FS].mtl_func = _sg_mtl_add_resource(fs_func); |
6906 | shd->slot.state = SG_RESOURCESTATE_VALID; |
6907 | } |
6908 | |
6909 | _SOKOL_PRIVATE void _sg_destroy_shader(_sg_shader* shd) { |
6910 | SOKOL_ASSERT(shd); |
6911 | if (shd->slot.state == SG_RESOURCESTATE_VALID) { |
6912 | /* it is valid to call _sg_mtl_release_resource with the special 0xFFFFFFFF index */ |
6913 | _sg_mtl_release_resource(_sg_mtl_frame_index, shd->stage[SG_SHADERSTAGE_VS].mtl_func); |
6914 | _sg_mtl_release_resource(_sg_mtl_frame_index, shd->stage[SG_SHADERSTAGE_VS].mtl_lib); |
6915 | _sg_mtl_release_resource(_sg_mtl_frame_index, shd->stage[SG_SHADERSTAGE_FS].mtl_func); |
6916 | _sg_mtl_release_resource(_sg_mtl_frame_index, shd->stage[SG_SHADERSTAGE_FS].mtl_lib); |
6917 | } |
6918 | _sg_init_shader_slot(shd); |
6919 | } |
6920 | |
6921 | _SOKOL_PRIVATE void _sg_create_pipeline(_sg_pipeline* pip, _sg_shader* shd, const sg_pipeline_desc* desc) { |
6922 | SOKOL_ASSERT(pip && shd && desc); |
6923 | SOKOL_ASSERT(pip->slot.state == SG_RESOURCESTATE_ALLOC); |
6924 | SOKOL_ASSERT(desc->shader.id == shd->slot.id); |
6925 | SOKOL_ASSERT(shd->slot.state == SG_RESOURCESTATE_VALID); |
6926 | |
6927 | pip->shader = shd; |
6928 | pip->shader_id = desc->shader; |
6929 | pip->color_attachment_count = _sg_def(desc->blend.color_attachment_count, 1); |
6930 | pip->color_format = _sg_def(desc->blend.color_format, SG_PIXELFORMAT_RGBA8); |
6931 | pip->depth_format = _sg_def(desc->blend.depth_format, SG_PIXELFORMAT_DEPTHSTENCIL); |
6932 | pip->sample_count = _sg_def(desc->rasterizer.sample_count, 1); |
6933 | pip->depth_bias = desc->rasterizer.depth_bias; |
6934 | pip->depth_bias_slope_scale = desc->rasterizer.depth_bias_slope_scale; |
6935 | pip->depth_bias_clamp = desc->rasterizer.depth_bias_clamp; |
6936 | sg_primitive_type prim_type = _sg_def(desc->primitive_type, SG_PRIMITIVETYPE_TRIANGLES); |
6937 | pip->mtl_prim_type = _sg_mtl_primitive_type(prim_type); |
6938 | pip->index_type = _sg_def(desc->index_type, SG_INDEXTYPE_NONE); |
6939 | pip->mtl_index_size = _sg_mtl_index_size(pip->index_type); |
6940 | if (SG_INDEXTYPE_NONE != pip->index_type) { |
6941 | pip->mtl_index_type = _sg_mtl_index_type(pip->index_type); |
6942 | } |
6943 | pip->mtl_cull_mode = _sg_mtl_cull_mode(_sg_def(desc->rasterizer.cull_mode, SG_CULLMODE_NONE)); |
6944 | pip->mtl_winding = _sg_mtl_winding(_sg_def(desc->rasterizer.face_winding, SG_FACEWINDING_CW)); |
6945 | pip->mtl_stencil_ref = desc->depth_stencil.stencil_ref; |
6946 | for (int i = 0; i < 4; i++) { |
6947 | pip->blend_color[i] = desc->blend.blend_color[i]; |
6948 | } |
6949 | |
6950 | /* create vertex-descriptor */ |
6951 | MTLVertexDescriptor* vtx_desc = [MTLVertexDescriptor vertexDescriptor]; |
6952 | int auto_offset[SG_MAX_SHADERSTAGE_BUFFERS]; |
6953 | for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { |
6954 | auto_offset[layout_index] = 0; |
6955 | } |
6956 | /* to use computed offsets, *all* attr offsets must be 0 */ |
6957 | bool use_auto_offset = true; |
6958 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
6959 | if (desc->layout.attrs[attr_index].offset != 0) { |
6960 | use_auto_offset = false; |
6961 | break; |
6962 | } |
6963 | } |
6964 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
6965 | const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; |
6966 | if (a_desc->format == SG_VERTEXFORMAT_INVALID) { |
6967 | break; |
6968 | } |
6969 | SOKOL_ASSERT((a_desc->buffer_index >= 0) && (a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS)); |
6970 | vtx_desc.attributes[attr_index].format = _sg_mtl_vertex_format(a_desc->format); |
6971 | vtx_desc.attributes[attr_index].offset = use_auto_offset ? auto_offset[a_desc->buffer_index] : a_desc->offset; |
6972 | vtx_desc.attributes[attr_index].bufferIndex = a_desc->buffer_index + SG_MAX_SHADERSTAGE_UBS; |
6973 | auto_offset[a_desc->buffer_index] += _sg_vertexformat_bytesize(a_desc->format); |
6974 | pip->vertex_layout_valid[a_desc->buffer_index] = true; |
6975 | } |
6976 | for (int layout_index = 0; layout_index < SG_MAX_SHADERSTAGE_BUFFERS; layout_index++) { |
6977 | if (pip->vertex_layout_valid[layout_index]) { |
6978 | const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[layout_index]; |
6979 | const int mtl_vb_slot = layout_index + SG_MAX_SHADERSTAGE_UBS; |
6980 | const int stride = l_desc->stride ? l_desc->stride : auto_offset[layout_index]; |
6981 | SOKOL_ASSERT(stride > 0); |
6982 | vtx_desc.layouts[mtl_vb_slot].stride = stride; |
6983 | vtx_desc.layouts[mtl_vb_slot].stepFunction = _sg_mtl_step_function(_sg_def(l_desc->step_func, SG_VERTEXSTEP_PER_VERTEX)); |
6984 | vtx_desc.layouts[mtl_vb_slot].stepRate = _sg_def(l_desc->step_rate, 1); |
6985 | } |
6986 | } |
6987 | |
6988 | /* render-pipeline descriptor */ |
6989 | MTLRenderPipelineDescriptor* rp_desc = [[MTLRenderPipelineDescriptor alloc] init]; |
6990 | rp_desc.vertexDescriptor = vtx_desc; |
6991 | SOKOL_ASSERT(shd->stage[SG_SHADERSTAGE_VS].mtl_func != _SG_MTL_INVALID_POOL_INDEX); |
6992 | rp_desc.vertexFunction = _sg_mtl_pool[shd->stage[SG_SHADERSTAGE_VS].mtl_func]; |
6993 | SOKOL_ASSERT(shd->stage[SG_SHADERSTAGE_FS].mtl_func != _SG_MTL_INVALID_POOL_INDEX); |
6994 | rp_desc.fragmentFunction = _sg_mtl_pool[shd->stage[SG_SHADERSTAGE_FS].mtl_func]; |
6995 | rp_desc.sampleCount = _sg_def(desc->rasterizer.sample_count, 1); |
6996 | rp_desc.alphaToCoverageEnabled = desc->rasterizer.alpha_to_coverage_enabled; |
6997 | rp_desc.alphaToOneEnabled = NO; |
6998 | rp_desc.rasterizationEnabled = YES; |
6999 | rp_desc.depthAttachmentPixelFormat = _sg_mtl_rendertarget_depth_format(_sg_def(desc->blend.depth_format, SG_PIXELFORMAT_DEPTHSTENCIL)); |
7000 | rp_desc.stencilAttachmentPixelFormat = _sg_mtl_rendertarget_stencil_format(_sg_def(desc->blend.depth_format, SG_PIXELFORMAT_DEPTHSTENCIL)); |
7001 | /* FIXME: this only works on macOS 10.13! |
7002 | for (int i = 0; i < (SG_MAX_SHADERSTAGE_UBS+SG_MAX_SHADERSTAGE_BUFFERS); i++) { |
7003 | rp_desc.vertexBuffers[i].mutability = MTLMutabilityImmutable; |
7004 | } |
7005 | for (int i = 0; i < SG_MAX_SHADERSTAGE_UBS; i++) { |
7006 | rp_desc.fragmentBuffers[i].mutability = MTLMutabilityImmutable; |
7007 | } |
7008 | */ |
7009 | const int att_count = _sg_def(desc->blend.color_attachment_count, 1); |
7010 | for (int i = 0; i < att_count; i++) { |
7011 | rp_desc.colorAttachments[i].pixelFormat = _sg_mtl_rendertarget_color_format(_sg_def(desc->blend.color_format, SG_PIXELFORMAT_RGBA8)); |
7012 | rp_desc.colorAttachments[i].writeMask = _sg_mtl_color_write_mask((sg_color_mask)_sg_def(desc->blend.color_write_mask, SG_COLORMASK_RGBA)); |
7013 | rp_desc.colorAttachments[i].blendingEnabled = desc->blend.enabled; |
7014 | rp_desc.colorAttachments[i].alphaBlendOperation = _sg_mtl_blend_op(_sg_def(desc->blend.op_alpha, SG_BLENDOP_ADD)); |
7015 | rp_desc.colorAttachments[i].rgbBlendOperation = _sg_mtl_blend_op(_sg_def(desc->blend.op_rgb, SG_BLENDOP_ADD)); |
7016 | rp_desc.colorAttachments[i].destinationAlphaBlendFactor = _sg_mtl_blend_factor(_sg_def(desc->blend.dst_factor_alpha, SG_BLENDFACTOR_ZERO)); |
7017 | rp_desc.colorAttachments[i].destinationRGBBlendFactor = _sg_mtl_blend_factor(_sg_def(desc->blend.dst_factor_rgb, SG_BLENDFACTOR_ZERO)); |
7018 | rp_desc.colorAttachments[i].sourceAlphaBlendFactor = _sg_mtl_blend_factor(_sg_def(desc->blend.src_factor_alpha, SG_BLENDFACTOR_ONE)); |
7019 | rp_desc.colorAttachments[i].sourceRGBBlendFactor = _sg_mtl_blend_factor(_sg_def(desc->blend.src_factor_rgb, SG_BLENDFACTOR_ONE)); |
7020 | } |
7021 | NSError* err = NULL; |
7022 | id<MTLRenderPipelineState> mtl_rps = [_sg_mtl_device newRenderPipelineStateWithDescriptor:rp_desc error:&err]; |
7023 | if (nil == mtl_rps) { |
7024 | SOKOL_ASSERT(err); |
7025 | SOKOL_LOG([err.localizedDescription UTF8String]); |
7026 | pip->slot.state = SG_RESOURCESTATE_FAILED; |
7027 | return; |
7028 | } |
7029 | |
7030 | /* depth-stencil-state */ |
7031 | MTLDepthStencilDescriptor* ds_desc = [[MTLDepthStencilDescriptor alloc] init]; |
7032 | ds_desc.depthCompareFunction = _sg_mtl_compare_func(_sg_def(desc->depth_stencil.depth_compare_func, SG_COMPAREFUNC_ALWAYS)); |
7033 | ds_desc.depthWriteEnabled = desc->depth_stencil.depth_write_enabled; |
7034 | if (desc->depth_stencil.stencil_enabled) { |
7035 | const sg_stencil_state* sb = &desc->depth_stencil.stencil_back; |
7036 | ds_desc.backFaceStencil = [[MTLStencilDescriptor alloc] init]; |
7037 | ds_desc.backFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(_sg_def(sb->fail_op, SG_STENCILOP_KEEP)); |
7038 | ds_desc.backFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(_sg_def(sb->depth_fail_op, SG_STENCILOP_KEEP)); |
7039 | ds_desc.backFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(_sg_def(sb->pass_op, SG_STENCILOP_KEEP)); |
7040 | ds_desc.backFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(_sg_def(sb->compare_func, SG_COMPAREFUNC_ALWAYS)); |
7041 | ds_desc.backFaceStencil.readMask = desc->depth_stencil.stencil_read_mask; |
7042 | ds_desc.backFaceStencil.writeMask = desc->depth_stencil.stencil_write_mask; |
7043 | const sg_stencil_state* sf = &desc->depth_stencil.stencil_front; |
7044 | ds_desc.frontFaceStencil = [[MTLStencilDescriptor alloc] init]; |
7045 | ds_desc.frontFaceStencil.stencilFailureOperation = _sg_mtl_stencil_op(_sg_def(sf->fail_op, SG_STENCILOP_KEEP)); |
7046 | ds_desc.frontFaceStencil.depthFailureOperation = _sg_mtl_stencil_op(_sg_def(sf->depth_fail_op, SG_STENCILOP_KEEP)); |
7047 | ds_desc.frontFaceStencil.depthStencilPassOperation = _sg_mtl_stencil_op(_sg_def(sf->pass_op, SG_STENCILOP_KEEP)); |
7048 | ds_desc.frontFaceStencil.stencilCompareFunction = _sg_mtl_compare_func(_sg_def(sf->compare_func, SG_COMPAREFUNC_ALWAYS)); |
7049 | ds_desc.frontFaceStencil.readMask = desc->depth_stencil.stencil_read_mask; |
7050 | ds_desc.frontFaceStencil.writeMask = desc->depth_stencil.stencil_write_mask; |
7051 | } |
7052 | id<MTLDepthStencilState> mtl_dss = [_sg_mtl_device newDepthStencilStateWithDescriptor:ds_desc]; |
7053 | |
7054 | pip->mtl_rps = _sg_mtl_add_resource(mtl_rps); |
7055 | pip->mtl_dss = _sg_mtl_add_resource(mtl_dss); |
7056 | pip->slot.state = SG_RESOURCESTATE_VALID; |
7057 | } |
7058 | |
7059 | _SOKOL_PRIVATE void _sg_destroy_pipeline(_sg_pipeline* pip) { |
7060 | SOKOL_ASSERT(pip); |
7061 | if (pip->slot.state == SG_RESOURCESTATE_VALID) { |
7062 | _sg_mtl_release_resource(_sg_mtl_frame_index, pip->mtl_rps); |
7063 | _sg_mtl_release_resource(_sg_mtl_frame_index, pip->mtl_dss); |
7064 | } |
7065 | _sg_init_pipeline_slot(pip); |
7066 | } |
7067 | |
7068 | _SOKOL_PRIVATE void _sg_create_pass(_sg_pass* pass, _sg_image** att_images, const sg_pass_desc* desc) { |
7069 | SOKOL_ASSERT(pass && desc); |
7070 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_ALLOC); |
7071 | SOKOL_ASSERT(att_images && att_images[0]); |
7072 | |
7073 | /* copy image pointers and desc attributes */ |
7074 | const sg_attachment_desc* att_desc; |
7075 | _sg_attachment* att; |
7076 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
7077 | SOKOL_ASSERT(0 == pass->color_atts[i].image); |
7078 | att_desc = &desc->color_attachments[i]; |
7079 | if (att_desc->image.id != SG_INVALID_ID) { |
7080 | pass->num_color_atts++; |
7081 | SOKOL_ASSERT(att_images[i] && (att_images[i]->slot.id == att_desc->image.id)); |
7082 | SOKOL_ASSERT(_sg_is_valid_rendertarget_color_format(att_images[i]->pixel_format)); |
7083 | att = &pass->color_atts[i]; |
7084 | SOKOL_ASSERT((att->image == 0) && (att->image_id.id == SG_INVALID_ID)); |
7085 | att->image = att_images[i]; |
7086 | att->image_id = att_desc->image; |
7087 | att->mip_level = att_desc->mip_level; |
7088 | att->slice = att_desc->slice; |
7089 | } |
7090 | } |
7091 | SOKOL_ASSERT(0 == pass->ds_att.image); |
7092 | att_desc = &desc->depth_stencil_attachment; |
7093 | const int ds_img_index = SG_MAX_COLOR_ATTACHMENTS; |
7094 | if (att_desc->image.id != SG_INVALID_ID) { |
7095 | SOKOL_ASSERT(att_images[ds_img_index] && (att_images[ds_img_index]->slot.id == att_desc->image.id)); |
7096 | SOKOL_ASSERT(_sg_is_valid_rendertarget_depth_format(att_images[ds_img_index]->pixel_format)); |
7097 | att = &pass->ds_att; |
7098 | SOKOL_ASSERT((att->image == 0) && (att->image_id.id == SG_INVALID_ID)); |
7099 | att->image = att_images[ds_img_index]; |
7100 | att->image_id = att_desc->image; |
7101 | att->mip_level = att_desc->mip_level; |
7102 | att->slice = att_desc->slice; |
7103 | } |
7104 | pass->slot.state = SG_RESOURCESTATE_VALID; |
7105 | } |
7106 | |
7107 | _SOKOL_PRIVATE void _sg_destroy_pass(_sg_pass* pass) { |
7108 | SOKOL_ASSERT(pass); |
7109 | _sg_init_pass_slot(pass); |
7110 | } |
7111 | |
7112 | _SOKOL_PRIVATE void _sg_begin_pass(_sg_pass* pass, const sg_pass_action* action, int w, int h) { |
7113 | SOKOL_ASSERT(action); |
7114 | SOKOL_ASSERT(!_sg_mtl_in_pass); |
7115 | SOKOL_ASSERT(_sg_mtl_cmd_queue); |
7116 | SOKOL_ASSERT(!_sg_mtl_cmd_encoder); |
7117 | SOKOL_ASSERT(_sg_mtl_renderpass_descriptor_cb); |
7118 | _sg_mtl_in_pass = true; |
7119 | _sg_mtl_cur_width = w; |
7120 | _sg_mtl_cur_height = h; |
7121 | _sg_mtl_clear_state_cache(); |
7122 | |
7123 | /* if this is the first pass in the frame, create a command buffer */ |
7124 | if (nil == _sg_mtl_cmd_buffer) { |
7125 | /* block until the oldest frame in flight has finished */ |
7126 | dispatch_semaphore_wait(_sg_mtl_sem, DISPATCH_TIME_FOREVER); |
7127 | _sg_mtl_cmd_buffer = [_sg_mtl_cmd_queue commandBufferWithUnretainedReferences]; |
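        /* NOTE: the command buffer is created with unretained references; the
           Metal objects are kept alive by sokol_gfx itself through its resource
           pool and the deferred-release mechanism (_sg_mtl_release_resource) */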
7128 | } |
7129 | |
    /* if this is the first pass in the frame, get the uniform buffer base pointer */
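    /* (the base pointer points into the CPU-visible uniform buffer of the current
       frame slot; _sg_apply_uniform_block copies uniform data into it and only
       records buffer offsets on the command encoder) */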
7131 | if (0 == _sg_mtl_cur_ub_base_ptr) { |
7132 | _sg_mtl_cur_ub_base_ptr = (uint8_t*)[_sg_mtl_uniform_buffers[_sg_mtl_cur_frame_rotate_index] contents]; |
7133 | } |
7134 | |
7135 | /* initialize a render pass descriptor */ |
7136 | MTLRenderPassDescriptor* pass_desc = nil; |
7137 | if (pass) { |
7138 | /* offscreen render pass */ |
7139 | pass_desc = [MTLRenderPassDescriptor renderPassDescriptor]; |
7140 | } |
    else {
        /* default render pass, obtain the render pass descriptor from the user-provided callback */
        pass_desc = (__bridge MTLRenderPassDescriptor*) _sg_mtl_renderpass_descriptor_cb();
    }
7146 | if (pass_desc) { |
7147 | _sg_mtl_pass_valid = true; |
7148 | } |
7149 | else { |
7150 | /* default pass descriptor will not be valid if window is minimized, |
7151 | don't do any rendering in this case */ |
7152 | _sg_mtl_pass_valid = false; |
7153 | return; |
7154 | } |
7155 | if (pass) { |
7156 | /* setup pass descriptor for offscreen rendering */ |
7157 | SOKOL_ASSERT(pass->slot.state == SG_RESOURCESTATE_VALID); |
7158 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
7159 | const _sg_attachment* att = &pass->color_atts[i]; |
7160 | if (0 == att->image) { |
7161 | break; |
7162 | } |
7163 | SOKOL_ASSERT(att->image->slot.state == SG_RESOURCESTATE_VALID); |
7164 | SOKOL_ASSERT(att->image->slot.id == att->image_id.id); |
7165 | const bool is_msaa = (att->image->sample_count > 1); |
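            /* for MSAA attachments, rendering goes into the private 'msaa' texture
               and is resolved into the actual attachment texture at the end of the
               pass (see the MTLStoreActionMultisampleResolve store action below) */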
7166 | pass_desc.colorAttachments[i].loadAction = _sg_mtl_load_action(action->colors[i].action); |
7167 | pass_desc.colorAttachments[i].storeAction = is_msaa ? MTLStoreActionMultisampleResolve : MTLStoreActionStore; |
7168 | const float* c = &(action->colors[i].val[0]); |
7169 | pass_desc.colorAttachments[i].clearColor = MTLClearColorMake(c[0], c[1], c[2], c[3]); |
7170 | if (is_msaa) { |
7171 | SOKOL_ASSERT(att->image->mtl_msaa_tex != _SG_MTL_INVALID_POOL_INDEX); |
7172 | SOKOL_ASSERT(att->image->mtl_tex[att->image->active_slot] != _SG_MTL_INVALID_POOL_INDEX); |
7173 | pass_desc.colorAttachments[i].texture = _sg_mtl_pool[att->image->mtl_msaa_tex]; |
7174 | pass_desc.colorAttachments[i].resolveTexture = _sg_mtl_pool[att->image->mtl_tex[att->image->active_slot]]; |
7175 | pass_desc.colorAttachments[i].resolveLevel = att->mip_level; |
7176 | switch (att->image->type) { |
7177 | case SG_IMAGETYPE_CUBE: |
7178 | case SG_IMAGETYPE_ARRAY: |
7179 | pass_desc.colorAttachments[i].resolveSlice = att->slice; |
7180 | break; |
7181 | case SG_IMAGETYPE_3D: |
7182 | pass_desc.colorAttachments[i].resolveDepthPlane = att->slice; |
7183 | break; |
7184 | default: break; |
7185 | } |
7186 | } |
7187 | else { |
7188 | SOKOL_ASSERT(att->image->mtl_tex[att->image->active_slot] != _SG_MTL_INVALID_POOL_INDEX); |
7189 | pass_desc.colorAttachments[i].texture = _sg_mtl_pool[att->image->mtl_tex[att->image->active_slot]]; |
7190 | pass_desc.colorAttachments[i].level = att->mip_level; |
7191 | switch (att->image->type) { |
7192 | case SG_IMAGETYPE_CUBE: |
7193 | case SG_IMAGETYPE_ARRAY: |
7194 | pass_desc.colorAttachments[i].slice = att->slice; |
7195 | break; |
7196 | case SG_IMAGETYPE_3D: |
7197 | pass_desc.colorAttachments[i].depthPlane = att->slice; |
7198 | break; |
7199 | default: break; |
7200 | } |
7201 | } |
7202 | } |
7203 | if (0 != pass->ds_att.image) { |
7204 | const _sg_attachment* att = &pass->ds_att; |
7205 | SOKOL_ASSERT(att->image->slot.state == SG_RESOURCESTATE_VALID); |
7206 | SOKOL_ASSERT(att->image->slot.id == att->image_id.id); |
7207 | SOKOL_ASSERT(att->image->mtl_depth_tex != _SG_MTL_INVALID_POOL_INDEX); |
7208 | pass_desc.depthAttachment.texture = _sg_mtl_pool[att->image->mtl_depth_tex]; |
7209 | pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action); |
7210 | pass_desc.depthAttachment.clearDepth = action->depth.val; |
7211 | if (_sg_is_depth_stencil_format(att->image->pixel_format)) { |
7212 | pass_desc.stencilAttachment.texture = _sg_mtl_pool[att->image->mtl_depth_tex]; |
7213 | pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action); |
7214 | pass_desc.stencilAttachment.clearStencil = action->stencil.val; |
7215 | } |
7216 | } |
7217 | } |
7218 | else { |
7219 | /* setup pass descriptor for default rendering */ |
7220 | pass_desc.colorAttachments[0].loadAction = _sg_mtl_load_action(action->colors[0].action); |
7221 | const float* c = &(action->colors[0].val[0]); |
7222 | pass_desc.colorAttachments[0].clearColor = MTLClearColorMake(c[0], c[1], c[2], c[3]); |
7223 | pass_desc.depthAttachment.loadAction = _sg_mtl_load_action(action->depth.action); |
7224 | pass_desc.depthAttachment.clearDepth = action->depth.val; |
7225 | pass_desc.stencilAttachment.loadAction = _sg_mtl_load_action(action->stencil.action); |
7226 | pass_desc.stencilAttachment.clearStencil = action->stencil.val; |
7227 | } |
7228 | |
7229 | /* create a render command encoder, this might return nil if window is minimized */ |
7230 | _sg_mtl_cmd_encoder = [_sg_mtl_cmd_buffer renderCommandEncoderWithDescriptor:pass_desc]; |
7231 | if (_sg_mtl_cmd_encoder == nil) { |
7232 | _sg_mtl_pass_valid = false; |
7233 | return; |
7234 | } |
7235 | |
7236 | /* bind the global uniform buffer, this only happens once per pass */ |
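    /* (per-draw uniform updates in _sg_apply_uniform_block then only adjust the
       buffer offsets via setVertexBufferOffset/setFragmentBufferOffset) */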
7237 | for (int slot = 0; slot < SG_MAX_SHADERSTAGE_UBS; slot++) { |
7238 | [_sg_mtl_cmd_encoder |
7239 | setVertexBuffer:_sg_mtl_uniform_buffers[_sg_mtl_cur_frame_rotate_index] |
7240 | offset:0 |
7241 | atIndex:slot]; |
7242 | [_sg_mtl_cmd_encoder |
7243 | setFragmentBuffer:_sg_mtl_uniform_buffers[_sg_mtl_cur_frame_rotate_index] |
7244 | offset:0 |
7245 | atIndex:slot]; |
7246 | } |
7247 | } |
7248 | |
7249 | _SOKOL_PRIVATE void _sg_end_pass() { |
7250 | SOKOL_ASSERT(_sg_mtl_in_pass); |
7251 | _sg_mtl_in_pass = false; |
7252 | _sg_mtl_pass_valid = false; |
7253 | if (nil != _sg_mtl_cmd_encoder) { |
7254 | [_sg_mtl_cmd_encoder endEncoding]; |
7255 | _sg_mtl_cmd_encoder = nil; |
7256 | } |
7257 | } |
7258 | |
7259 | _SOKOL_PRIVATE void _sg_commit() { |
7260 | SOKOL_ASSERT(!_sg_mtl_in_pass); |
7261 | SOKOL_ASSERT(!_sg_mtl_pass_valid); |
7262 | SOKOL_ASSERT(_sg_mtl_drawable_cb); |
7263 | SOKOL_ASSERT(nil == _sg_mtl_cmd_encoder); |
7264 | SOKOL_ASSERT(nil != _sg_mtl_cmd_buffer); |
7265 | |
7266 | #if !TARGET_OS_IPHONE |
7267 | [_sg_mtl_uniform_buffers[_sg_mtl_cur_frame_rotate_index] didModifyRange:NSMakeRange(0, _sg_mtl_cur_ub_offset)]; |
7268 | #endif |
7269 | |
7270 | /* present, commit and signal semaphore when done */ |
7271 | id<MTLDrawable> cur_drawable = (__bridge id<MTLDrawable>) _sg_mtl_drawable_cb(); |
7272 | [_sg_mtl_cmd_buffer presentDrawable:cur_drawable]; |
7273 | __block dispatch_semaphore_t sem = _sg_mtl_sem; |
7274 | [_sg_mtl_cmd_buffer addCompletedHandler:^(id<MTLCommandBuffer> cmd_buffer) { |
7275 | dispatch_semaphore_signal(sem); |
7276 | }]; |
7277 | [_sg_mtl_cmd_buffer commit]; |
7278 | |
7279 | /* garbage-collect resources pending for release */ |
7280 | _sg_mtl_garbage_collect(_sg_mtl_frame_index); |
7281 | |
7282 | /* rotate uniform buffer slot */ |
7283 | if (++_sg_mtl_cur_frame_rotate_index >= SG_NUM_INFLIGHT_FRAMES) { |
7284 | _sg_mtl_cur_frame_rotate_index = 0; |
7285 | } |
7286 | _sg_mtl_frame_index++; |
7287 | _sg_mtl_cur_ub_offset = 0; |
7288 | _sg_mtl_cur_ub_base_ptr = 0; |
7289 | _sg_mtl_cmd_buffer = nil; |
7290 | } |
7291 | |
7292 | _SOKOL_PRIVATE void _sg_apply_viewport(int x, int y, int w, int h, bool origin_top_left) { |
7293 | SOKOL_ASSERT(_sg_mtl_in_pass); |
7294 | if (!_sg_mtl_pass_valid) { |
7295 | return; |
7296 | } |
7297 | SOKOL_ASSERT(_sg_mtl_cmd_encoder); |
7298 | MTLViewport vp; |
7299 | vp.originX = (double) x; |
7300 | vp.originY = (double) (origin_top_left ? y : (_sg_mtl_cur_height - (y + h))); |
7301 | vp.width = (double) w; |
7302 | vp.height = (double) h; |
7303 | vp.znear = 0.0; |
7304 | vp.zfar = 1.0; |
7305 | [_sg_mtl_cmd_encoder setViewport:vp]; |
7306 | } |
7307 | |
7308 | _SOKOL_PRIVATE void _sg_apply_scissor_rect(int x, int y, int w, int h, bool origin_top_left) { |
7309 | SOKOL_ASSERT(_sg_mtl_in_pass); |
7310 | if (!_sg_mtl_pass_valid) { |
7311 | return; |
7312 | } |
7313 | SOKOL_ASSERT(_sg_mtl_cmd_encoder); |
7314 | /* clip against framebuffer rect */ |
7315 | x = _sg_min(_sg_max(0, x), _sg_mtl_cur_width-1); |
7316 | y = _sg_min(_sg_max(0, y), _sg_mtl_cur_height-1); |
7317 | if ((x + w) > _sg_mtl_cur_width) { |
7318 | w = _sg_mtl_cur_width - x; |
7319 | } |
7320 | if ((y + h) > _sg_mtl_cur_height) { |
7321 | h = _sg_mtl_cur_height - y; |
7322 | } |
7323 | w = _sg_max(w, 1); |
7324 | h = _sg_max(h, 1); |
7325 | |
7326 | MTLScissorRect r; |
7327 | r.x = x; |
7328 | r.y = origin_top_left ? y : (_sg_mtl_cur_height - (y + h)); |
7329 | r.width = w; |
7330 | r.height = h; |
7331 | [_sg_mtl_cmd_encoder setScissorRect:r]; |
7332 | } |
7333 | |
7334 | _SOKOL_PRIVATE void _sg_apply_draw_state( |
7335 | _sg_pipeline* pip, |
7336 | _sg_buffer** vbs, const int* vb_offsets, int num_vbs, |
7337 | _sg_buffer* ib, int ib_offset, |
7338 | _sg_image** vs_imgs, int num_vs_imgs, |
7339 | _sg_image** fs_imgs, int num_fs_imgs) |
7340 | { |
7341 | SOKOL_ASSERT(pip); |
7342 | SOKOL_ASSERT(pip->shader); |
7343 | SOKOL_ASSERT(_sg_mtl_in_pass); |
7344 | if (!_sg_mtl_pass_valid) { |
7345 | return; |
7346 | } |
7347 | SOKOL_ASSERT(_sg_mtl_cmd_encoder); |
7348 | |
7349 | /* store index buffer binding, this will be needed later in sg_draw() */ |
7350 | _sg_mtl_cur_indexbuffer = ib; |
7351 | _sg_mtl_cur_indexbuffer_offset = ib_offset; |
7352 | if (ib) { |
7353 | SOKOL_ASSERT(pip->index_type != SG_INDEXTYPE_NONE); |
7354 | _sg_mtl_cur_indexbuffer_id.id = ib->slot.id; |
7355 | } |
7356 | else { |
7357 | SOKOL_ASSERT(pip->index_type == SG_INDEXTYPE_NONE); |
7358 | _sg_mtl_cur_indexbuffer_id.id = SG_INVALID_ID; |
7359 | } |
7360 | |
7361 | /* apply pipeline state */ |
7362 | if ((_sg_mtl_cur_pipeline != pip) || (_sg_mtl_cur_pipeline_id.id != pip->slot.id)) { |
7363 | _sg_mtl_cur_pipeline = pip; |
7364 | _sg_mtl_cur_pipeline_id.id = pip->slot.id; |
7365 | const float* c = pip->blend_color; |
7366 | /* FIXME: those should be filtered through a simple state cache */ |
7367 | [_sg_mtl_cmd_encoder setBlendColorRed:c[0] green:c[1] blue:c[2] alpha:c[3]]; |
7368 | [_sg_mtl_cmd_encoder setCullMode:pip->mtl_cull_mode]; |
7369 | [_sg_mtl_cmd_encoder setFrontFacingWinding:pip->mtl_winding]; |
7370 | [_sg_mtl_cmd_encoder setStencilReferenceValue:pip->mtl_stencil_ref]; |
7371 | [_sg_mtl_cmd_encoder setDepthBias:pip->depth_bias slopeScale:pip->depth_bias_slope_scale clamp:pip->depth_bias_clamp]; |
7372 | SOKOL_ASSERT(pip->mtl_rps != _SG_MTL_INVALID_POOL_INDEX); |
7373 | [_sg_mtl_cmd_encoder setRenderPipelineState:_sg_mtl_pool[pip->mtl_rps]]; |
7374 | SOKOL_ASSERT(pip->mtl_dss != _SG_MTL_INVALID_POOL_INDEX); |
7375 | [_sg_mtl_cmd_encoder setDepthStencilState:_sg_mtl_pool[pip->mtl_dss]]; |
7376 | } |
7377 | |
7378 | /* apply vertex buffers */ |
7379 | int slot; |
7380 | for (slot = 0; slot < num_vbs; slot++) { |
7381 | const _sg_buffer* vb = vbs[slot]; |
7382 | if ((_sg_mtl_cur_vertexbuffers[slot] != vb) || |
7383 | (_sg_mtl_cur_vertexbuffer_offsets[slot] != vb_offsets[slot]) || |
7384 | (_sg_mtl_cur_vertexbuffer_ids[slot].id != vb->slot.id)) |
7385 | { |
7386 | _sg_mtl_cur_vertexbuffers[slot] = vb; |
7387 | _sg_mtl_cur_vertexbuffer_offsets[slot] = vb_offsets[slot]; |
7388 | _sg_mtl_cur_vertexbuffer_ids[slot].id = vb->slot.id; |
7389 | const NSUInteger mtl_slot = SG_MAX_SHADERSTAGE_UBS + slot; |
7390 | SOKOL_ASSERT(vb->mtl_buf[vb->active_slot] != _SG_MTL_INVALID_POOL_INDEX); |
7391 | [_sg_mtl_cmd_encoder setVertexBuffer:_sg_mtl_pool[vb->mtl_buf[vb->active_slot]] |
7392 | offset:vb_offsets[slot] |
7393 | atIndex:mtl_slot]; |
7394 | } |
7395 | } |
7396 | |
7397 | /* apply vertex shader images */ |
7398 | for (slot = 0; slot < num_vs_imgs; slot++) { |
7399 | const _sg_image* img = vs_imgs[slot]; |
7400 | if ((_sg_mtl_cur_vs_images[slot] != img) || (_sg_mtl_cur_vs_image_ids[slot].id != img->slot.id)) { |
7401 | _sg_mtl_cur_vs_images[slot] = img; |
7402 | _sg_mtl_cur_vs_image_ids[slot].id = img->slot.id; |
7403 | SOKOL_ASSERT(img->mtl_tex[img->active_slot] != _SG_MTL_INVALID_POOL_INDEX); |
7404 | [_sg_mtl_cmd_encoder setVertexTexture:_sg_mtl_pool[img->mtl_tex[img->active_slot]] atIndex:slot]; |
7405 | SOKOL_ASSERT(img->mtl_sampler_state != _SG_MTL_INVALID_POOL_INDEX); |
7406 | [_sg_mtl_cmd_encoder setVertexSamplerState:_sg_mtl_pool[img->mtl_sampler_state] atIndex:slot]; |
7407 | } |
7408 | } |
7409 | |
7410 | /* apply fragment shader images */ |
7411 | for (slot = 0; slot < num_fs_imgs; slot++) { |
7412 | const _sg_image* img = fs_imgs[slot]; |
7413 | if ((_sg_mtl_cur_fs_images[slot] != img) || (_sg_mtl_cur_fs_image_ids[slot].id != img->slot.id)) { |
7414 | _sg_mtl_cur_fs_images[slot] = img; |
7415 | _sg_mtl_cur_fs_image_ids[slot].id = img->slot.id; |
7416 | SOKOL_ASSERT(img->mtl_tex[img->active_slot] != _SG_MTL_INVALID_POOL_INDEX); |
7417 | [_sg_mtl_cmd_encoder setFragmentTexture:_sg_mtl_pool[img->mtl_tex[img->active_slot]] atIndex:slot]; |
7418 | SOKOL_ASSERT(img->mtl_sampler_state != _SG_MTL_INVALID_POOL_INDEX); |
7419 | [_sg_mtl_cmd_encoder setFragmentSamplerState:_sg_mtl_pool[img->mtl_sampler_state] atIndex:slot]; |
7420 | } |
7421 | } |
7422 | } |
7423 | |
/* round 'val' up to the next multiple of 'round_to' (round_to must be a power of two),
   e.g. _sg_mtl_roundup(13, 16) == 16 */
#define _sg_mtl_roundup(val, round_to) (((val)+((round_to)-1))&~((round_to)-1))
7425 | |
7426 | _SOKOL_PRIVATE void _sg_apply_uniform_block(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { |
7427 | SOKOL_ASSERT(_sg_mtl_in_pass); |
7428 | if (!_sg_mtl_pass_valid) { |
7429 | return; |
7430 | } |
7431 | SOKOL_ASSERT(_sg_mtl_cmd_encoder); |
7432 | SOKOL_ASSERT(data && (num_bytes > 0)); |
7433 | SOKOL_ASSERT((stage_index >= 0) && ((int)stage_index < SG_NUM_SHADER_STAGES)); |
7434 | SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); |
7435 | SOKOL_ASSERT((_sg_mtl_cur_ub_offset + num_bytes) <= _sg_mtl_ub_size); |
7436 | SOKOL_ASSERT((_sg_mtl_cur_ub_offset & (_SG_MTL_UB_ALIGN-1)) == 0); |
7437 | SOKOL_ASSERT(_sg_mtl_cur_pipeline && _sg_mtl_cur_pipeline->shader); |
7438 | SOKOL_ASSERT(_sg_mtl_cur_pipeline->slot.id == _sg_mtl_cur_pipeline_id.id); |
7439 | SOKOL_ASSERT(_sg_mtl_cur_pipeline->shader->slot.id == _sg_mtl_cur_pipeline->shader_id.id); |
7440 | SOKOL_ASSERT(ub_index < _sg_mtl_cur_pipeline->shader->stage[stage_index].num_uniform_blocks); |
7441 | SOKOL_ASSERT(num_bytes <= _sg_mtl_cur_pipeline->shader->stage[stage_index].uniform_blocks[ub_index].size); |
7442 | |
7443 | /* copy to global uniform buffer, record offset into cmd encoder, and advance offset */ |
7444 | uint8_t* dst = &_sg_mtl_cur_ub_base_ptr[_sg_mtl_cur_ub_offset]; |
7445 | memcpy(dst, data, num_bytes); |
7446 | if (stage_index == SG_SHADERSTAGE_VS) { |
7447 | [_sg_mtl_cmd_encoder setVertexBufferOffset:_sg_mtl_cur_ub_offset atIndex:ub_index]; |
7448 | } |
7449 | else { |
7450 | [_sg_mtl_cmd_encoder setFragmentBufferOffset:_sg_mtl_cur_ub_offset atIndex:ub_index]; |
7451 | } |
7452 | _sg_mtl_cur_ub_offset = _sg_mtl_roundup(_sg_mtl_cur_ub_offset + num_bytes, _SG_MTL_UB_ALIGN); |
7453 | } |
7454 | |
7455 | _SOKOL_PRIVATE void _sg_draw(int base_element, int num_elements, int num_instances) { |
7456 | SOKOL_ASSERT(_sg_mtl_in_pass); |
7457 | if (!_sg_mtl_pass_valid) { |
7458 | return; |
7459 | } |
7460 | SOKOL_ASSERT(_sg_mtl_cmd_encoder); |
7461 | SOKOL_ASSERT(_sg_mtl_cur_pipeline && (_sg_mtl_cur_pipeline->slot.id == _sg_mtl_cur_pipeline_id.id)); |
7462 | if (SG_INDEXTYPE_NONE != _sg_mtl_cur_pipeline->index_type) { |
7463 | /* indexed rendering */ |
7464 | SOKOL_ASSERT(_sg_mtl_cur_indexbuffer && (_sg_mtl_cur_indexbuffer->slot.id == _sg_mtl_cur_indexbuffer_id.id)); |
7465 | const _sg_buffer* ib = _sg_mtl_cur_indexbuffer; |
7466 | SOKOL_ASSERT(ib->mtl_buf[ib->active_slot] != _SG_MTL_INVALID_POOL_INDEX); |
7467 | const NSUInteger index_buffer_offset = _sg_mtl_cur_indexbuffer_offset + |
7468 | base_element * _sg_mtl_cur_pipeline->mtl_index_size; |
7469 | [_sg_mtl_cmd_encoder drawIndexedPrimitives:_sg_mtl_cur_pipeline->mtl_prim_type |
7470 | indexCount:num_elements |
7471 | indexType:_sg_mtl_cur_pipeline->mtl_index_type |
7472 | indexBuffer:_sg_mtl_pool[ib->mtl_buf[ib->active_slot]] |
7473 | indexBufferOffset:index_buffer_offset |
7474 | instanceCount:num_instances]; |
7475 | } |
7476 | else { |
7477 | /* non-indexed rendering */ |
7478 | [_sg_mtl_cmd_encoder drawPrimitives:_sg_mtl_cur_pipeline->mtl_prim_type |
7479 | vertexStart:base_element |
7480 | vertexCount:num_elements |
7481 | instanceCount:num_instances]; |
7482 | } |
7483 | } |
7484 | |
7485 | _SOKOL_PRIVATE void _sg_update_buffer(_sg_buffer* buf, const void* data, int data_size) { |
7486 | SOKOL_ASSERT(buf && data && (data_size > 0)); |
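    /* rotate to the next internal buffer slot, so that updates never overwrite
       data the GPU may still be reading from a previous in-flight frame */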
7487 | if (++buf->active_slot >= buf->num_slots) { |
7488 | buf->active_slot = 0; |
7489 | } |
7490 | __unsafe_unretained id<MTLBuffer> mtl_buf = _sg_mtl_pool[buf->mtl_buf[buf->active_slot]]; |
7491 | void* dst_ptr = [mtl_buf contents]; |
7492 | memcpy(dst_ptr, data, data_size); |
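    /* on macOS the buffer presumably uses managed storage, so CPU writes must be
       flushed to the GPU with didModifyRange; iOS buffers live in shared storage */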
7493 | #if !TARGET_OS_IPHONE |
7494 | [mtl_buf didModifyRange:NSMakeRange(0, data_size)]; |
7495 | #endif |
7496 | } |
7497 | |
7498 | _SOKOL_PRIVATE void _sg_append_buffer(_sg_buffer* buf, const void* data, int data_size, bool new_frame) { |
7499 | SOKOL_ASSERT(buf && data && (data_size > 0)); |
7500 | if (new_frame) { |
7501 | if (++buf->active_slot >= buf->num_slots) { |
7502 | buf->active_slot = 0; |
7503 | } |
7504 | } |
7505 | __unsafe_unretained id<MTLBuffer> mtl_buf = _sg_mtl_pool[buf->mtl_buf[buf->active_slot]]; |
7506 | uint8_t* dst_ptr = (uint8_t*) [mtl_buf contents]; |
7507 | dst_ptr += buf->append_pos; |
7508 | memcpy(dst_ptr, data, data_size); |
7509 | #if !TARGET_OS_IPHONE |
7510 | [mtl_buf didModifyRange:NSMakeRange(buf->append_pos, data_size)]; |
7511 | #endif |
7512 | } |
7513 | |
7514 | _SOKOL_PRIVATE void _sg_update_image(_sg_image* img, const sg_image_content* data) { |
7515 | SOKOL_ASSERT(img && data); |
7516 | if (++img->active_slot >= img->num_slots) { |
7517 | img->active_slot = 0; |
7518 | } |
7519 | __unsafe_unretained id<MTLTexture> mtl_tex = _sg_mtl_pool[img->mtl_tex[img->active_slot]]; |
7520 | _sg_mtl_copy_image_content(img, mtl_tex, data); |
7521 | } |
7522 | |
7523 | #else |
7524 | #error "No rendering backend selected" |
7525 | #endif |
7526 | /*== RESOURCE POOLS ==========================================================*/ |
7527 | typedef struct { |
7528 | int size; |
7529 | uint32_t unique_counter; |
7530 | int queue_top; |
7531 | int* free_queue; |
7532 | } _sg_pool; |
7533 | |
7534 | _SOKOL_PRIVATE void _sg_init_pool(_sg_pool* pool, int num) { |
7535 | SOKOL_ASSERT(pool && (num > 1)); |
7536 | /* slot 0 is reserved for the 'invalid id', so bump the pool size by 1 */ |
7537 | pool->size = num + 1; |
7538 | pool->queue_top = 0; |
7539 | pool->unique_counter = 0; |
    /* it's not a bug to only reserve 'num' entries here: slot 0 is reserved for the
       invalid id and never enters the free queue */
7541 | pool->free_queue = (int*) SOKOL_MALLOC(sizeof(int)*num); |
7542 | SOKOL_ASSERT(pool->free_queue); |
7543 | /* never allocate the zero-th pool item since the invalid id is 0 */ |
7544 | for (int i = pool->size-1; i >= 1; i--) { |
7545 | pool->free_queue[pool->queue_top++] = i; |
7546 | } |
7547 | } |
7548 | |
7549 | _SOKOL_PRIVATE void _sg_discard_pool(_sg_pool* pool) { |
7550 | SOKOL_ASSERT(pool); |
7551 | SOKOL_FREE(pool->free_queue); |
7552 | pool->free_queue = 0; |
7553 | pool->size = 0; |
7554 | pool->queue_top = 0; |
7555 | pool->unique_counter = 0; |
7556 | } |
7557 | |
7558 | _SOKOL_PRIVATE uint32_t _sg_pool_alloc_id(_sg_pool* pool) { |
7559 | SOKOL_ASSERT(pool); |
7560 | SOKOL_ASSERT(pool->free_queue); |
7561 | if (pool->queue_top > 0) { |
7562 | int slot_index = pool->free_queue[--pool->queue_top]; |
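        /* the returned id packs a unique counter into the upper bits and the slot
           index into the lower _SG_SLOT_SHIFT bits; dangling ids that refer to a
           reused slot are detected by comparing against the id stored in the slot */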
7563 | return ((pool->unique_counter++)<<_SG_SLOT_SHIFT)|slot_index; |
7564 | } |
7565 | else { |
7566 | /* pool exhausted */ |
7567 | return SG_INVALID_ID; |
7568 | } |
7569 | } |
7570 | |
7571 | _SOKOL_PRIVATE void _sg_pool_free_id(_sg_pool* pool, uint32_t id) { |
7572 | SOKOL_ASSERT(id != SG_INVALID_ID); |
7573 | SOKOL_ASSERT(pool); |
7574 | SOKOL_ASSERT(pool->free_queue); |
7575 | SOKOL_ASSERT(pool->queue_top < pool->size); |
    /* NOTE: the free queue stores slot indices, not full resource ids */
    int slot_index = _sg_slot_index(id);
    #ifdef SOKOL_DEBUG
    /* debug check against double-free */
    for (int i = 0; i < pool->queue_top; i++) {
        SOKOL_ASSERT(pool->free_queue[i] != slot_index);
    }
    #endif
    pool->free_queue[pool->queue_top++] = slot_index;
7584 | SOKOL_ASSERT(pool->queue_top <= (pool->size-1)); |
7585 | } |
7586 | |
7587 | typedef struct { |
7588 | _sg_pool buffer_pool; |
7589 | _sg_pool image_pool; |
7590 | _sg_pool shader_pool; |
7591 | _sg_pool pipeline_pool; |
7592 | _sg_pool pass_pool; |
7593 | _sg_pool context_pool; |
7594 | _sg_buffer* buffers; |
7595 | _sg_image* images; |
7596 | _sg_shader* shaders; |
7597 | _sg_pipeline* pipelines; |
7598 | _sg_pass* passes; |
7599 | _sg_context* contexts; |
7600 | } _sg_pools; |
7601 | |
7602 | _SOKOL_PRIVATE void _sg_setup_pools(_sg_pools* p, const sg_desc* desc) { |
7603 | SOKOL_ASSERT(p); |
7604 | SOKOL_ASSERT(desc); |
7605 | /* note: the pools here will have an additional item, since slot 0 is reserved */ |
7606 | SOKOL_ASSERT((desc->buffer_pool_size >= 0) && (desc->buffer_pool_size < _SG_MAX_POOL_SIZE)); |
7607 | _sg_init_pool(&p->buffer_pool, _sg_def(desc->buffer_pool_size, _SG_DEFAULT_BUFFER_POOL_SIZE)); |
7608 | p->buffers = (_sg_buffer*) SOKOL_MALLOC(sizeof(_sg_buffer) * p->buffer_pool.size); |
7609 | SOKOL_ASSERT(p->buffers); |
7610 | for (int i = 0; i < p->buffer_pool.size; i++) { |
7611 | _sg_init_buffer_slot(&p->buffers[i]); |
7612 | } |
7613 | |
7614 | SOKOL_ASSERT((desc->image_pool_size >= 0) && (desc->image_pool_size < _SG_MAX_POOL_SIZE)); |
7615 | _sg_init_pool(&p->image_pool, _sg_def(desc->image_pool_size, _SG_DEFAULT_IMAGE_POOL_SIZE)); |
7616 | p->images = (_sg_image*) SOKOL_MALLOC(sizeof(_sg_image) * p->image_pool.size); |
7617 | SOKOL_ASSERT(p->images); |
7618 | for (int i = 0; i < p->image_pool.size; i++) { |
7619 | _sg_init_image_slot(&p->images[i]); |
7620 | } |
7621 | |
7622 | SOKOL_ASSERT((desc->shader_pool_size >= 0) && (desc->shader_pool_size < _SG_MAX_POOL_SIZE)); |
7623 | _sg_init_pool(&p->shader_pool, _sg_def(desc->shader_pool_size, _SG_DEFAULT_SHADER_POOL_SIZE)); |
7624 | p->shaders = (_sg_shader*) SOKOL_MALLOC(sizeof(_sg_shader) * p->shader_pool.size); |
7625 | SOKOL_ASSERT(p->shaders); |
7626 | for (int i = 0; i < p->shader_pool.size; i++) { |
7627 | _sg_init_shader_slot(&p->shaders[i]); |
7628 | } |
7629 | |
7630 | SOKOL_ASSERT((desc->pipeline_pool_size >= 0) && (desc->pipeline_pool_size < _SG_MAX_POOL_SIZE)); |
7631 | _sg_init_pool(&p->pipeline_pool, _sg_def(desc->pipeline_pool_size, _SG_DEFAULT_PIPELINE_POOL_SIZE)); |
7632 | p->pipelines = (_sg_pipeline*) SOKOL_MALLOC(sizeof(_sg_pipeline) * p->pipeline_pool.size); |
7633 | SOKOL_ASSERT(p->pipelines); |
7634 | for (int i = 0; i < p->pipeline_pool.size; i++) { |
7635 | _sg_init_pipeline_slot(&p->pipelines[i]); |
7636 | } |
7637 | |
7638 | SOKOL_ASSERT((desc->pass_pool_size >= 0) && (desc->pass_pool_size < _SG_MAX_POOL_SIZE)); |
7639 | _sg_init_pool(&p->pass_pool, _sg_def(desc->pass_pool_size, _SG_DEFAULT_PASS_POOL_SIZE)); |
7640 | p->passes = (_sg_pass*) SOKOL_MALLOC(sizeof(_sg_pass) * p->pass_pool.size); |
7641 | SOKOL_ASSERT(p->passes); |
7642 | for (int i = 0; i < p->pass_pool.size; i++) { |
7643 | _sg_init_pass_slot(&p->passes[i]); |
7644 | } |
7645 | |
7646 | SOKOL_ASSERT((desc->context_pool_size >= 0) && (desc->context_pool_size < _SG_MAX_POOL_SIZE)); |
7647 | _sg_init_pool(&p->context_pool, _sg_def(desc->context_pool_size, _SG_DEFAULT_CONTEXT_POOL_SIZE)); |
7648 | p->contexts = (_sg_context*) SOKOL_MALLOC(sizeof(_sg_context) * p->context_pool.size); |
7649 | SOKOL_ASSERT(p->contexts); |
7650 | for (int i = 0; i < p->context_pool.size; i++) { |
7651 | _sg_init_context_slot(&p->contexts[i]); |
7652 | } |
7653 | } |
7654 | |
7655 | _SOKOL_PRIVATE void _sg_discard_pools(_sg_pools* p) { |
7656 | SOKOL_ASSERT(p); |
7657 | SOKOL_FREE(p->contexts); p->contexts = 0; |
7658 | SOKOL_FREE(p->passes); p->passes = 0; |
7659 | SOKOL_FREE(p->pipelines); p->pipelines = 0; |
7660 | SOKOL_FREE(p->shaders); p->shaders = 0; |
7661 | SOKOL_FREE(p->images); p->images = 0; |
7662 | SOKOL_FREE(p->buffers); p->buffers = 0; |
7663 | _sg_discard_pool(&p->context_pool); |
7664 | _sg_discard_pool(&p->pass_pool); |
7665 | _sg_discard_pool(&p->pipeline_pool); |
7666 | _sg_discard_pool(&p->shader_pool); |
7667 | _sg_discard_pool(&p->image_pool); |
7668 | _sg_discard_pool(&p->buffer_pool); |
7669 | } |
7670 | |
7671 | /* returns pointer to resource by id without matching id check */ |
7672 | _SOKOL_PRIVATE _sg_buffer* _sg_buffer_at(const _sg_pools* p, uint32_t buf_id) { |
7673 | SOKOL_ASSERT(p && SG_INVALID_ID != buf_id); |
7674 | int slot_index = _sg_slot_index(buf_id); |
7675 | SOKOL_ASSERT((slot_index >= 0) && (slot_index < p->buffer_pool.size)); |
7676 | return &p->buffers[slot_index]; |
7677 | } |
7678 | |
7679 | _SOKOL_PRIVATE _sg_image* _sg_image_at(const _sg_pools* p, uint32_t img_id) { |
7680 | SOKOL_ASSERT(p && SG_INVALID_ID != img_id); |
7681 | int slot_index = _sg_slot_index(img_id); |
7682 | SOKOL_ASSERT((slot_index >= 0) && (slot_index < p->image_pool.size)); |
7683 | return &p->images[slot_index]; |
7684 | } |
7685 | |
7686 | _SOKOL_PRIVATE _sg_shader* _sg_shader_at(const _sg_pools* p, uint32_t shd_id) { |
7687 | SOKOL_ASSERT(p && SG_INVALID_ID != shd_id); |
7688 | int slot_index = _sg_slot_index(shd_id); |
7689 | SOKOL_ASSERT((slot_index >= 0) && (slot_index < p->shader_pool.size)); |
7690 | return &p->shaders[slot_index]; |
7691 | } |
7692 | |
7693 | _SOKOL_PRIVATE _sg_pipeline* _sg_pipeline_at(const _sg_pools* p, uint32_t pip_id) { |
7694 | SOKOL_ASSERT(p && SG_INVALID_ID != pip_id); |
7695 | int slot_index = _sg_slot_index(pip_id); |
7696 | SOKOL_ASSERT((slot_index >= 0) && (slot_index < p->pipeline_pool.size)); |
7697 | return &p->pipelines[slot_index]; |
7698 | } |
7699 | |
7700 | _SOKOL_PRIVATE _sg_pass* _sg_pass_at(const _sg_pools* p, uint32_t pass_id) { |
7701 | SOKOL_ASSERT(p && SG_INVALID_ID != pass_id); |
7702 | int slot_index = _sg_slot_index(pass_id); |
7703 | SOKOL_ASSERT((slot_index >= 0) && (slot_index < p->pass_pool.size)); |
7704 | return &p->passes[slot_index]; |
7705 | } |
7706 | |
7707 | _SOKOL_PRIVATE _sg_context* _sg_context_at(const _sg_pools* p, uint32_t context_id) { |
7708 | SOKOL_ASSERT(p && SG_INVALID_ID != context_id); |
7709 | int slot_index = _sg_slot_index(context_id); |
7710 | SOKOL_ASSERT((slot_index >= 0) && (slot_index < p->context_pool.size)); |
7711 | return &p->contexts[slot_index]; |
7712 | } |
7713 | |
7714 | /* returns pointer to resource with matching id check, may return 0 */ |
7715 | _SOKOL_PRIVATE _sg_buffer* _sg_lookup_buffer(const _sg_pools* p, uint32_t buf_id) { |
7716 | if (SG_INVALID_ID != buf_id) { |
7717 | _sg_buffer* buf = _sg_buffer_at(p, buf_id); |
7718 | if (buf->slot.id == buf_id) { |
7719 | return buf; |
7720 | } |
7721 | } |
7722 | return 0; |
7723 | } |
7724 | |
7725 | _SOKOL_PRIVATE _sg_image* _sg_lookup_image(const _sg_pools* p, uint32_t img_id) { |
7726 | if (SG_INVALID_ID != img_id) { |
7727 | _sg_image* img = _sg_image_at(p, img_id); |
7728 | if (img->slot.id == img_id) { |
7729 | return img; |
7730 | } |
7731 | } |
7732 | return 0; |
7733 | } |
7734 | |
7735 | _SOKOL_PRIVATE _sg_shader* _sg_lookup_shader(const _sg_pools* p, uint32_t shd_id) { |
7736 | SOKOL_ASSERT(p); |
7737 | if (SG_INVALID_ID != shd_id) { |
7738 | _sg_shader* shd = _sg_shader_at(p, shd_id); |
7739 | if (shd->slot.id == shd_id) { |
7740 | return shd; |
7741 | } |
7742 | } |
7743 | return 0; |
7744 | } |
7745 | |
7746 | _SOKOL_PRIVATE _sg_pipeline* _sg_lookup_pipeline(const _sg_pools* p, uint32_t pip_id) { |
7747 | SOKOL_ASSERT(p); |
7748 | if (SG_INVALID_ID != pip_id) { |
7749 | _sg_pipeline* pip = _sg_pipeline_at(p, pip_id); |
7750 | if (pip->slot.id == pip_id) { |
7751 | return pip; |
7752 | } |
7753 | } |
7754 | return 0; |
7755 | } |
7756 | |
7757 | _SOKOL_PRIVATE _sg_pass* _sg_lookup_pass(const _sg_pools* p, uint32_t pass_id) { |
7758 | SOKOL_ASSERT(p); |
7759 | if (SG_INVALID_ID != pass_id) { |
7760 | _sg_pass* pass = _sg_pass_at(p, pass_id); |
7761 | if (pass->slot.id == pass_id) { |
7762 | return pass; |
7763 | } |
7764 | } |
7765 | return 0; |
7766 | } |
7767 | |
7768 | _SOKOL_PRIVATE _sg_context* _sg_lookup_context(const _sg_pools* p, uint32_t ctx_id) { |
7769 | SOKOL_ASSERT(p); |
7770 | if (SG_INVALID_ID != ctx_id) { |
7771 | _sg_context* ctx = _sg_context_at(p, ctx_id); |
7772 | if (ctx->slot.id == ctx_id) { |
7773 | return ctx; |
7774 | } |
7775 | } |
7776 | return 0; |
7777 | } |
7778 | |
7779 | _SOKOL_PRIVATE void _sg_destroy_all_resources(_sg_pools* p, uint32_t ctx_id) { |
7780 | /* this is a bit dumb since it loops over all pool slots to |
7781 | find the occupied slots, on the other hand it is only ever |
7782 | executed at shutdown |
7783 | */ |
7784 | for (int i = 0; i < p->buffer_pool.size; i++) { |
7785 | if (p->buffers[i].slot.state == SG_RESOURCESTATE_VALID) { |
7786 | if (p->buffers[i].slot.ctx_id == ctx_id) { |
7787 | _sg_destroy_buffer(&p->buffers[i]); |
7788 | } |
7789 | } |
7790 | } |
7791 | for (int i = 0; i < p->image_pool.size; i++) { |
7792 | if (p->images[i].slot.state == SG_RESOURCESTATE_VALID) { |
7793 | if (p->images[i].slot.ctx_id == ctx_id) { |
7794 | _sg_destroy_image(&p->images[i]); |
7795 | } |
7796 | } |
7797 | } |
7798 | for (int i = 0; i < p->shader_pool.size; i++) { |
7799 | if (p->shaders[i].slot.state == SG_RESOURCESTATE_VALID) { |
7800 | if (p->shaders[i].slot.ctx_id == ctx_id) { |
7801 | _sg_destroy_shader(&p->shaders[i]); |
7802 | } |
7803 | } |
7804 | } |
7805 | for (int i = 0; i < p->pipeline_pool.size; i++) { |
7806 | if (p->pipelines[i].slot.state == SG_RESOURCESTATE_VALID) { |
7807 | if (p->pipelines[i].slot.ctx_id == ctx_id) { |
7808 | _sg_destroy_pipeline(&p->pipelines[i]); |
7809 | } |
7810 | } |
7811 | } |
7812 | for (int i = 0; i < p->pass_pool.size; i++) { |
7813 | if (p->passes[i].slot.state == SG_RESOURCESTATE_VALID) { |
7814 | if (p->passes[i].slot.ctx_id == ctx_id) { |
7815 | _sg_destroy_pass(&p->passes[i]); |
7816 | } |
7817 | } |
7818 | } |
7819 | } |
7820 | |
7821 | /*== VALIDATION LAYER ========================================================*/ |
7822 | #if defined(SOKOL_DEBUG) |
7823 | typedef enum { |
7824 | /* special case 'validation was successful' */ |
7825 | _SG_VALIDATE_SUCCESS, |
7826 | |
7827 | /* buffer creation */ |
7828 | _SG_VALIDATE_BUFFERDESC_CANARY, |
7829 | _SG_VALIDATE_BUFFERDESC_SIZE, |
7830 | _SG_VALIDATE_BUFFERDESC_CONTENT, |
7831 | _SG_VALIDATE_BUFFERDESC_NO_CONTENT, |
7832 | |
7833 | /* image creation */ |
7834 | _SG_VALIDATE_IMAGEDESC_CANARY, |
7835 | _SG_VALIDATE_IMAGEDESC_WIDTH, |
7836 | _SG_VALIDATE_IMAGEDESC_HEIGHT, |
7837 | _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT, |
7838 | _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT, |
7839 | _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT, |
7840 | _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT, |
7841 | _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE, |
7842 | _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT, |
7843 | _SG_VALIDATE_IMAGEDESC_CONTENT, |
7844 | _SG_VALIDATE_IMAGEDESC_NO_CONTENT, |
7845 | |
7846 | /* shader creation */ |
7847 | _SG_VALIDATE_SHADERDESC_CANARY, |
7848 | _SG_VALIDATE_SHADERDESC_SOURCE, |
7849 | _SG_VALIDATE_SHADERDESC_BYTECODE, |
7850 | _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE, |
7851 | _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE, |
7852 | _SG_VALIDATE_SHADERDESC_NO_CONT_UBS, |
7853 | _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS, |
7854 | _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS, |
7855 | _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS, |
7856 | _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME, |
7857 | _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH, |
7858 | _SG_VALIDATE_SHADERDESC_IMG_NAME, |
7859 | |
7860 | /* pipeline creation */ |
7861 | _SG_VALIDATE_PIPELINEDESC_CANARY, |
7862 | _SG_VALIDATE_PIPELINEDESC_SHADER, |
7863 | _SG_VALIDATE_PIPELINEDESC_NO_ATTRS, |
7864 | _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4, |
7865 | _SG_VALIDATE_PIPELINEDESC_ATTR_NAME, |
7866 | _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS, |
7867 | |
7868 | /* pass creation */ |
7869 | _SG_VALIDATE_PASSDESC_CANARY, |
7870 | _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS, |
7871 | _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS, |
7872 | _SG_VALIDATE_PASSDESC_IMAGE, |
7873 | _SG_VALIDATE_PASSDESC_MIPLEVEL, |
7874 | _SG_VALIDATE_PASSDESC_FACE, |
7875 | _SG_VALIDATE_PASSDESC_LAYER, |
7876 | _SG_VALIDATE_PASSDESC_SLICE, |
7877 | _SG_VALIDATE_PASSDESC_IMAGE_NO_RT, |
7878 | _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS, |
7879 | _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT, |
7880 | _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT, |
7881 | _SG_VALIDATE_PASSDESC_IMAGE_SIZES, |
7882 | _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS, |
7883 | |
7884 | /* sg_begin_pass validation */ |
7885 | _SG_VALIDATE_BEGINPASS_PASS, |
7886 | _SG_VALIDATE_BEGINPASS_IMAGE, |
7887 | |
7888 | /* sg_apply_draw_state validation */ |
7889 | _SG_VALIDATE_ADS_PIP, |
7890 | _SG_VALIDATE_ADS_VBS, |
7891 | _SG_VALIDATE_ADS_VB_TYPE, |
7892 | _SG_VALIDATE_ADS_VB_OVERFLOW, |
7893 | _SG_VALIDATE_ADS_NO_IB, |
7894 | _SG_VALIDATE_ADS_IB, |
7895 | _SG_VALIDATE_ADS_IB_TYPE, |
7896 | _SG_VALIDATE_ADS_IB_OVERFLOW, |
7897 | _SG_VALIDATE_ADS_VS_IMGS, |
7898 | _SG_VALIDATE_ADS_VS_IMG_TYPES, |
7899 | _SG_VALIDATE_ADS_FS_IMGS, |
7900 | _SG_VALIDATE_ADS_FS_IMG_TYPES, |
7901 | _SG_VALIDATE_ADS_ATT_COUNT, |
7902 | _SG_VALIDATE_ADS_COLOR_FORMAT, |
7903 | _SG_VALIDATE_ADS_DEPTH_FORMAT, |
7904 | _SG_VALIDATE_ADS_SAMPLE_COUNT, |
7905 | |
7906 | /* sg_apply_uniform_block validation */ |
7907 | _SG_VALIDATE_AUB_NO_PIPELINE, |
7908 | _SG_VALIDATE_AUB_NO_UB_AT_SLOT, |
7909 | _SG_VALIDATE_AUB_SIZE, |
7910 | |
7911 | /* sg_update_buffer validation */ |
7912 | _SG_VALIDATE_UPDATEBUF_USAGE, |
7913 | _SG_VALIDATE_UPDATEBUF_SIZE, |
7914 | _SG_VALIDATE_UPDATEBUF_ONCE, |
7915 | _SG_VALIDATE_UPDATEBUF_APPEND, |
7916 | |
7917 | /* sg_append_buffer validation */ |
7918 | _SG_VALIDATE_APPENDBUF_USAGE, |
7919 | _SG_VALIDATE_APPENDBUF_SIZE, |
7920 | _SG_VALIDATE_APPENDBUF_UPDATE, |
7921 | |
7922 | /* sg_update_image validation */ |
7923 | _SG_VALIDATE_UPDIMG_USAGE, |
7924 | _SG_VALIDATE_UPDIMG_NOTENOUGHDATA, |
7925 | _SG_VALIDATE_UPDIMG_SIZE, |
7926 | _SG_VALIDATE_UPDIMG_COMPRESSED, |
7927 | _SG_VALIDATE_UPDIMG_ONCE |
7928 | |
7929 | } _sg_validate_error; |
7930 | |
7931 | /* return a human readable string for an _sg_validate_error */ |
7932 | _SOKOL_PRIVATE const char* _sg_validate_string(_sg_validate_error err) { |
7933 | switch (err) { |
7934 | /* buffer creation validation errors */ |
7935 | case _SG_VALIDATE_BUFFERDESC_CANARY: return "sg_buffer_desc not initialized" ; |
7936 | case _SG_VALIDATE_BUFFERDESC_SIZE: return "sg_buffer_desc.size cannot be 0" ; |
7937 | case _SG_VALIDATE_BUFFERDESC_CONTENT: return "immutable buffers must be initialized with content (sg_buffer_desc.content)" ; |
7938 | case _SG_VALIDATE_BUFFERDESC_NO_CONTENT: return "dynamic/stream usage buffers cannot be initialized with content" ; |
7939 | |
        /* image creation validation errors */
7941 | case _SG_VALIDATE_IMAGEDESC_CANARY: return "sg_image_desc not initialized" ; |
7942 | case _SG_VALIDATE_IMAGEDESC_WIDTH: return "sg_image_desc.width must be > 0" ; |
7943 | case _SG_VALIDATE_IMAGEDESC_HEIGHT: return "sg_image_desc.height must be > 0" ; |
7944 | case _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT: return "invalid pixel format for render-target image" ; |
7945 | case _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT: return "invalid pixel format for non-render-target image" ; |
7946 | case _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT: return "non-render-target images cannot be multisampled" ; |
7947 | case _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT: return "MSAA render targets not supported (SG_FEATURE_MSAA_RENDER_TARGETS)" ; |
7948 | case _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE: return "render target images must be SG_USAGE_IMMUTABLE" ; |
7949 | case _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT: return "render target images cannot be initialized with content" ; |
7950 | case _SG_VALIDATE_IMAGEDESC_CONTENT: return "missing or invalid content for immutable image" ; |
7951 | case _SG_VALIDATE_IMAGEDESC_NO_CONTENT: return "dynamic/stream usage images cannot be initialized with content" ; |
7952 | |
7953 | /* shader creation */ |
7954 | case _SG_VALIDATE_SHADERDESC_CANARY: return "sg_shader_desc not initialized" ; |
7955 | case _SG_VALIDATE_SHADERDESC_SOURCE: return "shader source code required" ; |
7956 | case _SG_VALIDATE_SHADERDESC_BYTECODE: return "shader byte code required" ; |
7957 | case _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE: return "shader source or byte code required" ; |
7958 | case _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE: return "shader byte code length (in bytes) required" ; |
7959 | case _SG_VALIDATE_SHADERDESC_NO_CONT_UBS: return "shader uniform blocks must occupy continuous slots" ; |
7960 | case _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS: return "uniform block members must occupy continuous slots" ; |
7961 | case _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS: return "GL backend requires uniform block member declarations" ; |
7962 | case _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME: return "uniform block member name missing" ; |
7963 | case _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH: return "size of uniform block members doesn't match uniform block size" ; |
7964 | case _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS: return "shader images must occupy continuous slots" ; |
        case _SG_VALIDATE_SHADERDESC_IMG_NAME: return "GL backend requires image names";
7966 | |
7967 | /* pipeline creation */ |
7968 | case _SG_VALIDATE_PIPELINEDESC_CANARY: return "sg_pipeline_desc not initialized" ; |
7969 | case _SG_VALIDATE_PIPELINEDESC_SHADER: return "sg_pipeline_desc.shader missing or invalid" ; |
7970 | case _SG_VALIDATE_PIPELINEDESC_NO_ATTRS: return "sg_pipeline_desc.layout.attrs is empty or not continuous" ; |
7971 | case _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4: return "sg_pipeline_desc.layout.buffers[].stride must be multiple of 4" ; |
7972 | case _SG_VALIDATE_PIPELINEDESC_ATTR_NAME: return "GLES2/WebGL vertex layouts must have attribute names" ; |
7973 | case _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS: return "D3D11 vertex layouts must have attribute semantics (sem_name and sem_index)" ; |
7974 | |
7975 | /* pass creation */ |
7976 | case _SG_VALIDATE_PASSDESC_CANARY: return "sg_pass_desc not initialized" ; |
7977 | case _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS: return "sg_pass_desc.color_attachments[0] must be valid" ; |
7978 | case _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS: return "color attachments must occupy continuous slots" ; |
7979 | case _SG_VALIDATE_PASSDESC_IMAGE: return "pass attachment image is not valid" ; |
        case _SG_VALIDATE_PASSDESC_MIPLEVEL: return "pass attachment mip level is bigger than the image's number of mipmaps";
        case _SG_VALIDATE_PASSDESC_FACE: return "pass attachment image is a cubemap, but the face index is too big";
        case _SG_VALIDATE_PASSDESC_LAYER: return "pass attachment image is an array texture, but the layer index is too big";
        case _SG_VALIDATE_PASSDESC_SLICE: return "pass attachment image is a 3D texture, but the slice value is too big";
        case _SG_VALIDATE_PASSDESC_IMAGE_NO_RT: return "pass attachment images must be render targets";
7985 | case _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS: return "all pass color attachment images must have the same pixel format" ; |
7986 | case _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT: return "pass color-attachment images must have a renderable pixel format" ; |
7987 | case _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT: return "pass depth-attachment image must have depth pixel format" ; |
7988 | case _SG_VALIDATE_PASSDESC_IMAGE_SIZES: return "all pass attachments must have the same size" ; |
7989 | case _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS: return "all pass attachments must have the same sample count" ; |
7990 | |
7991 | /* sg_begin_pass */ |
7992 | case _SG_VALIDATE_BEGINPASS_PASS: return "sg_begin_pass: pass must be valid" ; |
7993 | case _SG_VALIDATE_BEGINPASS_IMAGE: return "sg_begin_pass: one or more attachment images are not valid" ; |
7994 | |
7995 | /* sg_apply_draw_state */ |
7996 | case _SG_VALIDATE_ADS_PIP: return "sg_apply_draw_state: pipeline object required" ; |
7997 | case _SG_VALIDATE_ADS_VBS: return "sg_apply_draw_state: number of vertex buffers doesn't match number of pipeline vertex layouts" ; |
7998 | case _SG_VALIDATE_ADS_VB_TYPE: return "sg_apply_draw_state: buffer in vertex buffer slot is not a SG_BUFFERTYPE_VERTEXBUFFER" ; |
        case _SG_VALIDATE_ADS_VB_OVERFLOW: return "sg_apply_draw_state: buffer in vertex buffer slot has overflowed";
8000 | case _SG_VALIDATE_ADS_NO_IB: return "sg_apply_draw_state: pipeline object defines indexed rendering, but no index buffer provided" ; |
8001 | case _SG_VALIDATE_ADS_IB: return "sg_apply_draw_state: pipeline object defines non-indexed rendering, but index buffer provided" ; |
8002 | case _SG_VALIDATE_ADS_IB_TYPE: return "sg_apply_draw_state: buffer in index buffer slot is not a SG_BUFFERTYPE_INDEXBUFFER" ; |
        case _SG_VALIDATE_ADS_IB_OVERFLOW: return "sg_apply_draw_state: buffer in index buffer slot has overflowed";
8004 | case _SG_VALIDATE_ADS_VS_IMGS: return "sg_apply_draw_state: vertex shader image count doesn't match sg_shader_desc" ; |
8005 | case _SG_VALIDATE_ADS_VS_IMG_TYPES: return "sg_apply_draw_state: one or more vertex shader image types don't match sg_shader_desc" ; |
8006 | case _SG_VALIDATE_ADS_FS_IMGS: return "sg_apply_draw_state: fragment shader image count doesn't match sg_shader_desc" ; |
8007 | case _SG_VALIDATE_ADS_FS_IMG_TYPES: return "sg_apply_draw_state: one or more fragment shader image types don't match sg_shader_desc" ; |
8008 | case _SG_VALIDATE_ADS_ATT_COUNT: return "sg_apply_draw_state: color_attachment_count in pipeline doesn't match number of pass color attachments" ; |
8009 | case _SG_VALIDATE_ADS_COLOR_FORMAT: return "sg_apply_draw_state: color_format in pipeline doesn't match pass color attachment pixel format" ; |
8010 | case _SG_VALIDATE_ADS_DEPTH_FORMAT: return "sg_apply_draw_state: depth_format in pipeline doesn't match pass depth attachment pixel format" ; |
8011 | case _SG_VALIDATE_ADS_SAMPLE_COUNT: return "sg_apply_draw_state: MSAA sample count in pipeline doesn't match render pass attachment sample count" ; |
8012 | |
8013 | /* sg_apply_uniform_block */ |
8014 | case _SG_VALIDATE_AUB_NO_PIPELINE: return "sg_apply_uniform_block: must be called after sg_apply_draw_state()" ; |
8015 | case _SG_VALIDATE_AUB_NO_UB_AT_SLOT: return "sg_apply_uniform_block: no uniform block declaration at this shader stage UB slot" ; |
8016 | case _SG_VALIDATE_AUB_SIZE: return "sg_apply_uniform_block: data size exceeds declared uniform block size" ; |
8017 | |
8018 | /* sg_update_buffer */ |
8019 | case _SG_VALIDATE_UPDATEBUF_USAGE: return "sg_update_buffer: cannot update immutable buffer" ; |
8020 | case _SG_VALIDATE_UPDATEBUF_SIZE: return "sg_update_buffer: update size is bigger than buffer size" ; |
8021 | case _SG_VALIDATE_UPDATEBUF_ONCE: return "sg_update_buffer: only one update allowed per buffer and frame" ; |
8022 | case _SG_VALIDATE_UPDATEBUF_APPEND: return "sg_update_buffer: cannot call sg_update_buffer and sg_append_buffer in same frame" ; |
8023 | |
8024 | /* sg_append_buffer */ |
8025 | case _SG_VALIDATE_APPENDBUF_USAGE: return "sg_append_buffer: cannot append to immutable buffer" ; |
8026 | case _SG_VALIDATE_APPENDBUF_SIZE: return "sg_append_buffer: overall appended size is bigger than buffer size" ; |
8027 | case _SG_VALIDATE_APPENDBUF_UPDATE: return "sg_append_buffer: cannot call sg_append_buffer and sg_update_buffer in same frame" ; |
8028 | |
8029 | /* sg_update_image */ |
8030 | case _SG_VALIDATE_UPDIMG_USAGE: return "sg_update_image: cannot update immutable image" ; |
8031 | case _SG_VALIDATE_UPDIMG_NOTENOUGHDATA: return "sg_update_image: not enough subimage data provided" ; |
8032 | case _SG_VALIDATE_UPDIMG_SIZE: return "sg_update_image: provided subimage data size too big" ; |
8033 | case _SG_VALIDATE_UPDIMG_COMPRESSED: return "sg_update_image: cannot update images with compressed format" ; |
8034 | case _SG_VALIDATE_UPDIMG_ONCE: return "sg_update_image: only one update allowed per image and frame" ; |
8035 | |
8036 | default: return "unknown validation error" ; |
8037 | } |
8038 | } |
8039 | #endif /* defined(SOKOL_DEBUG) */ |
8040 | |
8041 | /*-- generic backend state ---------------------------------------------------*/ |
8042 | typedef struct { |
8043 | _sg_pools pools; |
8044 | bool valid; |
8045 | uint32_t frame_index; |
8046 | sg_context active_context; |
8047 | sg_pass cur_pass; |
8048 | sg_pipeline cur_pipeline; |
8049 | bool pass_valid; |
8050 | bool next_draw_valid; |
8051 | #if defined(SOKOL_DEBUG) |
8052 | _sg_validate_error validate_error; |
8053 | #endif |
8054 | } _sg_state; |
8055 | static _sg_state _sg; |
8056 | |
8057 | /*-- validation checks -------------------------------------------------------*/ |
8058 | #if defined(SOKOL_DEBUG) |
_SOKOL_PRIVATE void _sg_validate_begin(void) {
8060 | _sg.validate_error = _SG_VALIDATE_SUCCESS; |
8061 | } |
8062 | |
8063 | _SOKOL_PRIVATE void _sg_validate(bool cond, _sg_validate_error err) { |
8064 | if (!cond) { |
8065 | _sg.validate_error = err; |
8066 | SOKOL_LOG(_sg_validate_string(err)); |
8067 | } |
8068 | } |
8069 | |
_SOKOL_PRIVATE bool _sg_validate_end(void) {
8071 | if (_sg.validate_error != _SG_VALIDATE_SUCCESS) { |
8072 | #if !defined(SOKOL_VALIDATE_NON_FATAL) |
8073 | SOKOL_LOG("^^^^ VALIDATION FAILED, TERMINATING ^^^^" ); |
8074 | SOKOL_ASSERT(false); |
8075 | #endif |
8076 | return false; |
8077 | } |
8078 | else { |
8079 | return true; |
8080 | } |
8081 | } |
8082 | #endif |
8083 | |
8084 | _SOKOL_PRIVATE bool _sg_validate_buffer_desc(const sg_buffer_desc* desc) { |
8085 | #if !defined(SOKOL_DEBUG) |
8086 | _SOKOL_UNUSED(desc); |
8087 | return true; |
8088 | #else |
8089 | SOKOL_ASSERT(desc); |
8090 | SOKOL_VALIDATE_BEGIN(); |
8091 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY); |
8092 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_BUFFERDESC_CANARY); |
8093 | SOKOL_VALIDATE(desc->size > 0, _SG_VALIDATE_BUFFERDESC_SIZE); |
8094 | bool ext = (0 != desc->gl_buffers[0]) || (0 != desc->mtl_buffers[0]) || (0 != desc->d3d11_buffer); |
8095 | if (!ext && (_sg_def(desc->usage, SG_USAGE_IMMUTABLE) == SG_USAGE_IMMUTABLE)) { |
8096 | SOKOL_VALIDATE(0 != desc->content, _SG_VALIDATE_BUFFERDESC_CONTENT); |
8097 | } |
8098 | else { |
8099 | SOKOL_VALIDATE(0 == desc->content, _SG_VALIDATE_BUFFERDESC_NO_CONTENT); |
8100 | } |
8101 | return SOKOL_VALIDATE_END(); |
8102 | #endif |
8103 | } |
8104 | |
8105 | _SOKOL_PRIVATE bool _sg_validate_image_desc(const sg_image_desc* desc) { |
8106 | #if !defined(SOKOL_DEBUG) |
8107 | _SOKOL_UNUSED(desc); |
8108 | return true; |
8109 | #else |
8110 | SOKOL_ASSERT(desc); |
8111 | SOKOL_VALIDATE_BEGIN(); |
8112 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY); |
8113 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_IMAGEDESC_CANARY); |
8114 | SOKOL_VALIDATE(desc->width > 0, _SG_VALIDATE_IMAGEDESC_WIDTH); |
8115 | SOKOL_VALIDATE(desc->height > 0, _SG_VALIDATE_IMAGEDESC_HEIGHT); |
8116 | const sg_pixel_format fmt = _sg_def(desc->pixel_format, SG_PIXELFORMAT_RGBA8); |
8117 | const sg_usage usage = _sg_def(desc->usage, SG_USAGE_IMMUTABLE); |
8118 | const bool ext = (0 != desc->gl_textures[0]) || (0 != desc->mtl_textures[0]) || (0 != desc->d3d11_texture); |
8119 | if (desc->render_target) { |
8120 | if (desc->sample_count > 1) { |
8121 | SOKOL_VALIDATE(_sg_query_feature(SG_FEATURE_MSAA_RENDER_TARGETS), _SG_VALIDATE_IMAGEDESC_NO_MSAA_RT_SUPPORT); |
8122 | } |
8123 | const bool valid_color_fmt = _sg_is_valid_rendertarget_color_format(fmt); |
8124 | const bool valid_depth_fmt = _sg_is_valid_rendertarget_depth_format(fmt); |
8125 | SOKOL_VALIDATE(valid_color_fmt || valid_depth_fmt, _SG_VALIDATE_IMAGEDESC_RT_PIXELFORMAT); |
8126 | SOKOL_VALIDATE(usage == SG_USAGE_IMMUTABLE, _SG_VALIDATE_IMAGEDESC_RT_IMMUTABLE); |
8127 | SOKOL_VALIDATE(desc->content.subimage[0][0].ptr==0, _SG_VALIDATE_IMAGEDESC_RT_NO_CONTENT); |
8128 | } |
8129 | else { |
8130 | SOKOL_VALIDATE(desc->sample_count <= 1, _SG_VALIDATE_IMAGEDESC_MSAA_BUT_NO_RT); |
8131 | const bool valid_nonrt_fmt = !_sg_is_valid_rendertarget_depth_format(fmt); |
8132 | SOKOL_VALIDATE(valid_nonrt_fmt, _SG_VALIDATE_IMAGEDESC_NONRT_PIXELFORMAT); |
8133 | /* FIXME: should use the same "expected size" computation as in _sg_validate_update_image() here */ |
8134 | if (!ext && (usage == SG_USAGE_IMMUTABLE)) { |
8135 | const int num_faces = _sg_def(desc->type, SG_IMAGETYPE_2D)==SG_IMAGETYPE_CUBE ? 6:1; |
8136 | const int num_mips = _sg_def(desc->num_mipmaps, 1); |
8137 | for (int face_index = 0; face_index < num_faces; face_index++) { |
8138 | for (int mip_index = 0; mip_index < num_mips; mip_index++) { |
8139 | const bool has_data = desc->content.subimage[face_index][mip_index].ptr != 0; |
8140 | const bool has_size = desc->content.subimage[face_index][mip_index].size > 0; |
8141 | SOKOL_VALIDATE(has_data && has_size, _SG_VALIDATE_IMAGEDESC_CONTENT); |
8142 | } |
8143 | } |
8144 | } |
8145 | else { |
8146 | for (int face_index = 0; face_index < SG_CUBEFACE_NUM; face_index++) { |
8147 | for (int mip_index = 0; mip_index < SG_MAX_MIPMAPS; mip_index++) { |
8148 | const bool no_data = 0 == desc->content.subimage[face_index][mip_index].ptr; |
8149 | const bool no_size = 0 == desc->content.subimage[face_index][mip_index].size; |
8150 | SOKOL_VALIDATE(no_data && no_size, _SG_VALIDATE_IMAGEDESC_NO_CONTENT); |
8151 | } |
8152 | } |
8153 | } |
8154 | } |
8155 | return SOKOL_VALIDATE_END(); |
8156 | #endif |
8157 | } |
8158 | |
8159 | _SOKOL_PRIVATE bool _sg_validate_shader_desc(const sg_shader_desc* desc) { |
8160 | #if !defined(SOKOL_DEBUG) |
8161 | _SOKOL_UNUSED(desc); |
8162 | return true; |
8163 | #else |
8164 | SOKOL_ASSERT(desc); |
8165 | SOKOL_VALIDATE_BEGIN(); |
8166 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY); |
8167 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_SHADERDESC_CANARY); |
8168 | #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) |
8169 | /* on GL, must provide shader source code */ |
8170 | SOKOL_VALIDATE(0 != desc->vs.source, _SG_VALIDATE_SHADERDESC_SOURCE); |
8171 | SOKOL_VALIDATE(0 != desc->fs.source, _SG_VALIDATE_SHADERDESC_SOURCE); |
8172 | #elif defined(SOKOL_METAL) || defined(SOKOL_D3D11_SHADER_COMPILER) |
8173 | /* on Metal or D3D with shader compiler, must provide shader source code or byte code */ |
8174 | SOKOL_VALIDATE((0 != desc->vs.source)||(0 != desc->vs.byte_code), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); |
8175 | SOKOL_VALIDATE((0 != desc->fs.source)||(0 != desc->fs.byte_code), _SG_VALIDATE_SHADERDESC_SOURCE_OR_BYTECODE); |
8176 | #else |
8177 | /* on D3D11 without shader compiler, must provide byte code */ |
8178 | SOKOL_VALIDATE(0 != desc->vs.byte_code, _SG_VALIDATE_SHADERDESC_BYTECODE); |
8179 | SOKOL_VALIDATE(0 != desc->fs.byte_code, _SG_VALIDATE_SHADERDESC_BYTECODE); |
8180 | #endif |
8181 | /* if shader byte code, the size must also be provided */ |
8182 | if (0 != desc->vs.byte_code) { |
8183 | SOKOL_VALIDATE(desc->vs.byte_code_size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); |
8184 | } |
8185 | if (0 != desc->fs.byte_code) { |
8186 | SOKOL_VALIDATE(desc->fs.byte_code_size > 0, _SG_VALIDATE_SHADERDESC_NO_BYTECODE_SIZE); |
8187 | } |
8188 | for (int stage_index = 0; stage_index < SG_NUM_SHADER_STAGES; stage_index++) { |
8189 | const sg_shader_stage_desc* stage_desc = (stage_index == 0)? &desc->vs : &desc->fs; |
8190 | bool uniform_blocks_continuous = true; |
8191 | for (int ub_index = 0; ub_index < SG_MAX_SHADERSTAGE_UBS; ub_index++) { |
8192 | const sg_shader_uniform_block_desc* ub_desc = &stage_desc->uniform_blocks[ub_index]; |
8193 | if (ub_desc->size > 0) { |
8194 | SOKOL_VALIDATE(uniform_blocks_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UBS); |
8195 | bool uniforms_continuous = true; |
8196 | int uniform_offset = 0; |
8197 | int num_uniforms = 0; |
8198 | for (int u_index = 0; u_index < SG_MAX_UB_MEMBERS; u_index++) { |
8199 | const sg_shader_uniform_desc* u_desc = &ub_desc->uniforms[u_index]; |
8200 | if (u_desc->type != SG_UNIFORMTYPE_INVALID) { |
8201 | SOKOL_VALIDATE(uniforms_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_UB_MEMBERS); |
8202 | #if defined(SOKOL_GLES2) || defined(SOKOL_GLES3) |
8203 | SOKOL_VALIDATE(u_desc->name, _SG_VALIDATE_SHADERDESC_UB_MEMBER_NAME); |
8204 | #endif |
8205 | const int array_count = _sg_def(u_desc->array_count, 1); |
8206 | uniform_offset += _sg_uniform_size(u_desc->type, array_count); |
8207 | num_uniforms++; |
8208 | } |
8209 | else { |
8210 | uniforms_continuous = false; |
8211 | } |
8212 | } |
8213 | #if defined(SOKOL_GLCORE33) || defined(SOKOL_GLES2) || defined(SOKOL_GLES3) |
8214 | SOKOL_VALIDATE(uniform_offset == ub_desc->size, _SG_VALIDATE_SHADERDESC_UB_SIZE_MISMATCH); |
8215 | SOKOL_VALIDATE(num_uniforms > 0, _SG_VALIDATE_SHADERDESC_NO_UB_MEMBERS); |
8216 | #endif |
8217 | } |
8218 | else { |
8219 | uniform_blocks_continuous = false; |
8220 | } |
8221 | } |
8222 | bool images_continuous = true; |
8223 | for (int img_index = 0; img_index < SG_MAX_SHADERSTAGE_IMAGES; img_index++) { |
8224 | const sg_shader_image_desc* img_desc = &stage_desc->images[img_index]; |
8225 | if (img_desc->type != _SG_IMAGETYPE_DEFAULT) { |
8226 | SOKOL_VALIDATE(images_continuous, _SG_VALIDATE_SHADERDESC_NO_CONT_IMGS); |
8227 | #if defined(SOKOL_GLES2) |
8228 | SOKOL_VALIDATE(img_desc->name, _SG_VALIDATE_SHADERDESC_IMG_NAME); |
8229 | #endif |
8230 | } |
8231 | else { |
8232 | images_continuous = false; |
8233 | } |
8234 | } |
8235 | } |
8236 | return SOKOL_VALIDATE_END(); |
8237 | #endif |
8238 | } |
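
/* A minimal usage sketch (illustrative only, not part of the library) of an
   sg_shader_desc that passes the checks above on the GL backends: source code
   for both stages, a uniform block in slot 0 whose member sizes add up to the
   block size, and a named member as required by GLES2/GLES3; the shader
   strings and the 'mvp' matrix uniform are hypothetical app-side content:

       sg_shader shd = sg_make_shader(&(sg_shader_desc){
           .vs = {
               .uniform_blocks[0] = {
                   .size = 16 * sizeof(float),
                   .uniforms[0] = { .name = "mvp", .type = SG_UNIFORMTYPE_MAT4 }
               },
               .source = "...vertex shader source..."
           },
           .fs.source = "...fragment shader source..."
       });
*/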
8239 | |
8240 | _SOKOL_PRIVATE bool _sg_validate_pipeline_desc(const sg_pipeline_desc* desc) { |
8241 | #if !defined(SOKOL_DEBUG) |
8242 | _SOKOL_UNUSED(desc); |
8243 | return true; |
8244 | #else |
8245 | SOKOL_ASSERT(desc); |
8246 | SOKOL_VALIDATE_BEGIN(); |
8247 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY); |
8248 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PIPELINEDESC_CANARY); |
8249 | SOKOL_VALIDATE(desc->shader.id != SG_INVALID_ID, _SG_VALIDATE_PIPELINEDESC_SHADER); |
8250 | const _sg_shader* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id); |
8251 | SOKOL_VALIDATE(shd && shd->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PIPELINEDESC_SHADER); |
8252 | for (int buf_index = 0; buf_index < SG_MAX_SHADERSTAGE_BUFFERS; buf_index++) { |
8253 | const sg_buffer_layout_desc* l_desc = &desc->layout.buffers[buf_index]; |
8254 | if (l_desc->stride == 0) { |
8255 | continue; |
8256 | } |
8257 | SOKOL_VALIDATE((l_desc->stride & 3) == 0, _SG_VALIDATE_PIPELINEDESC_LAYOUT_STRIDE4); |
8258 | } |
8259 | SOKOL_VALIDATE(desc->layout.attrs[0].format != SG_VERTEXFORMAT_INVALID, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS); |
8260 | bool attrs_cont = true; |
8261 | for (int attr_index = 0; attr_index < SG_MAX_VERTEX_ATTRIBUTES; attr_index++) { |
8262 | const sg_vertex_attr_desc* a_desc = &desc->layout.attrs[attr_index]; |
8263 | if (a_desc->format == SG_VERTEXFORMAT_INVALID) { |
8264 | attrs_cont = false; |
8265 | continue; |
8266 | } |
8267 | SOKOL_VALIDATE(attrs_cont, _SG_VALIDATE_PIPELINEDESC_NO_ATTRS); |
8268 | SOKOL_ASSERT(a_desc->buffer_index < SG_MAX_SHADERSTAGE_BUFFERS); |
8269 | #if defined(SOKOL_GLES2) |
8270 | /* on GLES2, vertex attribute names must be provided */ |
8271 | SOKOL_VALIDATE(a_desc->name, _SG_VALIDATE_PIPELINEDESC_ATTR_NAME); |
8272 | #elif defined(SOKOL_D3D11) |
8273 | /* on D3D11, semantic names (and semantic indices) must be provided */ |
8274 | SOKOL_VALIDATE(a_desc->sem_name, _SG_VALIDATE_PIPELINEDESC_ATTR_SEMANTICS); |
8275 | #endif |
8276 | } |
8277 | return SOKOL_VALIDATE_END(); |
8278 | #endif |
8279 | } |
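
/* A minimal usage sketch (illustrative only) of an sg_pipeline_desc that
   passes the checks above: a valid shader handle and a vertex layout that
   starts at attrs[0] without gaps; the attribute names are only required on
   GLES2/WebGL, the semantic names only on D3D11; 'shd' is a hypothetical
   shader handle created elsewhere:

       sg_pipeline pip = sg_make_pipeline(&(sg_pipeline_desc){
           .shader = shd,
           .layout = {
               .attrs = {
                   [0] = { .name="position", .sem_name="POS",   .format=SG_VERTEXFORMAT_FLOAT3 },
                   [1] = { .name="color0",   .sem_name="COLOR", .format=SG_VERTEXFORMAT_FLOAT4 }
               }
           }
       });
*/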
8280 | |
8281 | _SOKOL_PRIVATE bool _sg_validate_pass_desc(const sg_pass_desc* desc) { |
8282 | #if !defined(SOKOL_DEBUG) |
8283 | _SOKOL_UNUSED(desc); |
8284 | return true; |
8285 | #else |
8286 | SOKOL_ASSERT(desc); |
8287 | SOKOL_VALIDATE_BEGIN(); |
8288 | SOKOL_VALIDATE(desc->_start_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); |
8289 | SOKOL_VALIDATE(desc->_end_canary == 0, _SG_VALIDATE_PASSDESC_CANARY); |
8290 | bool atts_cont = true; |
8291 | sg_pixel_format color_fmt = SG_PIXELFORMAT_NONE; |
8292 | int width = -1, height = -1, sample_count = -1; |
8293 | for (int att_index = 0; att_index < SG_MAX_COLOR_ATTACHMENTS; att_index++) { |
8294 | const sg_attachment_desc* att = &desc->color_attachments[att_index]; |
8295 | if (att->image.id == SG_INVALID_ID) { |
8296 | SOKOL_VALIDATE(att_index > 0, _SG_VALIDATE_PASSDESC_NO_COLOR_ATTS); |
8297 | atts_cont = false; |
8298 | continue; |
8299 | } |
8300 | SOKOL_VALIDATE(atts_cont, _SG_VALIDATE_PASSDESC_NO_CONT_COLOR_ATTS); |
8301 | const _sg_image* img = _sg_lookup_image(&_sg.pools, att->image.id); |
8302 | SOKOL_VALIDATE(img && img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); |
8303 | SOKOL_VALIDATE(att->mip_level < img->num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); |
8304 | if (img->type == SG_IMAGETYPE_CUBE) { |
8305 | SOKOL_VALIDATE(att->face < 6, _SG_VALIDATE_PASSDESC_FACE); |
8306 | } |
8307 | else if (img->type == SG_IMAGETYPE_ARRAY) { |
8308 | SOKOL_VALIDATE(att->layer < img->depth, _SG_VALIDATE_PASSDESC_LAYER); |
8309 | } |
8310 | else if (img->type == SG_IMAGETYPE_3D) { |
8311 | SOKOL_VALIDATE(att->slice < img->depth, _SG_VALIDATE_PASSDESC_SLICE); |
8312 | } |
8313 | SOKOL_VALIDATE(img->render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT); |
8314 | if (att_index == 0) { |
8315 | color_fmt = img->pixel_format; |
8316 | width = img->width >> att->mip_level; |
8317 | height = img->height >> att->mip_level; |
8318 | sample_count = img->sample_count; |
8319 | } |
8320 | else { |
8321 | SOKOL_VALIDATE(img->pixel_format == color_fmt, _SG_VALIDATE_PASSDESC_COLOR_PIXELFORMATS); |
8322 | SOKOL_VALIDATE(width == img->width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
8323 | SOKOL_VALIDATE(height == img->height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
8324 | SOKOL_VALIDATE(sample_count == img->sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS); |
8325 | } |
8326 | SOKOL_VALIDATE(_sg_is_valid_rendertarget_color_format(img->pixel_format), _SG_VALIDATE_PASSDESC_COLOR_INV_PIXELFORMAT); |
8327 | } |
8328 | if (desc->depth_stencil_attachment.image.id != SG_INVALID_ID) { |
8329 | const sg_attachment_desc* att = &desc->depth_stencil_attachment; |
8330 | const _sg_image* img = _sg_lookup_image(&_sg.pools, att->image.id); |
8331 | SOKOL_VALIDATE(img && img->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_PASSDESC_IMAGE); |
8332 | SOKOL_VALIDATE(att->mip_level < img->num_mipmaps, _SG_VALIDATE_PASSDESC_MIPLEVEL); |
8333 | if (img->type == SG_IMAGETYPE_CUBE) { |
8334 | SOKOL_VALIDATE(att->face < 6, _SG_VALIDATE_PASSDESC_FACE); |
8335 | } |
8336 | else if (img->type == SG_IMAGETYPE_ARRAY) { |
8337 | SOKOL_VALIDATE(att->layer < img->depth, _SG_VALIDATE_PASSDESC_LAYER); |
8338 | } |
8339 | else if (img->type == SG_IMAGETYPE_3D) { |
8340 | SOKOL_VALIDATE(att->slice < img->depth, _SG_VALIDATE_PASSDESC_SLICE); |
8341 | } |
8342 | SOKOL_VALIDATE(img->render_target, _SG_VALIDATE_PASSDESC_IMAGE_NO_RT); |
8343 | SOKOL_VALIDATE(width == img->width >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
8344 | SOKOL_VALIDATE(height == img->height >> att->mip_level, _SG_VALIDATE_PASSDESC_IMAGE_SIZES); |
8345 | SOKOL_VALIDATE(sample_count == img->sample_count, _SG_VALIDATE_PASSDESC_IMAGE_SAMPLE_COUNTS); |
8346 | SOKOL_VALIDATE(_sg_is_valid_rendertarget_depth_format(img->pixel_format), _SG_VALIDATE_PASSDESC_DEPTH_INV_PIXELFORMAT); |
8347 | } |
8348 | return SOKOL_VALIDATE_END(); |
8349 | #endif |
8350 | } |
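
/* A minimal usage sketch (illustrative only) of creating a pair of
   render-target images and an offscreen pass that passes the checks above
   (all attachments share the same size and sample count, the color format is
   renderable, the depth format is a depth format); the 256x256 size is an
   arbitrary example:

       sg_image color_img = sg_make_image(&(sg_image_desc){
           .render_target = true,
           .width = 256,
           .height = 256,
           .pixel_format = SG_PIXELFORMAT_RGBA8
       });
       sg_image depth_img = sg_make_image(&(sg_image_desc){
           .render_target = true,
           .width = 256,
           .height = 256,
           .pixel_format = SG_PIXELFORMAT_DEPTHSTENCIL
       });
       sg_pass offscreen_pass = sg_make_pass(&(sg_pass_desc){
           .color_attachments[0].image = color_img,
           .depth_stencil_attachment.image = depth_img
       });
*/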
8351 | |
8352 | _SOKOL_PRIVATE bool _sg_validate_begin_pass(_sg_pass* pass) { |
8353 | #if !defined(SOKOL_DEBUG) |
8354 | _SOKOL_UNUSED(pass); |
8355 | return true; |
8356 | #else |
8357 | SOKOL_VALIDATE_BEGIN(); |
8358 | SOKOL_VALIDATE(pass->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_PASS); |
8359 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
8360 | const _sg_attachment* att = &pass->color_atts[i]; |
8361 | if (att->image) { |
8362 | SOKOL_VALIDATE(att->image->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); |
8363 | SOKOL_VALIDATE(att->image->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE); |
8364 | } |
8365 | } |
8366 | if (pass->ds_att.image) { |
8367 | const _sg_attachment* att = &pass->ds_att; |
8368 | SOKOL_VALIDATE(att->image->slot.state == SG_RESOURCESTATE_VALID, _SG_VALIDATE_BEGINPASS_IMAGE); |
8369 | SOKOL_VALIDATE(att->image->slot.id == att->image_id.id, _SG_VALIDATE_BEGINPASS_IMAGE); |
8370 | } |
8371 | return SOKOL_VALIDATE_END(); |
8372 | #endif |
8373 | } |
8374 | |
8375 | _SOKOL_PRIVATE bool _sg_validate_draw_state(const sg_draw_state* ds) { |
8376 | #if !defined(SOKOL_DEBUG) |
8377 | _SOKOL_UNUSED(ds); |
8378 | return true; |
8379 | #else |
8380 | SOKOL_VALIDATE_BEGIN(); |
8381 | /* has pipeline and pipeline still exists */ |
8382 | SOKOL_VALIDATE(ds->pipeline.id != SG_INVALID_ID, _SG_VALIDATE_ADS_PIP); |
8383 | const _sg_pipeline* pip = _sg_lookup_pipeline(&_sg.pools, ds->pipeline.id); |
8384 | if (!pip) { |
8385 | /* cannot continue with validation without pipeline object */ |
8386 | return SOKOL_VALIDATE_END(); |
8387 | } |
8388 | SOKOL_ASSERT(pip->shader); |
8389 | |
8390 | /* has expected vertex buffers, and vertex buffers still exist */ |
8391 | for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++) { |
8392 | if (ds->vertex_buffers[i].id != SG_INVALID_ID) { |
8393 | SOKOL_VALIDATE(pip->vertex_layout_valid[i], _SG_VALIDATE_ADS_VBS); |
8394 | /* buffers in vertex-buffer-slots must be of type SG_BUFFERTYPE_VERTEXBUFFER */ |
8395 | const _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, ds->vertex_buffers[i].id); |
8396 | SOKOL_ASSERT(buf); |
8397 | if (buf->slot.state == SG_RESOURCESTATE_VALID) { |
8398 | SOKOL_VALIDATE(SG_BUFFERTYPE_VERTEXBUFFER == buf->type, _SG_VALIDATE_ADS_VB_TYPE); |
8399 | SOKOL_VALIDATE(!buf->append_overflow, _SG_VALIDATE_ADS_VB_OVERFLOW); |
8400 | } |
8401 | } |
8402 | else { |
8403 | /* vertex buffer provided in a slot which has no vertex layout in pipeline */ |
8404 | SOKOL_VALIDATE(!pip->vertex_layout_valid[i], _SG_VALIDATE_ADS_VBS); |
8405 | } |
8406 | } |
8407 | |
8408 | /* index buffer expected or not, and index buffer still exists */ |
8409 | if (pip->index_type == SG_INDEXTYPE_NONE) { |
8410 | /* pipeline defines non-indexed rendering, but index buffer provided */ |
8411 | SOKOL_VALIDATE(ds->index_buffer.id == SG_INVALID_ID, _SG_VALIDATE_ADS_IB); |
8412 | } |
8413 | else { |
8414 | /* pipeline defines indexed rendering, but no index buffer provided */ |
8415 | SOKOL_VALIDATE(ds->index_buffer.id != SG_INVALID_ID, _SG_VALIDATE_ADS_NO_IB); |
8416 | } |
8417 | if (ds->index_buffer.id != SG_INVALID_ID) { |
8418 | /* buffer in index-buffer-slot must be of type SG_BUFFERTYPE_INDEXBUFFER */ |
8419 | const _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, ds->index_buffer.id); |
8420 | SOKOL_ASSERT(buf); |
8421 | if (buf->slot.state == SG_RESOURCESTATE_VALID) { |
8422 | SOKOL_VALIDATE(SG_BUFFERTYPE_INDEXBUFFER == buf->type, _SG_VALIDATE_ADS_IB_TYPE); |
8423 | SOKOL_VALIDATE(!buf->append_overflow, _SG_VALIDATE_ADS_IB_OVERFLOW); |
8424 | } |
8425 | } |
8426 | |
8427 | /* has expected vertex shader images */ |
8428 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
8429 | _sg_shader_stage* stage = &pip->shader->stage[SG_SHADERSTAGE_VS]; |
8430 | if (ds->vs_images[i].id != SG_INVALID_ID) { |
8431 | SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ADS_VS_IMGS); |
8432 | const _sg_image* img = _sg_lookup_image(&_sg.pools, ds->vs_images[i].id); |
8433 | SOKOL_ASSERT(img); |
8434 | if (img->slot.state == SG_RESOURCESTATE_VALID) { |
8435 | SOKOL_VALIDATE(img->type == stage->images[i].type, _SG_VALIDATE_ADS_VS_IMG_TYPES); |
8436 | } |
8437 | } |
8438 | else { |
8439 | SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ADS_VS_IMGS); |
8440 | } |
8441 | } |
8442 | |
8443 | /* has expected fragment shader images */ |
8444 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++) { |
8445 | _sg_shader_stage* stage = &pip->shader->stage[SG_SHADERSTAGE_FS]; |
8446 | if (ds->fs_images[i].id != SG_INVALID_ID) { |
8447 | SOKOL_VALIDATE(i < stage->num_images, _SG_VALIDATE_ADS_FS_IMGS); |
8448 | const _sg_image* img = _sg_lookup_image(&_sg.pools, ds->fs_images[i].id); |
8449 | SOKOL_ASSERT(img); |
8450 | if (img->slot.state == SG_RESOURCESTATE_VALID) { |
8451 | SOKOL_VALIDATE(img->type == stage->images[i].type, _SG_VALIDATE_ADS_FS_IMG_TYPES); |
8452 | } |
8453 | } |
8454 | else { |
8455 | SOKOL_VALIDATE(i >= stage->num_images, _SG_VALIDATE_ADS_FS_IMGS); |
8456 | } |
8457 | } |
8458 | |
8459 | /* check that pipeline attributes match current pass attributes */ |
8460 | const _sg_pass* pass = _sg_lookup_pass(&_sg.pools, _sg.cur_pass.id); |
8461 | if (pass) { |
8462 | /* an offscreen pass */ |
8463 | SOKOL_VALIDATE(pip->color_attachment_count == pass->num_color_atts, _SG_VALIDATE_ADS_ATT_COUNT); |
8464 | SOKOL_VALIDATE(pip->color_format == pass->color_atts[0].image->pixel_format, _SG_VALIDATE_ADS_COLOR_FORMAT); |
8465 | SOKOL_VALIDATE(pip->sample_count == pass->color_atts[0].image->sample_count, _SG_VALIDATE_ADS_SAMPLE_COUNT); |
8466 | if (pass->ds_att.image) { |
8467 | SOKOL_VALIDATE(pip->depth_format == pass->ds_att.image->pixel_format, _SG_VALIDATE_ADS_DEPTH_FORMAT); |
8468 | } |
8469 | else { |
8470 | SOKOL_VALIDATE(pip->depth_format == SG_PIXELFORMAT_NONE, _SG_VALIDATE_ADS_DEPTH_FORMAT); |
8471 | } |
8472 | } |
8473 | else { |
8474 | /* default pass */ |
8475 | SOKOL_VALIDATE(pip->color_attachment_count == 1, _SG_VALIDATE_ADS_ATT_COUNT); |
8476 | SOKOL_VALIDATE(pip->color_format == SG_PIXELFORMAT_RGBA8, _SG_VALIDATE_ADS_COLOR_FORMAT); |
8477 | SOKOL_VALIDATE(pip->depth_format == SG_PIXELFORMAT_DEPTHSTENCIL, _SG_VALIDATE_ADS_DEPTH_FORMAT); |
8478 | /* FIXME: hmm, we don't know if the default framebuffer is multisampled here */ |
8479 | } |
8480 | return SOKOL_VALIDATE_END(); |
8481 | #endif |
8482 | } |
8483 | |
8484 | _SOKOL_PRIVATE bool _sg_validate_apply_uniform_block(sg_shader_stage stage_index, int ub_index, const void* data, int num_bytes) { |
8485 | _SOKOL_UNUSED(data); |
8486 | #if !defined(SOKOL_DEBUG) |
8487 | _SOKOL_UNUSED(stage_index); |
8488 | _SOKOL_UNUSED(ub_index); |
8489 | _SOKOL_UNUSED(num_bytes); |
8490 | return true; |
8491 | #else |
8492 | SOKOL_ASSERT((stage_index == SG_SHADERSTAGE_VS) || (stage_index == SG_SHADERSTAGE_FS)); |
8493 | SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); |
8494 | SOKOL_VALIDATE_BEGIN(); |
8495 | SOKOL_VALIDATE(_sg.cur_pipeline.id != SG_INVALID_ID, _SG_VALIDATE_AUB_NO_PIPELINE); |
8496 | const _sg_pipeline* pip = _sg_lookup_pipeline(&_sg.pools, _sg.cur_pipeline.id); |
8497 | SOKOL_ASSERT(pip && (pip->slot.id == _sg.cur_pipeline.id)); |
8498 | SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->shader_id.id)); |
8499 | |
8500 | /* check that there is a uniform block at 'stage' and 'ub_index' */ |
8501 | const _sg_shader_stage* stage = &pip->shader->stage[stage_index]; |
8502 | SOKOL_VALIDATE(ub_index < stage->num_uniform_blocks, _SG_VALIDATE_AUB_NO_UB_AT_SLOT); |
8503 | |
8504 | /* check that the provided data size doesn't exceed the uniform block size */ |
8505 | SOKOL_VALIDATE(num_bytes <= stage->uniform_blocks[ub_index].size, _SG_VALIDATE_AUB_SIZE); |
8506 | |
8507 | return SOKOL_VALIDATE_END(); |
8508 | #endif |
8509 | } |
8510 | |
8511 | _SOKOL_PRIVATE bool _sg_validate_update_buffer(const _sg_buffer* buf, const void* data, int size) { |
8512 | #if !defined(SOKOL_DEBUG) |
8513 | _SOKOL_UNUSED(buf); |
8514 | _SOKOL_UNUSED(data); |
8515 | _SOKOL_UNUSED(size); |
8516 | return true; |
8517 | #else |
8518 | SOKOL_ASSERT(buf && data); |
8519 | SOKOL_VALIDATE_BEGIN(); |
8520 | SOKOL_VALIDATE(buf->usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDATEBUF_USAGE); |
8521 | SOKOL_VALIDATE(buf->size >= size, _SG_VALIDATE_UPDATEBUF_SIZE); |
8522 | SOKOL_VALIDATE(buf->update_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_ONCE); |
8523 | SOKOL_VALIDATE(buf->append_frame_index != _sg.frame_index, _SG_VALIDATE_UPDATEBUF_APPEND); |
8524 | return SOKOL_VALIDATE_END(); |
8525 | #endif |
8526 | } |
8527 | |
8528 | _SOKOL_PRIVATE bool _sg_validate_append_buffer(const _sg_buffer* buf, const void* data, int size) { |
8529 | #if !defined(SOKOL_DEBUG) |
8530 | _SOKOL_UNUSED(buf); |
8531 | _SOKOL_UNUSED(data); |
8532 | _SOKOL_UNUSED(size); |
8533 | return true; |
8534 | #else |
8535 | SOKOL_ASSERT(buf && data); |
8536 | SOKOL_VALIDATE_BEGIN(); |
8537 | SOKOL_VALIDATE(buf->usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_APPENDBUF_USAGE); |
8538 | SOKOL_VALIDATE(buf->size >= (buf->append_pos+size), _SG_VALIDATE_APPENDBUF_SIZE); |
8539 | SOKOL_VALIDATE(buf->update_frame_index != _sg.frame_index, _SG_VALIDATE_APPENDBUF_UPDATE); |
8540 | return SOKOL_VALIDATE_END(); |
8541 | #endif |
8542 | } |
8543 | |
8544 | _SOKOL_PRIVATE bool _sg_validate_update_image(const _sg_image* img, const sg_image_content* data) { |
8545 | #if !defined(SOKOL_DEBUG) |
8546 | _SOKOL_UNUSED(img); |
8547 | _SOKOL_UNUSED(data); |
8548 | return true; |
8549 | #else |
8550 | SOKOL_ASSERT(img && data); |
8551 | SOKOL_VALIDATE_BEGIN(); |
8552 | SOKOL_VALIDATE(img->usage != SG_USAGE_IMMUTABLE, _SG_VALIDATE_UPDIMG_USAGE); |
8553 | SOKOL_VALIDATE(img->upd_frame_index != _sg.frame_index, _SG_VALIDATE_UPDIMG_ONCE); |
8554 | SOKOL_VALIDATE(!_sg_is_compressed_pixel_format(img->pixel_format), _SG_VALIDATE_UPDIMG_COMPRESSED); |
8555 | const int num_faces = (img->type == SG_IMAGETYPE_CUBE) ? 6 : 1; |
8556 | const int num_mips = img->num_mipmaps; |
8557 | for (int face_index = 0; face_index < num_faces; face_index++) { |
8558 | for (int mip_index = 0; mip_index < num_mips; mip_index++) { |
8559 | SOKOL_VALIDATE(0 != data->subimage[face_index][mip_index].ptr, _SG_VALIDATE_UPDIMG_NOTENOUGHDATA); |
8560 | const int mip_width = _sg_max(img->width >> mip_index, 1); |
8561 | const int mip_height = _sg_max(img->height >> mip_index, 1); |
8562 | const int bytes_per_slice = _sg_surface_pitch(img->pixel_format, mip_width, mip_height); |
8563 | const int expected_size = bytes_per_slice * img->depth; |
8564 | SOKOL_VALIDATE(data->subimage[face_index][mip_index].size <= expected_size, _SG_VALIDATE_UPDIMG_SIZE); |
8565 | } |
8566 | } |
8567 | return SOKOL_VALIDATE_END(); |
8568 | #endif |
8569 | } |
8570 | |
8571 | /*== PUBLIC API FUNCTIONS ====================================================*/ |
8572 | SOKOL_API_IMPL void sg_setup(const sg_desc* desc) { |
8573 | SOKOL_ASSERT(desc); |
8574 | SOKOL_ASSERT((desc->_start_canary == 0) && (desc->_end_canary == 0)); |
8575 | memset(&_sg, 0, sizeof(_sg)); |
8576 | _sg_setup_pools(&_sg.pools, desc); |
8577 | _sg.frame_index = 1; |
8578 | _sg.next_draw_valid = false; |
8579 | _sg_setup_backend(desc); |
8580 | sg_setup_context(); |
8581 | _sg.valid = true; |
8582 | } |
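
/* A minimal usage sketch (illustrative only), assuming a GL backend where no
   3D-API device pointers are required; on D3D11 and Metal the sg_desc must
   additionally carry the device/context pointers created by the application:

       sg_desc desc = { 0 };    // an all-zero desc selects the built-in defaults
       sg_setup(&desc);
       // sg_isvalid() returns true from here on
*/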
8583 | |
8584 | SOKOL_API_IMPL void sg_shutdown(void) { |
    /* this can only destroy resources for the currently active context; if
       multiple contexts are used, the app code must take care of properly
       releasing the resources of the other contexts (since only the app code
       can make the respective 3D-API context current)
    */
8589 | if (_sg.active_context.id != SG_INVALID_ID) { |
8590 | _sg_context* ctx = _sg_lookup_context(&_sg.pools, _sg.active_context.id); |
8591 | if (ctx) { |
8592 | _sg_destroy_all_resources(&_sg.pools, _sg.active_context.id); |
8593 | _sg_destroy_context(ctx); |
8594 | } |
8595 | } |
8596 | _sg_discard_backend(); |
8597 | _sg_discard_pools(&_sg.pools); |
8598 | _sg.valid = false; |
8599 | } |
8600 | |
8601 | SOKOL_API_IMPL bool sg_isvalid(void) { |
8602 | return _sg.valid; |
8603 | } |
8604 | |
8605 | SOKOL_API_IMPL bool sg_query_feature(sg_feature f) { |
8606 | return _sg_query_feature(f); |
8607 | } |
8608 | |
8609 | SOKOL_API_IMPL sg_context sg_setup_context(void) { |
8610 | sg_context res; |
8611 | res.id = _sg_pool_alloc_id(&_sg.pools.context_pool); |
8612 | if (res.id != SG_INVALID_ID) { |
8613 | _sg_context* ctx = _sg_context_at(&_sg.pools, res.id); |
8614 | SOKOL_ASSERT(ctx); |
8615 | ctx->slot.id = res.id; |
8616 | ctx->slot.state = SG_RESOURCESTATE_ALLOC; |
8617 | _sg_create_context(ctx); |
8618 | SOKOL_ASSERT(ctx->slot.state == SG_RESOURCESTATE_VALID); |
8619 | _sg_activate_context(ctx); |
8620 | } |
8621 | _sg.active_context = res; |
8622 | return res; |
8623 | } |
8624 | |
8625 | SOKOL_API_IMPL void sg_discard_context(sg_context ctx_id) { |
8626 | _sg_destroy_all_resources(&_sg.pools, ctx_id.id); |
8627 | _sg_context* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id); |
8628 | if (ctx) { |
8629 | _sg_destroy_context(ctx); |
8630 | _sg_pool_free_id(&_sg.pools.context_pool, ctx_id.id); |
8631 | } |
8632 | _sg.active_context.id = SG_INVALID_ID; |
8633 | _sg_activate_context(0); |
8634 | } |
8635 | |
8636 | SOKOL_API_IMPL void sg_activate_context(sg_context ctx_id) { |
8637 | _sg.active_context = ctx_id; |
8638 | _sg_context* ctx = _sg_lookup_context(&_sg.pools, ctx_id.id); |
8639 | /* NOTE: ctx can be 0 here if the context is no longer valid */ |
8640 | _sg_activate_context(ctx); |
8641 | } |
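
/* A rough sketch (illustrative only) of the multi-context flow when rendering
   into more than one window; creating the windows and making their 3D-API
   contexts current is the application's responsibility and not shown:

       // after creating a second window and its 3D-API context:
       sg_context ctx2 = sg_setup_context();

       // each frame, after making that window's 3D-API context current:
       sg_activate_context(ctx2);
       // ...sokol_gfx calls for this window...

       // when the window goes away (with its 3D-API context still current):
       sg_discard_context(ctx2);
*/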
8642 | |
8643 | /*-- allocate resource id ----------------------------------------------------*/ |
8644 | SOKOL_API_IMPL sg_buffer sg_alloc_buffer(void) { |
8645 | sg_buffer res; |
8646 | res.id = _sg_pool_alloc_id(&_sg.pools.buffer_pool); |
8647 | if (res.id != SG_INVALID_ID) { |
8648 | _sg_buffer* buf = _sg_buffer_at(&_sg.pools, res.id); |
8649 | SOKOL_ASSERT(buf && (buf->slot.state == SG_RESOURCESTATE_INITIAL) && (buf->slot.id == SG_INVALID_ID)); |
8650 | buf->slot.id = res.id; |
8651 | buf->slot.state = SG_RESOURCESTATE_ALLOC; |
8652 | } |
8653 | return res; |
8654 | } |
8655 | |
8656 | SOKOL_API_IMPL sg_image sg_alloc_image(void) { |
8657 | sg_image res; |
8658 | res.id = _sg_pool_alloc_id(&_sg.pools.image_pool); |
8659 | if (res.id != SG_INVALID_ID) { |
8660 | _sg_image* img = _sg_image_at(&_sg.pools, res.id); |
8661 | SOKOL_ASSERT(img && (img->slot.state == SG_RESOURCESTATE_INITIAL) && (img->slot.id == SG_INVALID_ID)); |
8662 | img->slot.id = res.id; |
8663 | img->slot.state = SG_RESOURCESTATE_ALLOC; |
8664 | } |
8665 | return res; |
8666 | } |
8667 | |
8668 | SOKOL_API_IMPL sg_shader sg_alloc_shader(void) { |
8669 | sg_shader res; |
8670 | res.id = _sg_pool_alloc_id(&_sg.pools.shader_pool); |
8671 | if (res.id != SG_INVALID_ID) { |
8672 | _sg_shader* shd = _sg_shader_at(&_sg.pools, res.id); |
8673 | SOKOL_ASSERT(shd && (shd->slot.state == SG_RESOURCESTATE_INITIAL) && (shd->slot.id == SG_INVALID_ID)); |
8674 | shd->slot.id = res.id; |
8675 | shd->slot.state = SG_RESOURCESTATE_ALLOC; |
8676 | } |
8677 | return res; |
8678 | } |
8679 | |
8680 | SOKOL_API_IMPL sg_pipeline sg_alloc_pipeline(void) { |
8681 | sg_pipeline res; |
8682 | res.id = _sg_pool_alloc_id(&_sg.pools.pipeline_pool); |
8683 | if (res.id != SG_INVALID_ID) { |
8684 | _sg_pipeline* pip = _sg_pipeline_at(&_sg.pools, res.id); |
8685 | SOKOL_ASSERT(pip && (pip->slot.state == SG_RESOURCESTATE_INITIAL) && (pip->slot.id == SG_INVALID_ID)); |
8686 | pip->slot.id = res.id; |
8687 | pip->slot.state = SG_RESOURCESTATE_ALLOC; |
8688 | } |
8689 | return res; |
8690 | } |
8691 | |
8692 | SOKOL_API_IMPL sg_pass sg_alloc_pass(void) { |
8693 | sg_pass res; |
8694 | res.id = _sg_pool_alloc_id(&_sg.pools.pass_pool); |
8695 | if (res.id != SG_INVALID_ID) { |
8696 | _sg_pass* pass = _sg_pass_at(&_sg.pools, res.id); |
8697 | SOKOL_ASSERT(pass && (pass->slot.state == SG_RESOURCESTATE_INITIAL) && (pass->slot.id == SG_INVALID_ID)); |
8698 | pass->slot.id = res.id; |
8699 | pass->slot.state = SG_RESOURCESTATE_ALLOC; |
8700 | } |
8701 | return res; |
8702 | } |
8703 | |
8704 | /*-- initialize an allocated resource ----------------------------------------*/ |
8705 | SOKOL_API_IMPL void sg_init_buffer(sg_buffer buf_id, const sg_buffer_desc* desc) { |
8706 | SOKOL_ASSERT(buf_id.id != SG_INVALID_ID && desc); |
8707 | _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
8708 | SOKOL_ASSERT(buf && buf->slot.state == SG_RESOURCESTATE_ALLOC); |
8709 | if (_sg_validate_buffer_desc(desc)) { |
8710 | _sg_create_buffer(buf, desc); |
8711 | buf->slot.ctx_id = _sg.active_context.id; |
8712 | } |
8713 | else { |
8714 | buf->slot.state = SG_RESOURCESTATE_FAILED; |
8715 | } |
8716 | SOKOL_ASSERT((buf->slot.state == SG_RESOURCESTATE_VALID)||(buf->slot.state == SG_RESOURCESTATE_FAILED)); |
8717 | } |
8718 | |
8719 | SOKOL_API_IMPL void sg_init_image(sg_image img_id, const sg_image_desc* desc) { |
8720 | SOKOL_ASSERT(img_id.id != SG_INVALID_ID && desc); |
8721 | _sg_image* img = _sg_lookup_image(&_sg.pools, img_id.id); |
8722 | SOKOL_ASSERT(img && img->slot.state == SG_RESOURCESTATE_ALLOC); |
8723 | if (_sg_validate_image_desc(desc)) { |
8724 | _sg_create_image(img, desc); |
8725 | img->slot.ctx_id = _sg.active_context.id; |
8726 | } |
8727 | else { |
8728 | img->slot.state = SG_RESOURCESTATE_FAILED; |
8729 | } |
8730 | SOKOL_ASSERT((img->slot.state == SG_RESOURCESTATE_VALID)||(img->slot.state == SG_RESOURCESTATE_FAILED)); |
8731 | } |
8732 | |
8733 | SOKOL_API_IMPL void sg_init_shader(sg_shader shd_id, const sg_shader_desc* desc) { |
8734 | SOKOL_ASSERT(shd_id.id != SG_INVALID_ID && desc); |
8735 | _sg_shader* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
8736 | SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_ALLOC); |
8737 | if (_sg_validate_shader_desc(desc)) { |
8738 | _sg_create_shader(shd, desc); |
8739 | shd->slot.ctx_id = _sg.active_context.id; |
8740 | } |
8741 | else { |
8742 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
8743 | } |
8744 | SOKOL_ASSERT((shd->slot.state == SG_RESOURCESTATE_VALID)||(shd->slot.state == SG_RESOURCESTATE_FAILED)); |
8745 | } |
8746 | |
8747 | SOKOL_API_IMPL void sg_init_pipeline(sg_pipeline pip_id, const sg_pipeline_desc* desc) { |
8748 | SOKOL_ASSERT(pip_id.id != SG_INVALID_ID && desc); |
8749 | _sg_pipeline* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
8750 | SOKOL_ASSERT(pip && pip->slot.state == SG_RESOURCESTATE_ALLOC); |
8751 | if (_sg_validate_pipeline_desc(desc)) { |
8752 | _sg_shader* shd = _sg_lookup_shader(&_sg.pools, desc->shader.id); |
8753 | SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_VALID); |
8754 | _sg_create_pipeline(pip, shd, desc); |
8755 | pip->slot.ctx_id = _sg.active_context.id; |
8756 | } |
8757 | else { |
8758 | pip->slot.state = SG_RESOURCESTATE_FAILED; |
8759 | } |
8760 | SOKOL_ASSERT((pip->slot.state == SG_RESOURCESTATE_VALID)||(pip->slot.state == SG_RESOURCESTATE_FAILED)); |
8761 | } |
8762 | |
8763 | SOKOL_API_IMPL void sg_init_pass(sg_pass pass_id, const sg_pass_desc* desc) { |
8764 | SOKOL_ASSERT(pass_id.id != SG_INVALID_ID && desc); |
8765 | _sg_pass* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
8766 | SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC); |
8767 | if (_sg_validate_pass_desc(desc)) { |
8768 | /* lookup pass attachment image pointers */ |
8769 | _sg_image* att_imgs[SG_MAX_COLOR_ATTACHMENTS + 1]; |
8770 | for (int i = 0; i < SG_MAX_COLOR_ATTACHMENTS; i++) { |
8771 | if (desc->color_attachments[i].image.id) { |
8772 | att_imgs[i] = _sg_lookup_image(&_sg.pools, desc->color_attachments[i].image.id); |
8773 | SOKOL_ASSERT(att_imgs[i] && att_imgs[i]->slot.state == SG_RESOURCESTATE_VALID); |
8774 | } |
8775 | else { |
8776 | att_imgs[i] = 0; |
8777 | } |
8778 | } |
8779 | const int ds_att_index = SG_MAX_COLOR_ATTACHMENTS; |
8780 | if (desc->depth_stencil_attachment.image.id) { |
8781 | att_imgs[ds_att_index] = _sg_lookup_image(&_sg.pools, desc->depth_stencil_attachment.image.id); |
8782 | SOKOL_ASSERT(att_imgs[ds_att_index] && att_imgs[ds_att_index]->slot.state == SG_RESOURCESTATE_VALID); |
8783 | } |
8784 | else { |
8785 | att_imgs[ds_att_index] = 0; |
8786 | } |
8787 | _sg_create_pass(pass, att_imgs, desc); |
8788 | pass->slot.ctx_id = _sg.active_context.id; |
8789 | } |
8790 | else { |
8791 | pass->slot.state = SG_RESOURCESTATE_FAILED; |
8792 | } |
8793 | SOKOL_ASSERT((pass->slot.state == SG_RESOURCESTATE_VALID)||(pass->slot.state == SG_RESOURCESTATE_FAILED)); |
8794 | } |
8795 | |
8796 | /*-- set allocated resource to failed state ----------------------------------*/ |
8797 | SOKOL_API_IMPL void sg_fail_buffer(sg_buffer buf_id) { |
8798 | SOKOL_ASSERT(buf_id.id != SG_INVALID_ID); |
8799 | _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
8800 | SOKOL_ASSERT(buf && buf->slot.state == SG_RESOURCESTATE_ALLOC); |
8801 | buf->slot.state = SG_RESOURCESTATE_FAILED; |
8802 | } |
8803 | |
8804 | SOKOL_API_IMPL void sg_fail_image(sg_image img_id) { |
8805 | SOKOL_ASSERT(img_id.id != SG_INVALID_ID); |
8806 | _sg_image* img = _sg_lookup_image(&_sg.pools, img_id.id); |
8807 | SOKOL_ASSERT(img && img->slot.state == SG_RESOURCESTATE_ALLOC); |
8808 | img->slot.state = SG_RESOURCESTATE_FAILED; |
8809 | } |
8810 | |
8811 | SOKOL_API_IMPL void sg_fail_shader(sg_shader shd_id) { |
8812 | SOKOL_ASSERT(shd_id.id != SG_INVALID_ID); |
8813 | _sg_shader* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
8814 | SOKOL_ASSERT(shd && shd->slot.state == SG_RESOURCESTATE_ALLOC); |
8815 | shd->slot.state = SG_RESOURCESTATE_FAILED; |
8816 | } |
8817 | |
8818 | SOKOL_API_IMPL void sg_fail_pipeline(sg_pipeline pip_id) { |
8819 | SOKOL_ASSERT(pip_id.id != SG_INVALID_ID); |
8820 | _sg_pipeline* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
8821 | SOKOL_ASSERT(pip && pip->slot.state == SG_RESOURCESTATE_ALLOC); |
8822 | pip->slot.state = SG_RESOURCESTATE_FAILED; |
8823 | } |
8824 | |
8825 | SOKOL_API_IMPL void sg_fail_pass(sg_pass pass_id) { |
8826 | SOKOL_ASSERT(pass_id.id != SG_INVALID_ID); |
8827 | _sg_pass* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
8828 | SOKOL_ASSERT(pass && pass->slot.state == SG_RESOURCESTATE_ALLOC); |
8829 | pass->slot.state = SG_RESOURCESTATE_FAILED; |
8830 | } |
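
/* A minimal usage sketch (illustrative only) of the separate alloc/init/fail
   path, useful for asynchronously loaded resources; 'load_ok' and 'img_desc'
   are hypothetical app-side names:

       // allocate the handle up front so it can already be referenced:
       sg_image img = sg_alloc_image();

       // ...later, when the asynchronous load has finished:
       if (load_ok) {
           sg_init_image(img, &img_desc);
       }
       else {
           sg_fail_image(img);
       }
*/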
8831 | |
/*-- get resource state ------------------------------------------------------*/
8833 | SOKOL_API_IMPL sg_resource_state sg_query_buffer_state(sg_buffer buf_id) { |
8834 | if (buf_id.id != SG_INVALID_ID) { |
8835 | _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
8836 | if (buf) { |
8837 | return buf->slot.state; |
8838 | } |
8839 | } |
8840 | return SG_RESOURCESTATE_INVALID; |
8841 | } |
8842 | |
8843 | SOKOL_API_IMPL sg_resource_state sg_query_image_state(sg_image img_id) { |
8844 | if (img_id.id != SG_INVALID_ID) { |
8845 | _sg_image* img = _sg_lookup_image(&_sg.pools, img_id.id); |
8846 | if (img) { |
8847 | return img->slot.state; |
8848 | } |
8849 | } |
8850 | return SG_RESOURCESTATE_INVALID; |
8851 | } |
8852 | |
8853 | SOKOL_API_IMPL sg_resource_state sg_query_shader_state(sg_shader shd_id) { |
8854 | if (shd_id.id != SG_INVALID_ID) { |
8855 | _sg_shader* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
8856 | if (shd) { |
8857 | return shd->slot.state; |
8858 | } |
8859 | } |
8860 | return SG_RESOURCESTATE_INVALID; |
8861 | } |
8862 | |
8863 | SOKOL_API_IMPL sg_resource_state sg_query_pipeline_state(sg_pipeline pip_id) { |
8864 | if (pip_id.id != SG_INVALID_ID) { |
8865 | _sg_pipeline* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
8866 | if (pip) { |
8867 | return pip->slot.state; |
8868 | } |
8869 | } |
8870 | return SG_RESOURCESTATE_INVALID; |
8871 | } |
8872 | |
8873 | SOKOL_API_IMPL sg_resource_state sg_query_pass_state(sg_pass pass_id) { |
8874 | if (pass_id.id != SG_INVALID_ID) { |
8875 | _sg_pass* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
8876 | if (pass) { |
8877 | return pass->slot.state; |
8878 | } |
8879 | } |
8880 | return SG_RESOURCESTATE_INVALID; |
8881 | } |
8882 | |
8883 | /*-- allocate and initialize resource ----------------------------------------*/ |
8884 | SOKOL_API_IMPL sg_buffer sg_make_buffer(const sg_buffer_desc* desc) { |
8885 | SOKOL_ASSERT(desc); |
8886 | sg_buffer buf_id = sg_alloc_buffer(); |
8887 | if (buf_id.id != SG_INVALID_ID) { |
8888 | sg_init_buffer(buf_id, desc); |
8889 | } |
8890 | else { |
8891 | SOKOL_LOG("buffer pool exhausted!" ); |
8892 | } |
8893 | return buf_id; |
8894 | } |
8895 | |
8896 | SOKOL_API_IMPL sg_image sg_make_image(const sg_image_desc* desc) { |
8897 | SOKOL_ASSERT(desc); |
8898 | sg_image img_id = sg_alloc_image(); |
8899 | if (img_id.id != SG_INVALID_ID) { |
8900 | sg_init_image(img_id, desc); |
8901 | } |
8902 | else { |
8903 | SOKOL_LOG("image pool exhausted!" ); |
8904 | } |
8905 | return img_id; |
8906 | } |
8907 | |
8908 | SOKOL_API_IMPL sg_shader sg_make_shader(const sg_shader_desc* desc) { |
8909 | SOKOL_ASSERT(desc); |
8910 | sg_shader shd_id = sg_alloc_shader(); |
8911 | if (shd_id.id != SG_INVALID_ID) { |
8912 | sg_init_shader(shd_id, desc); |
8913 | } |
8914 | else { |
8915 | SOKOL_LOG("shader pool exhausted!" ); |
8916 | } |
8917 | return shd_id; |
8918 | } |
8919 | |
8920 | SOKOL_API_IMPL sg_pipeline sg_make_pipeline(const sg_pipeline_desc* desc) { |
8921 | SOKOL_ASSERT(desc); |
8922 | sg_pipeline pip_id = sg_alloc_pipeline(); |
8923 | if (pip_id.id != SG_INVALID_ID) { |
8924 | sg_init_pipeline(pip_id, desc); |
8925 | } |
8926 | else { |
8927 | SOKOL_LOG("pipeline pool exhausted!" ); |
8928 | } |
8929 | return pip_id; |
8930 | } |
8931 | |
8932 | SOKOL_API_IMPL sg_pass sg_make_pass(const sg_pass_desc* desc) { |
8933 | SOKOL_ASSERT(desc); |
8934 | sg_pass pass_id = sg_alloc_pass(); |
8935 | if (pass_id.id != SG_INVALID_ID) { |
8936 | sg_init_pass(pass_id, desc); |
8937 | } |
8938 | else { |
8939 | SOKOL_LOG("pass pool exhausted!" ); |
8940 | } |
8941 | return pass_id; |
8942 | } |
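
/* A minimal usage sketch (illustrative only) of one-step resource creation
   with a C99 compound literal; the vertex data is hypothetical:

       const float vertices[] = {
            0.0f,  0.5f, 0.5f,
            0.5f, -0.5f, 0.5f,
           -0.5f, -0.5f, 0.5f
       };
       sg_buffer vbuf = sg_make_buffer(&(sg_buffer_desc){
           .size = sizeof(vertices),
           .content = vertices
       });
*/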
8943 | |
8944 | /*-- destroy resource --------------------------------------------------------*/ |
8945 | SOKOL_API_IMPL void sg_destroy_buffer(sg_buffer buf_id) { |
8946 | _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
8947 | if (buf) { |
8948 | if (buf->slot.ctx_id == _sg.active_context.id) { |
8949 | _sg_destroy_buffer(buf); |
8950 | _sg_pool_free_id(&_sg.pools.buffer_pool, buf_id.id); |
8951 | } |
8952 | else { |
8953 | SOKOL_LOG("sg_destroy_buffer: active context mismatch (must be same as for creation)" ); |
8954 | } |
8955 | } |
8956 | } |
8957 | |
8958 | SOKOL_API_IMPL void sg_destroy_image(sg_image img_id) { |
8959 | _sg_image* img = _sg_lookup_image(&_sg.pools, img_id.id); |
8960 | if (img) { |
8961 | if (img->slot.ctx_id == _sg.active_context.id) { |
8962 | _sg_destroy_image(img); |
8963 | _sg_pool_free_id(&_sg.pools.image_pool, img_id.id); |
8964 | } |
8965 | else { |
8966 | SOKOL_LOG("sg_destroy_image: active context mismatch (must be same as for creation)" ); |
8967 | } |
8968 | } |
8969 | } |
8970 | |
8971 | SOKOL_API_IMPL void sg_destroy_shader(sg_shader shd_id) { |
8972 | _sg_shader* shd = _sg_lookup_shader(&_sg.pools, shd_id.id); |
8973 | if (shd) { |
8974 | if (shd->slot.ctx_id == _sg.active_context.id) { |
8975 | _sg_destroy_shader(shd); |
8976 | _sg_pool_free_id(&_sg.pools.shader_pool, shd_id.id); |
8977 | } |
8978 | else { |
8979 | SOKOL_LOG("sg_destroy_shader: active context mismatch (must be same as for creation)" ); |
8980 | } |
8981 | } |
8982 | } |
8983 | |
8984 | SOKOL_API_IMPL void sg_destroy_pipeline(sg_pipeline pip_id) { |
8985 | _sg_pipeline* pip = _sg_lookup_pipeline(&_sg.pools, pip_id.id); |
8986 | if (pip) { |
8987 | if (pip->slot.ctx_id == _sg.active_context.id) { |
8988 | _sg_destroy_pipeline(pip); |
8989 | _sg_pool_free_id(&_sg.pools.pipeline_pool, pip_id.id); |
8990 | } |
8991 | else { |
8992 | SOKOL_LOG("sg_destroy_pipeline: active context mismatch (must be same as for creation)" ); |
8993 | } |
8994 | } |
8995 | } |
8996 | |
8997 | SOKOL_API_IMPL void sg_destroy_pass(sg_pass pass_id) { |
8998 | _sg_pass* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
8999 | if (pass) { |
9000 | if (pass->slot.ctx_id == _sg.active_context.id) { |
9001 | _sg_destroy_pass(pass); |
9002 | _sg_pool_free_id(&_sg.pools.pass_pool, pass_id.id); |
9003 | } |
9004 | else { |
9005 | SOKOL_LOG("sg_destroy_pass: active context mismatch (must be same as for creation)" ); |
9006 | } |
9007 | } |
9008 | } |
9009 | |
9010 | SOKOL_API_IMPL void sg_begin_default_pass(const sg_pass_action* pass_action, int width, int height) { |
9011 | SOKOL_ASSERT(pass_action); |
9012 | SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0)); |
9013 | sg_pass_action pa; |
9014 | _sg_resolve_default_pass_action(pass_action, &pa); |
9015 | _sg.cur_pass.id = SG_INVALID_ID; |
9016 | _sg.pass_valid = true; |
9017 | _sg_begin_pass(0, &pa, width, height); |
9018 | } |
9019 | |
9020 | SOKOL_API_IMPL void sg_begin_pass(sg_pass pass_id, const sg_pass_action* pass_action) { |
9021 | SOKOL_ASSERT(pass_action); |
9022 | SOKOL_ASSERT((pass_action->_start_canary == 0) && (pass_action->_end_canary == 0)); |
9023 | _sg.cur_pass = pass_id; |
9024 | _sg_pass* pass = _sg_lookup_pass(&_sg.pools, pass_id.id); |
9025 | if (pass && _sg_validate_begin_pass(pass)) { |
9026 | _sg.pass_valid = true; |
9027 | sg_pass_action pa; |
9028 | _sg_resolve_default_pass_action(pass_action, &pa); |
9029 | const int w = pass->color_atts[0].image->width; |
9030 | const int h = pass->color_atts[0].image->height; |
9031 | _sg_begin_pass(pass, &pa, w, h); |
9032 | } |
9033 | else { |
9034 | _sg.pass_valid = false; |
9035 | } |
9036 | } |
9037 | |
9038 | SOKOL_API_IMPL void sg_apply_viewport(int x, int y, int width, int height, bool origin_top_left) { |
9039 | if (!_sg.pass_valid) { |
9040 | return; |
9041 | } |
9042 | _sg_apply_viewport(x, y, width, height, origin_top_left); |
9043 | } |
9044 | |
9045 | SOKOL_API_IMPL void sg_apply_scissor_rect(int x, int y, int width, int height, bool origin_top_left) { |
9046 | if (!_sg.pass_valid) { |
9047 | return; |
9048 | } |
9049 | _sg_apply_scissor_rect(x, y, width, height, origin_top_left); |
9050 | } |
9051 | |
9052 | SOKOL_API_IMPL void sg_apply_draw_state(const sg_draw_state* ds) { |
9053 | SOKOL_ASSERT(ds); |
9054 | SOKOL_ASSERT((ds->_start_canary==0) && (ds->_end_canary==0)); |
9055 | if (!_sg_validate_draw_state(ds)) { |
9056 | _sg.next_draw_valid = false; |
9057 | return; |
9058 | } |
9059 | if (!_sg.pass_valid) { |
9060 | return; |
9061 | } |
9062 | _sg.next_draw_valid = true; |
9063 | _sg.cur_pipeline = ds->pipeline; |
9064 | |
    /* look up resource pointers; resources which are not in SG_RESOURCESTATE_VALID
       are not a fatal error, but they suppress the following draw calls; this
       allows for simple asynchronous resource setup
    */
9069 | _sg_pipeline* pip = _sg_lookup_pipeline(&_sg.pools, ds->pipeline.id); |
9070 | SOKOL_ASSERT(pip); |
9071 | _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == pip->slot.state); |
9072 | SOKOL_ASSERT(pip->shader && (pip->shader->slot.id == pip->shader_id.id)); |
9073 | |
9074 | _sg_buffer* vbs[SG_MAX_SHADERSTAGE_BUFFERS] = { 0 }; |
9075 | int num_vbs = 0; |
9076 | for (int i = 0; i < SG_MAX_SHADERSTAGE_BUFFERS; i++, num_vbs++) { |
9077 | if (ds->vertex_buffers[i].id) { |
9078 | vbs[i] = _sg_lookup_buffer(&_sg.pools, ds->vertex_buffers[i].id); |
9079 | SOKOL_ASSERT(vbs[i]); |
9080 | _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vbs[i]->slot.state); |
9081 | _sg.next_draw_valid &= !vbs[i]->append_overflow; |
9082 | } |
9083 | else { |
9084 | break; |
9085 | } |
9086 | } |
9087 | |
9088 | _sg_buffer* ib = 0; |
9089 | if (ds->index_buffer.id) { |
9090 | ib = _sg_lookup_buffer(&_sg.pools, ds->index_buffer.id); |
9091 | SOKOL_ASSERT(ib); |
9092 | _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == ib->slot.state); |
9093 | _sg.next_draw_valid &= !ib->append_overflow; |
9094 | } |
9095 | |
9096 | _sg_image* vs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 }; |
9097 | int num_vs_imgs = 0; |
9098 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_vs_imgs++) { |
9099 | if (ds->vs_images[i].id) { |
9100 | vs_imgs[i] = _sg_lookup_image(&_sg.pools, ds->vs_images[i].id); |
9101 | SOKOL_ASSERT(vs_imgs[i]); |
9102 | _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == vs_imgs[i]->slot.state); |
9103 | } |
9104 | else { |
9105 | break; |
9106 | } |
9107 | } |
9108 | |
9109 | _sg_image* fs_imgs[SG_MAX_SHADERSTAGE_IMAGES] = { 0 }; |
9110 | int num_fs_imgs = 0; |
9111 | for (int i = 0; i < SG_MAX_SHADERSTAGE_IMAGES; i++, num_fs_imgs++) { |
9112 | if (ds->fs_images[i].id) { |
9113 | fs_imgs[i] = _sg_lookup_image(&_sg.pools, ds->fs_images[i].id); |
9114 | SOKOL_ASSERT(fs_imgs[i]); |
9115 | _sg.next_draw_valid &= (SG_RESOURCESTATE_VALID == fs_imgs[i]->slot.state); |
9116 | } |
9117 | else { |
9118 | break; |
9119 | } |
9120 | } |
9121 | if (_sg.next_draw_valid) { |
9122 | const int* vb_offsets = ds->vertex_buffer_offsets; |
9123 | int ib_offset = ds->index_buffer_offset; |
9124 | _sg_apply_draw_state(pip, vbs, vb_offsets, num_vbs, ib, ib_offset, vs_imgs, num_vs_imgs, fs_imgs, num_fs_imgs); |
9125 | } |
9126 | } |
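
/* A minimal usage sketch (illustrative only) of filling an sg_draw_state;
   'pip', 'vbuf', 'ibuf' and 'tex' are hypothetical handles created elsewhere,
   and the pipeline is assumed to declare a matching index type and one
   fragment-shader image:

       sg_draw_state ds = {
           .pipeline = pip,
           .vertex_buffers[0] = vbuf,
           .index_buffer = ibuf,
           .fs_images[0] = tex
       };
       sg_apply_draw_state(&ds);
*/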
9127 | |
9128 | SOKOL_API_IMPL void sg_apply_uniform_block(sg_shader_stage stage, int ub_index, const void* data, int num_bytes) { |
9129 | SOKOL_ASSERT((stage == SG_SHADERSTAGE_VS) || (stage == SG_SHADERSTAGE_FS)); |
9130 | SOKOL_ASSERT((ub_index >= 0) && (ub_index < SG_MAX_SHADERSTAGE_UBS)); |
9131 | SOKOL_ASSERT(data && (num_bytes > 0)); |
9132 | if (!_sg_validate_apply_uniform_block(stage, ub_index, data, num_bytes)) { |
9133 | _sg.next_draw_valid = false; |
9134 | return; |
9135 | } |
9136 | if (!(_sg.pass_valid && _sg.next_draw_valid)) { |
9137 | return; |
9138 | } |
9139 | _sg_apply_uniform_block(stage, ub_index, data, num_bytes); |
9140 | } |
9141 | |
9142 | SOKOL_API_IMPL void sg_draw(int base_element, int num_elements, int num_instances) { |
9143 | if (!(_sg.pass_valid && _sg.next_draw_valid)) { |
9144 | return; |
9145 | } |
9146 | _sg_draw(base_element, num_elements, num_instances); |
9147 | } |
9148 | |
9149 | SOKOL_API_IMPL void sg_end_pass(void) { |
9150 | if (!_sg.pass_valid) { |
9151 | return; |
9152 | } |
9153 | _sg_end_pass(); |
9154 | _sg.cur_pass.id = SG_INVALID_ID; |
9155 | _sg.cur_pipeline.id = SG_INVALID_ID; |
9156 | _sg.pass_valid = false; |
9157 | } |
9158 | |
SOKOL_API_IMPL void sg_commit(void) {
9160 | _sg_commit(); |
9161 | _sg.frame_index++; |
9162 | } |
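
/* A minimal usage sketch (illustrative only) of a complete frame; the draw
   state 'ds', the uniform struct 'vs_params' and the framebuffer size are
   hypothetical app-side data:

       sg_pass_action pass_action = { 0 };   // an all-zero action resolves to the default clear
       sg_begin_default_pass(&pass_action, fb_width, fb_height);
       sg_apply_draw_state(&ds);
       sg_apply_uniform_block(SG_SHADERSTAGE_VS, 0, &vs_params, sizeof(vs_params));
       sg_draw(0, num_elements, 1);
       sg_end_pass();
       sg_commit();
*/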
9163 | |
9164 | SOKOL_API_IMPL void sg_reset_state_cache(void) { |
9165 | _sg_reset_state_cache(); |
9166 | } |
9167 | |
9168 | SOKOL_API_IMPL void sg_update_buffer(sg_buffer buf_id, const void* data, int num_bytes) { |
9169 | if (num_bytes == 0) { |
9170 | return; |
9171 | } |
9172 | _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
9173 | if (!(buf && buf->slot.state == SG_RESOURCESTATE_VALID)) { |
9174 | return; |
9175 | } |
9176 | if (_sg_validate_update_buffer(buf, data, num_bytes)) { |
9177 | SOKOL_ASSERT(num_bytes <= buf->size); |
9178 | /* only one update allowed per buffer and frame */ |
9179 | SOKOL_ASSERT(buf->update_frame_index != _sg.frame_index); |
9180 | /* update and append on same buffer in same frame not allowed */ |
9181 | SOKOL_ASSERT(buf->append_frame_index != _sg.frame_index); |
9182 | _sg_update_buffer(buf, data, num_bytes); |
9183 | buf->update_frame_index = _sg.frame_index; |
9184 | } |
9185 | } |
9186 | |
9187 | SOKOL_API_IMPL int sg_append_buffer(sg_buffer buf_id, const void* data, int num_bytes) { |
9188 | _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
9189 | if (buf) { |
9190 | /* rewind append cursor in a new frame */ |
9191 | if (buf->append_frame_index != _sg.frame_index) { |
9192 | buf->append_pos = 0; |
9193 | buf->append_overflow = false; |
9194 | } |
9195 | if ((buf->append_pos + num_bytes) > buf->size) { |
9196 | buf->append_overflow = true; |
9197 | } |
9198 | const int start_pos = buf->append_pos; |
9199 | if (buf->slot.state == SG_RESOURCESTATE_VALID) { |
9200 | if (_sg_validate_append_buffer(buf, data, num_bytes)) { |
9201 | if (!buf->append_overflow && (num_bytes > 0)) { |
9202 | /* update and append on same buffer in same frame not allowed */ |
9203 | SOKOL_ASSERT(buf->update_frame_index != _sg.frame_index); |
9204 | _sg_append_buffer(buf, data, num_bytes, buf->append_frame_index != _sg.frame_index); |
9205 | buf->append_pos += num_bytes; |
9206 | buf->append_frame_index = _sg.frame_index; |
9207 | } |
9208 | } |
9209 | } |
9210 | return start_pos; |
9211 | } |
9212 | else { |
9213 | /* FIXME: should we return -1 here? */ |
9214 | return 0; |
9215 | } |
9216 | } |
9217 | |
9218 | SOKOL_API_IMPL bool sg_query_buffer_overflow(sg_buffer buf_id) { |
9219 | _sg_buffer* buf = _sg_lookup_buffer(&_sg.pools, buf_id.id); |
9220 | if (buf) { |
9221 | return buf->append_overflow; |
9222 | } |
9223 | else { |
9224 | return false; |
9225 | } |
9226 | } |
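
/* A minimal usage sketch (illustrative only) of streaming vertex data with
   sg_append_buffer: the returned byte offset goes into the draw state, and an
   overflow silently drops the draw calls referencing the buffer in this frame
   (see the overflow checks in the draw-state validation above); 'vbuf', 'ds'
   and 'vertices' are hypothetical app-side data:

       const int offset = sg_append_buffer(vbuf, vertices, sizeof(vertices));
       ds.vertex_buffer_offsets[0] = offset;
       sg_apply_draw_state(&ds);
       sg_draw(0, num_vertices, 1);
       if (sg_query_buffer_overflow(vbuf)) {
           // the buffer ran full this frame, consider a larger buffer size
       }
*/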
9227 | |
9228 | SOKOL_API_IMPL void sg_update_image(sg_image img_id, const sg_image_content* data) { |
9229 | _sg_image* img = _sg_lookup_image(&_sg.pools, img_id.id); |
9230 | if (!(img && img->slot.state == SG_RESOURCESTATE_VALID)) { |
9231 | return; |
9232 | } |
9233 | if (_sg_validate_update_image(img, data)) { |
9234 | SOKOL_ASSERT(img->upd_frame_index != _sg.frame_index); |
9235 | _sg_update_image(img, data); |
9236 | img->upd_frame_index = _sg.frame_index; |
9237 | } |
9238 | } |
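
/* A minimal usage sketch (illustrative only) of updating a dynamic,
   non-compressed 2D image once per frame; 'pixels', 'w' and 'h' are
   hypothetical app-side data for mip level 0:

       sg_image_content content = {
           .subimage[0][0] = {
               .ptr = pixels,
               .size = w * h * 4    // assuming SG_PIXELFORMAT_RGBA8
           }
       };
       sg_update_image(img, &content);
*/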
9239 | #ifdef _MSC_VER |
9240 | #pragma warning(pop) |
9241 | #endif |
9242 | |
9243 | #endif /* SOKOL_IMPL */ |
9244 | |