//************************************ bs::framework - Copyright 2018 Marko Pintera **************************************//
//*********** Licensed under the MIT license. See LICENSE.md for full terms. This notice is not to be removed. ***********//
#include "BsRendererView.h"
#include "Renderer/BsCamera.h"
#include "Renderer/BsRenderable.h"
#include "Renderer/BsRendererUtility.h"
#include "Material/BsMaterial.h"
#include "Material/BsShader.h"
#include "Material/BsGpuParamsSet.h"
#include "BsRendererLight.h"
#include "BsRendererScene.h"
#include "BsRenderBeast.h"
#include "BsRendererDecal.h"

namespace bs { namespace ct
{
	PerCameraParamDef gPerCameraParamDef;
	SkyboxParamDef gSkyboxParamDef;

	SkyboxMat::SkyboxMat()
	{
		if(mParams->hasTexture(GPT_FRAGMENT_PROGRAM, "gSkyTex"))
			mParams->getTextureParam(GPT_FRAGMENT_PROGRAM, "gSkyTex", mSkyTextureParam);

		mParamBuffer = gSkyboxParamDef.createBuffer();

		if(mParams->hasParamBlock(GPT_FRAGMENT_PROGRAM, "Params"))
			mParams->setParamBlockBuffer("Params", mParamBuffer);
	}

	void SkyboxMat::bind(const SPtr<GpuParamBlockBuffer>& perCamera, const SPtr<Texture>& texture, const Color& solidColor)
	{
		mParams->setParamBlockBuffer("PerCamera", perCamera);

		mSkyTextureParam.set(texture);

		gSkyboxParamDef.gClearColor.set(mParamBuffer, solidColor);
		mParamBuffer->flushToGPU();

		RendererMaterial::bind();
	}

	SkyboxMat* SkyboxMat::getVariation(bool color)
	{
		if (color)
			return get(getVariation<true>());

		return get(getVariation<false>());
	}

	RendererViewData::RendererViewData()
		:encodeDepth(false), depthEncodeNear(0.0f), depthEncodeFar(0.0f)
	{

	}

	RendererViewProperties::RendererViewProperties(const RENDERER_VIEW_DESC& src)
		:RendererViewData(src), frameIdx(0), target(src.target)
	{
		viewProjTransform = src.projTransform * src.viewTransform;
	}

	RendererView::RendererView()
		: mCamera(nullptr), mRenderSettingsHash(0), mViewIdx(-1)
	{
		mParamBuffer = gPerCameraParamDef.createBuffer();
	}

	RendererView::RendererView(const RENDERER_VIEW_DESC& desc)
		: mProperties(desc), mCamera(desc.sceneCamera), mRenderSettingsHash(0), mViewIdx(-1)
	{
		mParamBuffer = gPerCameraParamDef.createBuffer();
		mProperties.prevViewProjTransform = mProperties.viewProjTransform;

		setStateReductionMode(desc.stateReduction);
	}

	void RendererView::setStateReductionMode(StateReduction reductionMode)
	{
		mDeferredOpaqueQueue = bs_shared_ptr_new<RenderQueue>(reductionMode);
		mForwardOpaqueQueue = bs_shared_ptr_new<RenderQueue>(reductionMode);

		StateReduction transparentStateReduction = reductionMode;
		if (transparentStateReduction == StateReduction::Material)
			transparentStateReduction = StateReduction::Distance; // Transparent objects MUST be sorted by distance

		mTransparentQueue = bs_shared_ptr_new<RenderQueue>(transparentStateReduction);
		mDecalQueue = bs_shared_ptr_new<RenderQueue>(StateReduction::Material);
	}

	void RendererView::setRenderSettings(const SPtr<RenderSettings>& settings)
	{
		if (mRenderSettings == nullptr)
			mRenderSettings = bs_shared_ptr_new<RenderSettings>();

		if (settings != nullptr)
			*mRenderSettings = *settings;

		mRenderSettingsHash++;

		// Update the compositor hierarchy. (Note: This needs to be called even when only the viewport size or other
		// view information changes, but we currently call it here because all such changes are followed by setRenderSettings.)
		mCompositor.build(*this, RCNodeFinalResolve::getNodeId());
	}

	void RendererView::setTransform(const Vector3& origin, const Vector3& direction, const Matrix4& view,
		const Matrix4& proj, const ConvexVolume& worldFrustum)
	{
		mProperties.viewOrigin = origin;
		mProperties.viewDirection = direction;
		mProperties.viewTransform = view;
		mProperties.projTransform = proj;
		mProperties.cullFrustum = worldFrustum;
		mProperties.viewProjTransform = proj * view;
	}

	void RendererView::setView(const RENDERER_VIEW_DESC& desc)
	{
		mCamera = desc.sceneCamera;
		mProperties = desc;
		mProperties.viewProjTransform = desc.projTransform * desc.viewTransform;
		mProperties.prevViewProjTransform = Matrix4::IDENTITY;
		mProperties.target = desc.target;

		setStateReductionMode(desc.stateReduction);
	}

	void RendererView::beginFrame()
	{
		// Check if the render target was resized and update the view properties accordingly.
		// Note: Normally we rely on the renderer notify* methods to let us know of changes to the camera/viewport, but
		// since a render target resize can often originate from the core thread, handling it here avoids the back and
		// forth between the main <-> core thread, and the frame delay that comes with it.
		if(mCamera)
		{
			const SPtr<Viewport>& viewport = mCamera->getViewport();
			if(viewport)
			{
				UINT32 newTargetWidth = 0;
				UINT32 newTargetHeight = 0;
				if (mProperties.target.target != nullptr)
				{
					newTargetWidth = mProperties.target.target->getProperties().width;
					newTargetHeight = mProperties.target.target->getProperties().height;
				}

				if(newTargetWidth != mProperties.target.targetWidth ||
					newTargetHeight != mProperties.target.targetHeight)
				{
					mProperties.target.viewRect = viewport->getPixelArea();
					mProperties.target.targetWidth = newTargetWidth;
					mProperties.target.targetHeight = newTargetHeight;

					updatePerViewBuffer();
				}
			}
		}

		// Note: inverse view-projection can be cached, it doesn't change every frame
		Matrix4 viewProj = mProperties.projTransform * mProperties.viewTransform;
		Matrix4 invViewProj = viewProj.inverse();
		Matrix4 NDCToPrevNDC = mProperties.prevViewProjTransform * invViewProj;
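		// Illustrative note (assumption, not engine code): shaders consuming gNDCToPrevNDC can reproject a pixel's
		// current NDC position into the previous frame's NDC space as
		//   prevNDC = perspectiveDivide(NDCToPrevNDC * Vector4(curNDC, 1.0f))
		// which is the typical history lookup performed by temporal effects.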

		gPerCameraParamDef.gNDCToPrevNDC.set(mParamBuffer, NDCToPrevNDC);
	}

	void RendererView::endFrame()
	{
		// Save view-projection matrix to use for temporal filtering
		mProperties.prevViewProjTransform = mProperties.viewProjTransform;

		// Advance per-view frame index. This is used primarily by temporal rendering effects, and pausing the frame index
		// allows you to freeze the current rendering as is, without temporal artifacts.
		mProperties.frameIdx++;

		mDeferredOpaqueQueue->clear();
		mForwardOpaqueQueue->clear();
		mTransparentQueue->clear();
		mDecalQueue->clear();
	}

	void RendererView::determineVisible(const Vector<RendererRenderable*>& renderables, const Vector<CullInfo>& cullInfos,
		Vector<bool>* visibility)
	{
		mVisibility.renderables.clear();
		mVisibility.renderables.resize(renderables.size(), false);

		if (mRenderSettings->overlayOnly)
			return;

		calculateVisibility(cullInfos, mVisibility.renderables);

		if(visibility != nullptr)
		{
			for (UINT32 i = 0; i < (UINT32)renderables.size(); i++)
			{
				bool visible = (*visibility)[i];

				(*visibility)[i] = visible || mVisibility.renderables[i];
			}
		}
	}

	void RendererView::determineVisible(const Vector<RendererParticles>& particleSystems, const Vector<CullInfo>& cullInfos,
		Vector<bool>* visibility)
	{
		mVisibility.particleSystems.clear();
		mVisibility.particleSystems.resize(particleSystems.size(), false);

		if (mRenderSettings->overlayOnly)
			return;

		calculateVisibility(cullInfos, mVisibility.particleSystems);

		if(visibility != nullptr)
		{
			for (UINT32 i = 0; i < (UINT32)particleSystems.size(); i++)
			{
				bool visible = (*visibility)[i];

				(*visibility)[i] = visible || mVisibility.particleSystems[i];
			}
		}
	}

	void RendererView::determineVisible(const Vector<RendererDecal>& decals, const Vector<CullInfo>& cullInfos,
		Vector<bool>* visibility)
	{
		mVisibility.decals.clear();
		mVisibility.decals.resize(decals.size(), false);

		if (mRenderSettings->overlayOnly)
			return;

		calculateVisibility(cullInfos, mVisibility.decals);

		if(visibility != nullptr)
		{
			for (UINT32 i = 0; i < (UINT32)decals.size(); i++)
			{
				bool visible = (*visibility)[i];

				(*visibility)[i] = visible || mVisibility.decals[i];
			}
		}
	}

	void RendererView::determineVisible(const Vector<RendererLight>& lights, const Vector<Sphere>& bounds,
		LightType lightType, Vector<bool>* visibility)
	{
		// Special case for directional lights, they're always visible
		if(lightType == LightType::Directional)
		{
			if (visibility)
				visibility->assign(lights.size(), true);

			return;
		}

		Vector<bool>* perViewVisibility;
		if(lightType == LightType::Radial)
		{
			mVisibility.radialLights.clear();
			mVisibility.radialLights.resize(lights.size(), false);

			perViewVisibility = &mVisibility.radialLights;
		}
		else // Spot
		{
			mVisibility.spotLights.clear();
			mVisibility.spotLights.resize(lights.size(), false);

			perViewVisibility = &mVisibility.spotLights;
		}

		if (mRenderSettings->overlayOnly)
			return;

		calculateVisibility(bounds, *perViewVisibility);

		if(visibility != nullptr)
		{
			for (UINT32 i = 0; i < (UINT32)lights.size(); i++)
			{
				bool visible = (*visibility)[i];

				(*visibility)[i] = visible || (*perViewVisibility)[i];
			}
		}
	}

	void RendererView::calculateVisibility(const Vector<CullInfo>& cullInfos, Vector<bool>& visibility) const
	{
		UINT64 cameraLayers = mProperties.visibleLayers;
		const ConvexVolume& worldFrustum = mProperties.cullFrustum;
		const Vector3& worldCameraPosition = mProperties.viewOrigin;
		float baseCullDistance = mRenderSettings->cullDistance;

		for (UINT32 i = 0; i < (UINT32)cullInfos.size(); i++)
		{
			if ((cullInfos[i].layer & cameraLayers) == 0)
				continue;

			// Do distance culling
			const Sphere& boundingSphere = cullInfos[i].bounds.getSphere();
			const Vector3& worldRenderablePosition = boundingSphere.getCenter();

			float distanceToCameraSq = worldCameraPosition.squaredDistance(worldRenderablePosition);
			float correctedCullDistance = cullInfos[i].cullDistanceFactor * baseCullDistance;
			float maxDistanceToCamera = correctedCullDistance + boundingSphere.getRadius();

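			// Comparing squared distances avoids a square root per object; the sphere radius is folded into the
			// maximum allowed distance before squaring.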
			if (distanceToCameraSq > maxDistanceToCamera * maxDistanceToCamera)
				continue;

			// Do frustum culling
			// Note: This is bound to be a bottleneck at some point. When it is, ensure that the intersect methods use
			// vector operations, as it is trivial to update them. Also consider spatial partitioning.
			if (worldFrustum.intersects(boundingSphere))
			{
				// More precise with the box
				const AABox& boundingBox = cullInfos[i].bounds.getBox();

				if (worldFrustum.intersects(boundingBox))
					visibility[i] = true;
			}
		}
	}

	void RendererView::calculateVisibility(const Vector<Sphere>& bounds, Vector<bool>& visibility) const
	{
		const ConvexVolume& worldFrustum = mProperties.cullFrustum;

		for (UINT32 i = 0; i < (UINT32)bounds.size(); i++)
		{
			if (worldFrustum.intersects(bounds[i]))
				visibility[i] = true;
		}
	}

	void RendererView::calculateVisibility(const Vector<AABox>& bounds, Vector<bool>& visibility) const
	{
		const ConvexVolume& worldFrustum = mProperties.cullFrustum;

		for (UINT32 i = 0; i < (UINT32)bounds.size(); i++)
		{
			if (worldFrustum.intersects(bounds[i]))
				visibility[i] = true;
		}
	}

	void RendererView::queueRenderElements(const SceneInfo& sceneInfo)
	{
		if (mRenderSettings->overlayOnly)
			return;

		// Queue renderables
		for(UINT32 i = 0; i < (UINT32)sceneInfo.renderables.size(); i++)
		{
			if (!mVisibility.renderables[i])
				continue;

			const AABox& boundingBox = sceneInfo.renderableCullInfos[i].bounds.getBox();
			const float distanceToCamera = (mProperties.viewOrigin - boundingBox.getCenter()).length();

			for (auto& renderElem : sceneInfo.renderables[i]->elements)
			{
				// Note: I could keep renderables in multiple separate arrays, so I don't need to do the check here
				ShaderFlags shaderFlags = renderElem.material->getShader()->getFlags();

				if (shaderFlags.isSet(ShaderFlag::Transparent))
					mTransparentQueue->add(&renderElem, distanceToCamera, renderElem.techniqueIdx);
				else if (shaderFlags.isSet(ShaderFlag::Forward))
					mForwardOpaqueQueue->add(&renderElem, distanceToCamera, renderElem.techniqueIdx);
				else
					mDeferredOpaqueQueue->add(&renderElem, distanceToCamera, renderElem.techniqueIdx);
			}
		}

		// Queue particle systems
		for(UINT32 i = 0; i < (UINT32)sceneInfo.particleSystems.size(); i++)
		{
			if (!mVisibility.particleSystems[i])
				continue;

			const ParticlesRenderElement& renderElem = sceneInfo.particleSystems[i].renderElement;
			if (!renderElem.isValid())
				continue;

			const AABox& boundingBox = sceneInfo.particleSystemCullInfos[i].bounds.getBox();
			const float distanceToCamera = (mProperties.viewOrigin - boundingBox.getCenter()).length();

			ShaderFlags shaderFlags = renderElem.material->getShader()->getFlags();

			if (shaderFlags.isSet(ShaderFlag::Transparent))
				mTransparentQueue->add(&renderElem, distanceToCamera, renderElem.techniqueIdx);
			else if (shaderFlags.isSet(ShaderFlag::Forward))
				mForwardOpaqueQueue->add(&renderElem, distanceToCamera, renderElem.techniqueIdx);
			else
				mDeferredOpaqueQueue->add(&renderElem, distanceToCamera, renderElem.techniqueIdx);
		}

		// Queue decals
		const bool isMSAA = mProperties.target.numSamples > 1;
		for(UINT32 i = 0; i < (UINT32)sceneInfo.decals.size(); i++)
		{
			if (!mVisibility.decals[i])
				continue;

			const DecalRenderElement& renderElem = sceneInfo.decals[i].renderElement;

			// Note: I could keep renderables in multiple separate arrays, so I don't need to do the check here
			ShaderFlags shaderFlags = renderElem.material->getShader()->getFlags();

			// Decals are only supported using deferred rendering
			if (shaderFlags.isSetAny(ShaderFlag::Transparent | ShaderFlag::Forward))
				continue;

			const AABox& boundingBox = sceneInfo.decalCullInfos[i].bounds.getBox();
			const float distanceToCamera = (mProperties.viewOrigin - boundingBox.getCenter()).length();

			// Check if viewer is inside the decal volume

			// Extend the bounds slightly to cover the case when the viewer is outside, but the near plane is intersecting
			// the decal bounds. We need to be conservative since the material for rendering outside will not properly
			// render the inside of the decal volume.
			const bool isInside = boundingBox.contains(mProperties.viewOrigin, mProperties.nearPlane * 3.0f);
			const UINT32* techniqueIndices = renderElem.techniqueIndices[(INT32)isInside];

			// No MSAA evaluation, or same value for all samples (no divergence between samples)
			mDecalQueue->add(&renderElem, distanceToCamera,
				techniqueIndices[(INT32)(isMSAA ? MSAAMode::Single : MSAAMode::None)]);

			// Evaluates all MSAA samples for pixels that are marked as divergent
			if(isMSAA)
				mDecalQueue->add(&renderElem, distanceToCamera, techniqueIndices[(INT32)MSAAMode::Full]);
		}

		mForwardOpaqueQueue->sort();
		mDeferredOpaqueQueue->sort();
		mTransparentQueue->sort();
		mDecalQueue->sort();
	}

	Vector2 RendererView::getDeviceZToViewZ(const Matrix4& projMatrix)
	{
		// Returns a set of values that will transform depth buffer values (in range [0, 1]) to a distance
		// in view space. This involves applying the inverse projection transform to the depth value. When you multiply
		// a vector with the projection matrix you get [clipX, clipY, Az + B, C * z], where we don't care about clipX/clipY.
		// A is the [2, 2], B the [2, 3] and C the [3, 2] element of the projection matrix (the only ones that matter for
		// our depth value). The hardware will also automatically divide the z value with w to get the depth, therefore the
		// final formula is:
		// depth = (Az + B) / (C * z)

		// To get the z coordinate back we simply do the opposite:
		// z = B / (depth * C - A)

		// However some APIs will also do a transformation on the depth values before storing them to the texture
		// (e.g. OpenGL will transform from [-1, 1] to [0, 1]), and we need to reverse that as well. Therefore the final
		// formula is:
		// z = B / ((depth * (maxDepth - minDepth) + minDepth) * C - A)

		// We reorganize it so it fits the "(1.0f / (depth + y)) * x" format used in the shader:
		// z = 1.0f / (depth + minDepth/(maxDepth - minDepth) - A/((maxDepth - minDepth) * C)) * B/((maxDepth - minDepth) * C)
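		// In that form the packed constant is x = B / ((maxDepth - minDepth) * C) and
		// y = minDepth / (maxDepth - minDepth) - A / ((maxDepth - minDepth) * C), which is what the code below computes.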

		const RenderAPICapabilities& caps = gCaps();

		float depthRange = caps.maxDepth - caps.minDepth;
		float minDepth = caps.minDepth;

		float a = projMatrix[2][2];
		float b = projMatrix[2][3];
		float c = projMatrix[3][2];

		Vector2 output;

		if (c != 0.0f)
		{
			output.x = b / (depthRange * c);
			output.y = minDepth / depthRange - a / (depthRange * c);
		}
		else // Orthographic, assuming viewing towards negative Z
		{
			output.x = b / -depthRange;
			output.y = minDepth / depthRange - a / -depthRange;
		}

		return output;
	}

	Vector2 RendererView::getNDCZToViewZ(const Matrix4& projMatrix)
	{
		// Returns a set of values that will transform NDC depth values (e.g. [0, 1] in DX, [-1, 1] in GL) to a distance
		// in view space. This involves applying the inverse projection transform to the depth value. When you multiply
		// a vector with the projection matrix you get [clipX, clipY, Az + B, C * z], where we don't care about clipX/clipY.
		// A is the [2, 2], B the [2, 3] and C the [3, 2] element of the projection matrix (the only ones that matter for
		// our depth value). The hardware will also automatically divide the z value with w to get the depth, therefore the
		// final formula is:
		// depth = (Az + B) / (C * z)

		// To get the z coordinate back we simply do the opposite:
		// z = B / (depth * C - A)

		// We reorganize it so it fits the "(1.0f / (depth + y)) * x" format used in the shader:
		// z = 1.0f / (depth - A/C) * B/C
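		// In that form the packed constant is x = B / C and y = -A / C, as computed below.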

		float a = projMatrix[2][2];
		float b = projMatrix[2][3];
		float c = projMatrix[3][2];

		Vector2 output;

		if (c != 0.0f)
		{
			output.x = b / c;
			output.y = -a / c;
		}
		else // Orthographic, assuming viewing towards negative Z
		{
			output.x = -b;
			output.y = a;
		}

		return output;
	}

	Vector2 RendererView::getNDCZToDeviceZ()
	{
		const RenderAPICapabilities& caps = gCaps();

		Vector2 ndcZToDeviceZ;
		ndcZToDeviceZ.x = 1.0f / (caps.maxDepth - caps.minDepth);
		ndcZToDeviceZ.y = -caps.minDepth;
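		// Presumably consumed as deviceZ = (ndcZ + ndcZToDeviceZ.y) * ndcZToDeviceZ.x (an assumption about the shader
		// side): e.g. OpenGL's [-1, 1] NDC range becomes (ndcZ + 1) * 0.5, while DirectX's [0, 1] range is unchanged.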

		return ndcZToDeviceZ;
	}

	Matrix4 invertProjectionMatrix(const Matrix4& mat)
	{
		// Try to solve the most common case using high precision calculations, in order to reduce depth error
		if(mat[0][1] == 0.0f && mat[0][3] == 0.0f &&
			mat[1][0] == 0.0f && mat[1][3] == 0.0f &&
			mat[2][0] == 0.0f && mat[2][1] == 0.0f &&
			mat[3][0] == 0.0f && mat[3][1] == 0.0f &&
			mat[3][2] == -1.0f && mat[3][3] == 0.0f)
		{
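			// The checks above restrict this path to matrices of the form (a standard, possibly off-center,
			// perspective projection):
			//
			//   [ a  0  s  0 ]
			//   [ 0  b  t  0 ]
			//   [ 0  0  c  d ]
			//   [ 0  0 -1  0 ]
			//
			// which admits the simple closed-form inverse returned below.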
			double a = mat[0][0];
			double b = mat[1][1];
			double c = mat[2][2];
			double d = mat[2][3];
			double s = mat[0][2];
			double t = mat[1][2];

			return Matrix4(
				(float)(1.0/a), 0.0f, 0.0f, (float)(s/a),
				0.0f, (float)(1.0/b), 0.0f, (float)(t/b),
				0.0f, 0.0f, 0.0f, -1.0f,
				0.0f, 0.0f, (float)(1.0/d), (float)(c/d)
			);
		}
		else
		{
			return mat.inverse();
		}
	}

	void RendererView::updatePerViewBuffer()
	{
		Matrix4 viewProj = mProperties.projTransform * mProperties.viewTransform;
		Matrix4 invProj = invertProjectionMatrix(mProperties.projTransform);
		Matrix4 invView = mProperties.viewTransform.inverseAffine();
		Matrix4 invViewProj = invView * invProj;
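		// Note: (proj * view)^-1 == view^-1 * proj^-1, so the product above is the inverse of viewProj. Using the
		// affine inverse for the view matrix and the specialized projection inverse helps reduce depth error compared
		// to a general 4x4 inverse of the combined matrix (see invertProjectionMatrix above).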

		gPerCameraParamDef.gMatProj.set(mParamBuffer, mProperties.projTransform);
		gPerCameraParamDef.gMatView.set(mParamBuffer, mProperties.viewTransform);
		gPerCameraParamDef.gMatViewProj.set(mParamBuffer, viewProj);
		gPerCameraParamDef.gMatInvViewProj.set(mParamBuffer, invViewProj);
		gPerCameraParamDef.gMatInvProj.set(mParamBuffer, invProj);

		// Construct a special inverse view-projection matrix that has the projection entries that affect z and w
		// eliminated. Used to transform a vector (clip_x, clip_y, view_z, view_w), where clip_x/clip_y are in clip
		// space, and view_z/view_w in view space, into world space.

		// Only projects z/w coordinates (cancels out with the inverse matrix below)
		Matrix4 projZ = Matrix4::IDENTITY;
		projZ[2][2] = mProperties.projTransform[2][2];
		projZ[2][3] = mProperties.projTransform[2][3];
		projZ[3][2] = mProperties.projTransform[3][2];
		projZ[3][3] = 0.0f;

		Matrix4 NDCToPrevNDC = mProperties.prevViewProjTransform * invViewProj;

		gPerCameraParamDef.gMatScreenToWorld.set(mParamBuffer, invViewProj * projZ);
		gPerCameraParamDef.gNDCToPrevNDC.set(mParamBuffer, NDCToPrevNDC);
		gPerCameraParamDef.gViewDir.set(mParamBuffer, mProperties.viewDirection);
		gPerCameraParamDef.gViewOrigin.set(mParamBuffer, mProperties.viewOrigin);
		gPerCameraParamDef.gDeviceZToWorldZ.set(mParamBuffer, getDeviceZToViewZ(mProperties.projTransform));
		gPerCameraParamDef.gNDCZToWorldZ.set(mParamBuffer, getNDCZToViewZ(mProperties.projTransform));
		gPerCameraParamDef.gNDCZToDeviceZ.set(mParamBuffer, getNDCZToDeviceZ());

		Vector2 nearFar(mProperties.nearPlane, mProperties.farPlane);
		gPerCameraParamDef.gNearFar.set(mParamBuffer, nearFar);

		const Rect2I& viewRect = mProperties.target.viewRect;

		Vector4I viewportRect;
		viewportRect[0] = viewRect.x;
		viewportRect[1] = viewRect.y;
		viewportRect[2] = viewRect.width;
		viewportRect[3] = viewRect.height;

		gPerCameraParamDef.gViewportRectangle.set(mParamBuffer, viewportRect);

		Vector4 ndcToUV = getNDCToUV();
		gPerCameraParamDef.gClipToUVScaleOffset.set(mParamBuffer, ndcToUV);

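		// The UV -> NDC transform is just the inverse of the mapping above: if uv = ndc * ndcToUV.xy + ndcToUV.zw,
		// then ndc = uv * (1 / ndcToUV.xy) - ndcToUV.zw / ndcToUV.xy.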
		Vector4 uvToNDC(
			1.0f / ndcToUV.x,
			1.0f / ndcToUV.y,
			-ndcToUV.z / ndcToUV.x,
			-ndcToUV.w / ndcToUV.y);
		gPerCameraParamDef.gUVToClipScaleOffset.set(mParamBuffer, uvToNDC);

		if (!mRenderSettings->enableLighting)
			gPerCameraParamDef.gAmbientFactor.set(mParamBuffer, 100.0f);
		else
			gPerCameraParamDef.gAmbientFactor.set(mParamBuffer, 0.0f);
	}

	Vector4 RendererView::getNDCToUV() const
	{
		const RenderAPICapabilities& caps = gCaps();
		const Rect2I& viewRect = mProperties.target.viewRect;

		float halfWidth = viewRect.width * 0.5f;
		float halfHeight = viewRect.height * 0.5f;

		float rtWidth = mProperties.target.targetWidth != 0 ? (float)mProperties.target.targetWidth : 20.0f;
		float rtHeight = mProperties.target.targetHeight != 0 ? (float)mProperties.target.targetHeight : 20.0f;

		Vector4 ndcToUV;
		ndcToUV.x = halfWidth / rtWidth;
		ndcToUV.y = -halfHeight / rtHeight;
		ndcToUV.z = viewRect.x / rtWidth + (halfWidth + caps.horizontalTexelOffset) / rtWidth;
		ndcToUV.w = viewRect.y / rtHeight + (halfHeight + caps.verticalTexelOffset) / rtHeight;
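
		// Worked example (ignoring the half-texel offset): for a full-target viewport, halfWidth / rtWidth = 0.5, so
		// ndc.x = -1 maps to u = -0.5 + 0.5 = 0 and ndc.x = 1 maps to u = 1, i.e. NDC [-1, 1] spans the viewport in UV.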

		// Either of these flips the Y axis, but if they're both true they cancel out
		if ((caps.conventions.uvYAxis == Conventions::Axis::Up) ^ (caps.conventions.ndcYAxis == Conventions::Axis::Down))
			ndcToUV.y = -ndcToUV.y;

		return ndcToUV;
	}

	void RendererView::updateLightGrid(const VisibleLightData& visibleLightData,
		const VisibleReflProbeData& visibleReflProbeData)
	{
		mLightGrid.updateGrid(*this, visibleLightData, visibleReflProbeData, !mRenderSettings->enableLighting);
	}

	RendererViewGroup::RendererViewGroup(RendererView** views, UINT32 numViews, bool mainPass, UINT32 shadowMapSize)
		: mIsMainPass(mainPass), mShadowRenderer(shadowMapSize)
	{
		setViews(views, numViews);
	}

	void RendererViewGroup::setViews(RendererView** views, UINT32 numViews)
	{
		mViews.clear();

		for (UINT32 i = 0; i < numViews; i++)
		{
			mViews.push_back(views[i]);
			views[i]->_setViewIdx(i);
		}
	}

	void RendererViewGroup::determineVisibility(const SceneInfo& sceneInfo)
	{
		const auto numViews = (UINT32)mViews.size();

		// Early exit if no views render scene geometry
		bool allViewsOverlay = true;
		for (UINT32 i = 0; i < numViews; i++)
		{
			if (!mViews[i]->getRenderSettings().overlayOnly)
			{
				allViewsOverlay = false;
				break;
			}
		}

		if (allViewsOverlay)
			return;

		// Calculate renderable visibility per view
		mVisibility.renderables.resize(sceneInfo.renderables.size(), false);
		mVisibility.renderables.assign(sceneInfo.renderables.size(), false);

		mVisibility.particleSystems.resize(sceneInfo.particleSystems.size(), false);
		mVisibility.particleSystems.assign(sceneInfo.particleSystems.size(), false);

		mVisibility.decals.resize(sceneInfo.decals.size(), false);
		mVisibility.decals.assign(sceneInfo.decals.size(), false);

		for(UINT32 i = 0; i < numViews; i++)
		{
			mViews[i]->determineVisible(sceneInfo.renderables, sceneInfo.renderableCullInfos, &mVisibility.renderables);
			mViews[i]->determineVisible(sceneInfo.particleSystems, sceneInfo.particleSystemCullInfos, &mVisibility.particleSystems);
			mViews[i]->determineVisible(sceneInfo.decals, sceneInfo.decalCullInfos, &mVisibility.decals);
		}

		// Generate render queues per camera
		for(UINT32 i = 0; i < numViews; i++)
			mViews[i]->queueRenderElements(sceneInfo);

		// Calculate light visibility for all views
		const auto numRadialLights = (UINT32)sceneInfo.radialLights.size();
		mVisibility.radialLights.resize(numRadialLights, false);
		mVisibility.radialLights.assign(numRadialLights, false);

		const auto numSpotLights = (UINT32)sceneInfo.spotLights.size();
		mVisibility.spotLights.resize(numSpotLights, false);
		mVisibility.spotLights.assign(numSpotLights, false);

		for (UINT32 i = 0; i < numViews; i++)
		{
			if (mViews[i]->getRenderSettings().overlayOnly)
				continue;

			mViews[i]->determineVisible(sceneInfo.radialLights, sceneInfo.radialLightWorldBounds, LightType::Radial,
				&mVisibility.radialLights);

			mViews[i]->determineVisible(sceneInfo.spotLights, sceneInfo.spotLightWorldBounds, LightType::Spot,
				&mVisibility.spotLights);
		}

		// Calculate refl. probe visibility for all views
		const auto numProbes = (UINT32)sceneInfo.reflProbes.size();
		mVisibility.reflProbes.resize(numProbes, false);
		mVisibility.reflProbes.assign(numProbes, false);

		// Note: Per-view visibility for refl. probes currently isn't calculated
		for (UINT32 i = 0; i < numViews; i++)
		{
			const auto& viewProps = mViews[i]->getProperties();

			// Don't recursively render reflection probes when generating reflection probe maps
			if (viewProps.capturingReflections)
				continue;

			mViews[i]->calculateVisibility(sceneInfo.reflProbeWorldBounds, mVisibility.reflProbes);
		}

		// Organize light and refl. probe visibility information in a more GPU friendly manner

		// Note: I'm determining light and refl. probe visibility for the entire group. It might be more efficient to
		// do it per view. Additionally I'm using a single GPU buffer to hold their information, which is then updated
		// when each view group is rendered. It might be better to keep one buffer reserved per-view.
		mVisibleLightData.update(sceneInfo, *this);
		mVisibleReflProbeData.update(sceneInfo, *this);

		const bool supportsClusteredForward = gRenderBeast()->getFeatureSet() == RenderBeastFeatureSet::Desktop;
		if(supportsClusteredForward)
		{
			for (UINT32 i = 0; i < numViews; i++)
			{
				if (mViews[i]->getRenderSettings().overlayOnly)
					continue;

				mViews[i]->updateLightGrid(mVisibleLightData, mVisibleReflProbeData);
			}
		}
	}
}}