Cogs.Core
CameraArraySystem.cpp
1#include <glm/glm.hpp>
2#include <glm/gtc/type_ptr.hpp>
3#include <glm/gtx/quaternion.hpp>
4
5#include "CameraArraySystem.h"
6#include "Foundation/Logging/Logger.h"
7#include "Utilities/Math.h"
8
9#include "Resources/Texture.h"
10#include "Rendering/ICapabilities.h"
11
12namespace
13{
14 using namespace Cogs::Core;
15
    // File-local logger used by the helpers in this translation unit.
    Cogs::Logging::Log logger = Cogs::Logging::getLogger("CameraArraySystem");
17
18 bool updateViewBuffer(Context* context,
19 std::vector<ViewBufferEntry>& viewBuffer,
20 const glm::mat4& worldFromBase,
21 const std::vector<glm::mat4>& projections,
22 const std::vector<glm::mat4>& transforms,
23 const size_t numViews,
24 const float overrideNearValue,
25 const float overrideFarValue,
26 const bool overrideProjectionNearAndFar)
27 {
28 // We need a matching set of transform/projections
29 if (projections.size() != numViews || transforms.size() != numViews) {
30 return false;
31 }
32
33 viewBuffer.resize(numViews);
34 for (size_t i = 0; i < numViews; i++) {
35 ViewBufferEntry& entry = viewBuffer[i];
36 entry.clipFromView = context->renderer->getProjectionMatrix(projections[i]);
37
38 // Optionally change the near and far planes embedded in the provided projections. This
39 // can be used to let on cameras use different depth ranges when using camera stacking.
40 //
41 // Code taken from cogs.js Main.cpp. I think it should in theory be possible
42 // to just post-multiply with a matrix that adjusts where near and far is.
43 //
44 if (overrideProjectionNearAndFar && overrideNearValue>0.0 && overrideNearValue < overrideFarValue) {
45
46 // Extract Left Bottom (lb) and Right Top (rt) from Inverse Projection Matrix
47 glm::mat4 IP = glm::inverse(projections[i]);
48 glm::vec4 lb = IP * glm::vec4(-1, -1, -1.0, 1);
49 glm::vec4 rt = IP * glm::vec4(1, 1, -1.0, 1);
50 lb = lb / lb.w;
51 rt = rt / rt.w;
52 // Construct new frustum with modified near and far
53 float slb = -overrideNearValue / lb.z;
54 float srt = -overrideNearValue / rt.z;
55 float left = lb.x * slb;
56 float bottom = lb.y * slb;
57 float right = rt.x * srt;
58 float top = rt.y * srt;
59
60 glm::mat4 P = glm::frustum(left, right, bottom, top, overrideNearValue, overrideFarValue);
61 entry.clipFromView = P;
62 entry.clipFromView = context->renderer->getProjectionMatrix(P);
63 }
64
65 entry.worldFromView = worldFromBase * transforms[i];
66 entry.viewFromClip = glm::inverse(entry.clipFromView);
67 entry.viewFromWorld = glm::inverse(entry.worldFromView);
68 entry.clipFromWorld = entry.clipFromView * entry.viewFromWorld;
69 entry.viewFromViewport = context->renderer->getViewFromViewportMatrix(entry.viewFromClip);
70 }
71
72 return true;
73 }
74
    // Repositions and reconfigures the reference camera (refCamComp / refCamTrComp) so that
    // its single frustum encloses every per-view frustum in viewBuffer — e.g. so one camera
    // can stand in for both eyes of a stereo/VR rig for culling and scene queries.
    //
    // height:                     pixel height used for the viewport when overrideViewportResolution is set.
    // debugFrustumSqueeze:        debug factor that shrinks the resulting field of view (see component docs).
    // moveCameraBack:             move the camera backwards along its view axis to minimize the enclosing FOV.
    // updateNearAndFar:           write the computed near/far into the camera and disable automatic
    //                             clipping-plane adjustment; otherwise re-enable the automatic adjustment.
    // overrideViewportResolution: use 'height' for the camera's viewport height instead of its current value.
    //
    // Always returns true. Requires viewBuffer to be non-empty (asserted).
    bool makeCameraEncloseViews(Context* context,
                                CameraComponent& refCamComp,
                                TransformComponent& refCamTrComp,
                                std::span<ViewBufferEntry> viewBuffer,
                                const uint32_t height,
                                const float debugFrustumSqueeze,
                                const bool moveCameraBack,
                                const bool updateNearAndFar,
                                const bool overrideViewportResolution)
    {
        const size_t numViews = viewBuffer.size();

        // Create camera space from frustum. If there is one view, use the same camera
        // space. If there are both left-and right views, create a view in the middle.
        //
        // NOTE: this assumes that the camera is either a root node or there are no
        // transformations above it in the hierarchy.
        assert(0 < numViews);

        // The matrices have been modified so they are with relation to engine origin.
        glm::vec3 refCamPos;
        glm::quat refCamRot;
        if (numViews == 1) {
            refCamPos = glm::vec3(viewBuffer[0].worldFromView[3]);
            refCamRot = glm::quat_cast(glm::mat3(viewBuffer[0].worldFromView));
        }
        else {
            // Midpoint position and shortest-arc halfway rotation between the first two
            // views. NOTE(review): only views 0 and 1 are considered here — assumes a
            // two-view (stereo) setup when numViews > 1.
            refCamPos = 0.5f * (glm::vec3(viewBuffer[0].worldFromView[3]) +
                glm::vec3(viewBuffer[1].worldFromView[3]));
            refCamRot = glm::shortMix(glm::quat_cast(glm::mat3(viewBuffer[0].worldFromView)),
                glm::quat_cast(glm::mat3(viewBuffer[1].worldFromView)), 0.5f);
        }

        // Convert to world coordinates, and update camera transform
        refCamTrComp.position = glm::vec3(0.f);
        refCamTrComp.coordinates = context->transformSystem->worldFromEngineCoords(refCamPos);
        refCamTrComp.rotation = refCamRot;
        refCamTrComp.setChanged();
        context->transformSystem->updateLocalToWorldTransform(refCamTrComp, true);

        // Find intersection of frustum edges and Z axis and back camera back,
        // tries to minimize field-of-view of combined frustum.
        if (moveCameraBack) {
            float back = 0.f;
            const glm::mat4 camViewFromWorld = glm::inverse(context->transformSystem->getLocalToWorld(&refCamTrComp));
            for (size_t i = 0; i < numViews; i++) {

                // Maps this view's clip space into the new reference-camera view space.
                const glm::mat4 camViewFromViewClip = camViewFromWorld * viewBuffer[i].worldFromView * viewBuffer[i].viewFromClip;

                // Walk the four frustum edges (one per near/far corner pair).
                for (size_t c = 0; c < 4; c++) {
                    const float x = ((c >> 0) & 1) ? -1.f : 1.f;
                    const float y = ((c >> 1) & 1) ? -1.f : 1.f;

                    // Near-plane corner (a) and far-plane corner (b) of the edge, in camera view space.
                    const glm::vec3 a = euclidean(camViewFromViewClip * glm::vec4(x, y, -1.f, 1.f));
                    const glm::vec3 b = euclidean(camViewFromViewClip * glm::vec4(x, y, 1.f, 1.f));

                    // Let a and b form two similar right-angled triangles with shared vertex q at z-axis.
                    // We want to find q:

                    // a.x/(a.z-q.z) = b.x/(b.z-q.z)
                    // (b.z-q.z) * a.x = (a.z-q.z) * b.x
                    // a.x b.z - a.x q.z = b.x a.z - b.x q.z
                    // b.x q.z - a.x q.z = b.x a.z - a.x b.z
                    // (b.x - a.x) q.z = b.x a.z - a.x b.z
                    // q.z = (b.x a.z - a.x b.z) / (b.x - a.x)
                    // Keep the furthest intersection behind the camera (most negative z).
                    if (float qz = (b.x * a.z - a.x * b.z) / (b.x - a.x); std::isfinite(qz) && qz < back) {
                        back = qz;
                    }
                    // Same for y
                    if (float qz = (b.y * a.z - a.y * b.z) / (b.y - a.y); std::isfinite(qz) && qz < back) {
                        back = qz;
                    }
                }
            }

            // Translate the camera back along its -z axis to the furthest intersection found.
            glm::vec3 newPosEngine = euclidean(context->transformSystem->getLocalToWorld(&refCamTrComp) * glm::vec4(0.f, 0.f, -back, 1.f));
            glm::dvec3 newPosWorld = context->transformSystem->worldFromEngineCoords(newPosEngine);
            refCamTrComp.position = newPosWorld - refCamTrComp.coordinates;
            refCamTrComp.setChanged();
            context->transformSystem->updateLocalToWorldTransform(refCamTrComp, true);
        }

        // Figure out field-of-view by transforming all the corners of the frustums of
        // the view into the camera space we created and find the field-of-view that
        // contains all points.
        float xAngle = 0.f;
        float yAngle = 0.f;
        float nearDist = std::numeric_limits<float>::max();
        float farDist = 0.f;
        const glm::mat4 camViewFromWorld = glm::inverse(context->transformSystem->getLocalToWorld(&refCamTrComp));
        for (size_t i = 0; i < numViews; i++) {

            // Transform from view's clip-space to new camera space.
            const glm::mat4 camViewFromViewClip = camViewFromWorld * viewBuffer[i].worldFromView * viewBuffer[i].viewFromClip;

            for (size_t c = 0; c < 8; c++) {
                // Project frustum corner into new view space
                const glm::vec4 h = camViewFromViewClip * glm::vec4(((c >> 0) & 1) ? -1.f : 1.f,
                    ((c >> 1) & 1) ? -1.f : 1.f,
                    ((c >> 2) & 1) ? -1.f : 1.f,
                    1.f);
                const glm::vec3 p = (1.f / h.w) * glm::vec3(h);

                // Update bounds. The z clamp avoids degenerate atan2/near values for
                // corners at or behind the camera plane.
                const glm::vec3 pp = glm::vec3(std::abs(p.x),
                    std::abs(p.y),
                    std::max(1e-3f, -p.z));
                nearDist = std::min(nearDist, pp.z);
                farDist = std::max(farDist, pp.z);
                xAngle = std::max(xAngle, std::atan2(pp.x, pp.z));
                yAngle = std::max(yAngle, std::atan2(pp.y, pp.z));
            }
        }

        // calc aspect ratio that is wide enough to contain all views.
        const float aspect = glm::tan(0.5f * xAngle) / glm::tan(0.5f * yAngle);


        // And update main camera. xAngle/yAngle are half-angles, hence the factor 2.
        refCamComp.fieldOfView = 2.0f * glm::clamp(1.f - debugFrustumSqueeze, 0.1f, 1.f) * yAngle;

        // Optionally use near and far calculated here that matches the View's near and far
        // pretty good instead of letting cogs calculate this the normal way.
        if (updateNearAndFar) {
            refCamComp.nearDistance = nearDist;
            refCamComp.farDistance = farDist;
            refCamComp.enableClippingPlaneAdjustment = false;
        }
        else {
            refCamComp.enableClippingPlaneAdjustment = true;
        }

        refCamComp.viewportOrigin = glm::vec2(0.f);
        if (overrideViewportResolution) {
            // Use VR viewport size for pixel resolution calculations
            refCamComp.viewportSize.y = static_cast<float>(height);
        }
        refCamComp.viewportSize.x = aspect * refCamComp.viewportSize.y;

        refCamComp.setChanged();
        context->cameraSystem->updateProjection(context, refCamComp);
        return true;
    }
218
219 void tryValidateTextureArraySetup(const CameraArrayComponent& camArrComp, CameraArrayData& camArrData, uint32_t& width, uint32_t& height)
220 {
221 assert(camArrData.textureMode == CameraArrayData::TextureMode::None);
222 if (camArrComp.colorTextures.size() == 1) {
223 Texture* colorTex = camArrComp.colorTextures[0].resolve();
224 if (colorTex && (colorTex->description.target == Cogs::ResourceDimensions::Texture2DArray || colorTex->description.target == Cogs::ResourceDimensions::Texture2DMSArray)) {
225
226 width = colorTex->description.width;
227 height = colorTex->description.height;
228
229 camArrData.numViews = colorTex->description.layers;
230 camArrData.textureMode = CameraArrayData::TextureMode::TextureArray;
231 camArrData.colorTextures = { camArrComp.colorTextures[0] };
232
233 if (camArrComp.enableDepth) {
234
235 if (camArrComp.depthTextures.empty()) {
236 camArrData.depthMode = CameraArrayData::DepthMode::Create;
237 }
238
239 if (camArrComp.depthTextures.size() == 1) {
240 Texture* depthTex = camArrComp.depthTextures[0].resolve();
241 if (colorTex->description.target == Cogs::ResourceDimensions::Texture2DArray || colorTex->description.target == Cogs::ResourceDimensions::Texture2DMSArray) {
242 if ((depthTex->description.width == width) &&
243 (depthTex->description.height == height) &&
244 (depthTex->description.layers == camArrData.numViews))
245 {
246 camArrData.depthMode = CameraArrayData::DepthMode::Provided;
247 camArrData.depthTextures = { camArrComp.depthTextures[0] };
248 }
249 }
250 }
251
252 }
253
254 }
255 }
256 }
257
258 void tryValidateArrayOfTexturesSetup(const CameraArrayComponent& camArrComp, CameraArrayData& camArrData, uint32_t& width, uint32_t& height)
259 {
260 if (camArrData.textureMode != CameraArrayData::TextureMode::None || camArrComp.colorTextures.empty()) {
261 return;
262 }
263
264 camArrData.numViews = static_cast<uint32_t>(camArrComp.colorTextures.size());
265
266 camArrData.depthMode = CameraArrayData::DepthMode::None;
267 if (camArrComp.enableDepth) {
268 if (camArrComp.depthTextures.empty()) {
269 camArrData.depthMode = CameraArrayData::DepthMode::Create;
270 }
271
272 if (camArrComp.depthTextures.size() == camArrComp.colorTextures.size()) {
273 camArrData.depthMode = CameraArrayData::DepthMode::Provided;
274 // We validate the each depth texture in the loop below
275 }
276 }
277
278 for (size_t i = 0; i < camArrData.numViews; i++) {
279
280 TextureHandle colorTexHandle = camArrComp.colorTextures[i];
281 const Texture* colorTex = colorTexHandle.resolve();
282 if (!(colorTex && (colorTex->description.target == Cogs::ResourceDimensions::Texture2D || colorTex->description.target == Cogs::ResourceDimensions::RenderBuffer))) {
283 return;
284 }
285 if (i == 0) {
286 width = colorTex->description.width;
287 height = colorTex->description.height;
288 }
289 else if ((colorTex->description.width != width) || (colorTex->description.height != height)) {
290 return;
291 }
292 camArrData.colorTextures.push_back(colorTexHandle);
293
294 if (camArrData.depthMode == CameraArrayData::DepthMode::Provided) {
295 TextureHandle depthTexHandle = camArrComp.depthTextures[i];
296 const Texture* depthTex = depthTexHandle.resolve();
297 if (depthTex &&
298 (depthTex->description.target == Cogs::ResourceDimensions::Texture2D || depthTex->description.target == Cogs::ResourceDimensions::RenderBuffer) &&
299 (depthTex->description.width == width) &&
300 (depthTex->description.height == height))
301 {
302 camArrData.depthTextures.push_back(depthTexHandle);
303 }
304 }
305 }
306
307 camArrData.textureMode = CameraArrayData::TextureMode::ArrayOfTextures;
308
309 if ((camArrData.depthMode == CameraArrayData::DepthMode::Provided) && (camArrData.colorTextures.size() != camArrData.depthTextures.size())) {
310 camArrData.depthMode = CameraArrayData::DepthMode::None;
311 }
312 }
313
314
315}
316
317
318
320{
321 for (CameraArrayComponent& camArrComp : pool) {
322
323 const TransformComponent* camArrTrComp = camArrComp.getComponent<TransformComponent>();
324 if (!camArrTrComp) continue;
325
326 const glm::mat4 worldFromBase = context->transformSystem->getLocalToWorld(camArrTrComp);
327
328 CameraArrayData& camArrData = getData(&camArrComp);
329 camArrData.textureMode = CameraArrayData::TextureMode::None;
330 camArrData.depthMode = CameraArrayData::DepthMode::None;
331 camArrData.colorTextures.clear();
332 camArrData.depthTextures.clear();
333
334 uint32_t width = 0;
335 uint32_t height = 0;
336
337 tryValidateTextureArraySetup(camArrComp, camArrData, width, height);
338 tryValidateArrayOfTexturesSetup(camArrComp, camArrData, width, height);
339
340
341
342
343 if (camArrData.textureMode != CameraArrayData::TextureMode::None) {
344 assert(0 < camArrData.numViews);
345
346 // Only support 2 multiviews for now as that is currently the only use-case,
347 // and generalizing this beyond that requires memory here and there.
348 // Update: Allow 1 view to facilitate VR simulators with only one view
349 if (camArrData.numViews <= 2) {
350
            // Check number of matrices etc. and populate the view buffer
352 if (updateViewBuffer(context,
353 camArrData.viewBuffer,
354 worldFromBase,
355 camArrComp.projections,
356 camArrComp.transforms,
357 camArrData.numViews,
358 camArrComp.overrideNearValue,
359 camArrComp.overrideFarValue,
361 {
362 if (const EntityPtr referenceCamera = camArrComp.referenceCamera.lock(); referenceCamera) {
363 if (CameraComponent* refCamComp = referenceCamera->getComponent<CameraComponent>(); refCamComp) {
364 if (TransformComponent* refCamTrComp = refCamComp->getComponent<TransformComponent>(); refCamTrComp) {
365 if (camArrComp.updateReferenceCamera == false || makeCameraEncloseViews(context,
366 *refCamComp, *refCamTrComp,
367 camArrData.viewBuffer,
368 height,
369 camArrComp.debugFrustumSqueeze,
370 camArrComp.moveCameraBack,
371 camArrComp.updateNearAndFar,
372 camArrComp.updateViewportResolution))
373 {
374 const CameraData& camData = context->cameraSystem->getData(refCamComp);
375
376 camArrData.camData = camData;
377 camArrData.pipeline = refCamComp->renderPipeline;
378
379 camArrData.camData.viewportOrigin = { 0, 0 };
380 camArrData.camData.viewportSize.x = static_cast<float>(width);
381 camArrData.camData.viewportSize.y = static_cast<float>(height);
382
383 camArrData.camData.cameraArray = camArrComp.getComponentHandle<CameraArrayComponent>();
384
385 continue; // sucess
386 }
387 }
388 }
389 }
390 }
391 }
392 }
393
394 // One of the tests failed
395 camArrData.textureMode = CameraArrayData::TextureMode::None;
396 }
397}
void setChanged()
Sets the component to the ComponentFlags::Changed state with carry.
Definition: Component.h:202
ComponentType * getComponent() const
Definition: Component.h:159
ComponentHandle getComponentHandle() const
Definition: Component.h:177
Multi-view: Render a set of related views into array texture layers.
bool enableDepth
Rendering should use a depth buffer, either backed by a provided texture or a buffer created by the r...
std::vector< glm::mat4 > projections
One projection matrix per layer.
std::vector< TextureHandle > colorTextures
The color render texture to output the rendered scene from the camera to.
bool updateReferenceCamera
Update reference camera using the view projection and transform matrices.
bool moveCameraBack
Find intersection of frustum edges and Z axis and back camera back, tries to minimize field-of-view o...
bool overrideProjectionNearAndFar
If true, override near and far values embedded in the provided projection matrix with overrideNearVal...
float overrideNearValue
Near-value to use when overrideNearAndFar is enabled.
float overrideFarValue
Far-value to use when overrideNearAndFar is enabled.
std::vector< TextureHandle > depthTextures
The depth render texture to output the rendered scene from the camera to. Optional.
bool updateNearAndFar
When updating the reference camera, also update near and far.
bool updateViewportResolution
When updating the reference camera, use texture dimension to set camera viewport resolution.
std::vector< glm::mat4 > transforms
One affine transform matrix per layer.
float debugFrustumSqueeze
Debug variable to squeeze the frustum to be smaller than required, as a help to debug culling.
WeakEntityPtr referenceCamera
Reference camera to pull data from.
float fieldOfView
Vertical field of view, given in radians.
bool enableClippingPlaneAdjustment
If automatic adjustment of the clipping planes should be performed to fit as much of the scene as pos...
glm::vec2 viewportSize
Size of the viewport covered by this instance, given in pixels.
std::string renderPipeline
Render pipeline to apply when rendering to texture. Defaults to the built-in forward rendering pipeli...
glm::vec2 viewportOrigin
Origin of the viewport covered by this instance, given in screen units from the lower left.
Context * context
Pointer to the Context instance the system lives in.
void update()
Updates the system state to that of the current frame.
ComponentPool< ComponentType > pool
Pool of components managed by the system.
A Context instance contains all the services, systems and runtime components needed to use Cogs.
Definition: Context.h:83
class IRenderer * renderer
Renderer.
Definition: Context.h:228
virtual glm::mat4 getViewFromViewportMatrix(const glm::mat4 inverseProjectionMatrix)=0
Get an adjusted inverse projection matrix mainly used in post processing.
virtual glm::mat4 getProjectionMatrix(const glm::mat4 projectionMatrix)=0
Get an adjusted projection matrix used to render.
Defines a 4x4 transformation matrix for the entity and a global offset for root entities.
glm::dvec3 coordinates
Global coordinates.
glm::quat rotation
Rotation given as a quaternion.
glm::vec3 position
Local position relative to the global coordinates, or the parent coordinate system if the parent fiel...
Log implementation class.
Definition: LogManager.h:139
Contains the Engine, Renderer, resource managers and other systems needed to run Cogs....
std::shared_ptr< ComponentModel::Entity > EntityPtr
Smart pointer for Entity access.
Definition: EntityPtr.h:12
constexpr Log getLogger(const char(&name)[LEN]) noexcept
Definition: LogManager.h:180
Contains data describing a Camera instance and its derived data structured such as matrix data and vi...
Definition: CameraSystem.h:67
ResourceType * resolve() const
Resolve the handle, returning a pointer to the actual resource.
Texture resources contain raster bitmap data to use for texturing.
Definition: Texture.h:91