The open source OpenXR runtime
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

mono: projection layer supporting mono

+671 -738
+1 -1
doc/frame-pacing.md
··· 40 40 is done with CPU work and ready to do GPU work. 41 41 * @ref xrt_comp_discard_frame - The frame is discarded. 42 42 * @ref xrt_comp_layer_begin - Called during transfers of layers. 43 - * @ref xrt_comp_layer_stereo_projection - This and other layer functions are 43 + * @ref xrt_comp_layer_projection - This and other layer functions are 44 44 called to list the layers the compositor should render. 45 45 * @ref xrt_comp_layer_commit - The compositor starts to render the frame, 46 46 trying to finish at the **present** time.
+2
src/xrt/auxiliary/util/u_device.c
··· 270 270 271 271 if (alloc_hmd) { 272 272 xdev->hmd = (struct xrt_hmd_parts *)(ptr + offset_hmd); 273 + // set default view count 274 + xdev->hmd->view_count = 2; 273 275 } 274 276 275 277 if (alloc_tracking) {
+11 -10
src/xrt/compositor/client/comp_d3d11_client.cpp
··· 554 554 } 555 555 556 556 static xrt_result_t 557 - client_d3d11_compositor_layer_stereo_projection(struct xrt_compositor *xc, 558 - struct xrt_device *xdev, 559 - struct xrt_swapchain *l_xsc, 560 - struct xrt_swapchain *r_xsc, 561 - const struct xrt_layer_data *data) 557 + client_d3d11_compositor_layer_projection(struct xrt_compositor *xc, 558 + struct xrt_device *xdev, 559 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 560 + const struct xrt_layer_data *data) 562 561 { 563 562 struct client_d3d11_compositor *c = as_client_d3d11_compositor(xc); 564 563 565 - assert(data->type == XRT_LAYER_STEREO_PROJECTION); 564 + assert(data->type == XRT_LAYER_PROJECTION); 566 565 567 - struct xrt_swapchain *l_xscn = as_client_d3d11_swapchain(l_xsc)->xsc.get(); 568 - struct xrt_swapchain *r_xscn = as_client_d3d11_swapchain(r_xsc)->xsc.get(); 566 + struct xrt_swapchain *xscn[XRT_MAX_VIEWS]; 567 + for (uint32_t i = 0; i < data->proj.view_count; ++i) { 568 + xscn[i] = as_client_d3d11_swapchain(xsc[i])->xsc.get(); 569 + } 569 570 570 571 // No flip required: D3D11 swapchain image convention matches Vulkan. 571 - return xrt_comp_layer_stereo_projection(&c->xcn->base, xdev, l_xscn, r_xscn, data); 572 + return xrt_comp_layer_projection(&c->xcn->base, xdev, xscn, data); 572 573 } 573 574 574 575 static xrt_result_t ··· 879 880 c->base.base.begin_frame = client_d3d11_compositor_begin_frame; 880 881 c->base.base.discard_frame = client_d3d11_compositor_discard_frame; 881 882 c->base.base.layer_begin = client_d3d11_compositor_layer_begin; 882 - c->base.base.layer_stereo_projection = client_d3d11_compositor_layer_stereo_projection; 883 + c->base.base.layer_projection = client_d3d11_compositor_layer_projection; 883 884 c->base.base.layer_stereo_projection_depth = client_d3d11_compositor_layer_stereo_projection_depth; 884 885 c->base.base.layer_quad = client_d3d11_compositor_layer_quad; 885 886 c->base.base.layer_cube = client_d3d11_compositor_layer_cube;
+11 -13
src/xrt/compositor/client/comp_d3d12_client.cpp
··· 782 782 } 783 783 784 784 static xrt_result_t 785 - client_d3d12_compositor_layer_stereo_projection(struct xrt_compositor *xc, 786 - struct xrt_device *xdev, 787 - struct xrt_swapchain *l_xsc, 788 - struct xrt_swapchain *r_xsc, 789 - const struct xrt_layer_data *data) 785 + client_d3d12_compositor_layer_projection(struct xrt_compositor *xc, 786 + struct xrt_device *xdev, 787 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 788 + const struct xrt_layer_data *data) 790 789 { 791 790 struct client_d3d12_compositor *c = as_client_d3d12_compositor(xc); 792 791 793 - assert(data->type == XRT_LAYER_STEREO_PROJECTION); 794 - 795 - struct xrt_swapchain *l_xscn = as_client_d3d12_swapchain(l_xsc)->xsc.get(); 796 - struct xrt_swapchain *r_xscn = as_client_d3d12_swapchain(r_xsc)->xsc.get(); 792 + assert(data->type == XRT_LAYER_PROJECTION); 797 793 794 + struct xrt_swapchain *xscn[XRT_MAX_VIEWS]; 795 + for (uint32_t i = 0; i < data->proj.view_count; ++i) { 796 + xscn[i] = as_client_d3d12_swapchain(xsc[i])->xsc.get(); 797 + } 798 798 struct xrt_layer_data d = *data; 799 - client_d3d12_swapchain_scale_rect(l_xsc, &d.stereo.l.sub.norm_rect); 800 - client_d3d12_swapchain_scale_rect(r_xsc, &d.stereo.r.sub.norm_rect); 801 799 802 800 // No flip required: D3D12 swapchain image convention matches Vulkan. 
803 - return xrt_comp_layer_stereo_projection(&c->xcn->base, xdev, l_xscn, r_xscn, &d); 801 + return xrt_comp_layer_projection(&c->xcn->base, xdev, xscn, &d); 804 802 } 805 803 806 804 static xrt_result_t ··· 1137 1135 c->base.base.begin_frame = client_d3d12_compositor_begin_frame; 1138 1136 c->base.base.discard_frame = client_d3d12_compositor_discard_frame; 1139 1137 c->base.base.layer_begin = client_d3d12_compositor_layer_begin; 1140 - c->base.base.layer_stereo_projection = client_d3d12_compositor_layer_stereo_projection; 1138 + c->base.base.layer_projection = client_d3d12_compositor_layer_projection; 1141 1139 c->base.base.layer_stereo_projection_depth = client_d3d12_compositor_layer_stereo_projection_depth; 1142 1140 c->base.base.layer_quad = client_d3d12_compositor_layer_quad; 1143 1141 c->base.base.layer_cube = client_d3d12_compositor_layer_cube;
+11 -14
src/xrt/compositor/client/comp_gl_client.c
··· 229 229 } 230 230 231 231 static xrt_result_t 232 - client_gl_compositor_layer_stereo_projection(struct xrt_compositor *xc, 233 - struct xrt_device *xdev, 234 - struct xrt_swapchain *l_xsc, 235 - struct xrt_swapchain *r_xsc, 236 - const struct xrt_layer_data *data) 232 + client_gl_compositor_layer_projection(struct xrt_compositor *xc, 233 + struct xrt_device *xdev, 234 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 235 + const struct xrt_layer_data *data) 237 236 { 238 237 struct xrt_compositor *xcn; 239 - struct xrt_swapchain *l_xscn; 240 - struct xrt_swapchain *r_xscn; 241 - 242 - assert(data->type == XRT_LAYER_STEREO_PROJECTION); 238 + struct xrt_swapchain *xscn[XRT_MAX_VIEWS]; 243 239 244 240 xcn = to_native_compositor(xc); 245 - l_xscn = to_native_swapchain(l_xsc); 246 - r_xscn = to_native_swapchain(r_xsc); 247 - 241 + assert(data->type == XRT_LAYER_PROJECTION); 242 + for (uint32_t i = 0; i < data->proj.view_count; ++i) { 243 + xscn[i] = &client_gl_swapchain(xsc[i])->xscn->base; 244 + } 248 245 struct xrt_layer_data d = *data; 249 246 d.flip_y = !d.flip_y; 250 247 251 - return xrt_comp_layer_stereo_projection(xcn, xdev, l_xscn, r_xscn, &d); 248 + return xrt_comp_layer_projection(xcn, xdev, xscn, &d); 252 249 } 253 250 254 251 static xrt_result_t ··· 613 610 c->base.base.begin_frame = client_gl_compositor_begin_frame; 614 611 c->base.base.discard_frame = client_gl_compositor_discard_frame; 615 612 c->base.base.layer_begin = client_gl_compositor_layer_begin; 616 - c->base.base.layer_stereo_projection = client_gl_compositor_layer_stereo_projection; 613 + c->base.base.layer_projection = client_gl_compositor_layer_projection; 617 614 c->base.base.layer_stereo_projection_depth = client_gl_compositor_layer_stereo_projection_depth; 618 615 c->base.base.layer_quad = client_gl_compositor_layer_quad; 619 616 c->base.base.layer_cube = client_gl_compositor_layer_cube;
+11 -12
src/xrt/compositor/client/comp_vk_client.c
··· 474 474 } 475 475 476 476 static xrt_result_t 477 - client_vk_compositor_layer_stereo_projection(struct xrt_compositor *xc, 478 - struct xrt_device *xdev, 479 - struct xrt_swapchain *l_xsc, 480 - struct xrt_swapchain *r_xsc, 481 - const struct xrt_layer_data *data) 477 + client_vk_compositor_layer_projection(struct xrt_compositor *xc, 478 + struct xrt_device *xdev, 479 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 480 + const struct xrt_layer_data *data) 482 481 { 483 482 struct xrt_compositor *xcn; 484 - struct xrt_swapchain *l_xscn; 485 - struct xrt_swapchain *r_xscn; 483 + struct xrt_swapchain *xscn[XRT_MAX_VIEWS]; 486 484 487 - assert(data->type == XRT_LAYER_STEREO_PROJECTION); 485 + assert(data->type == XRT_LAYER_PROJECTION); 486 + for (uint32_t i = 0; i < data->proj.view_count; ++i) { 487 + xscn[i] = &client_vk_swapchain(xsc[i])->xscn->base; 488 + } 488 489 489 490 xcn = to_native_compositor(xc); 490 - l_xscn = to_native_swapchain(l_xsc); 491 - r_xscn = to_native_swapchain(r_xsc); 492 491 493 - return xrt_comp_layer_stereo_projection(xcn, xdev, l_xscn, r_xscn, data); 492 + return xrt_comp_layer_projection(xcn, xdev, xscn, data); 494 493 } 495 494 496 495 ··· 842 841 c->base.base.begin_frame = client_vk_compositor_begin_frame; 843 842 c->base.base.discard_frame = client_vk_compositor_discard_frame; 844 843 c->base.base.layer_begin = client_vk_compositor_layer_begin; 845 - c->base.base.layer_stereo_projection = client_vk_compositor_layer_stereo_projection; 844 + c->base.base.layer_projection = client_vk_compositor_layer_projection; 846 845 c->base.base.layer_stereo_projection_depth = client_vk_compositor_layer_stereo_projection_depth; 847 846 c->base.base.layer_quad = client_vk_compositor_layer_quad; 848 847 c->base.base.layer_cube = client_vk_compositor_layer_cube;
+14 -22
src/xrt/compositor/main/comp_compositor.c
··· 272 272 enum xrt_layer_type type = layer->data.type; 273 273 274 274 // Handled by the distortion shader. 275 - if (type != XRT_LAYER_STEREO_PROJECTION && // 275 + if (type != XRT_LAYER_PROJECTION && // 276 276 type != XRT_LAYER_STEREO_PROJECTION_DEPTH) { 277 277 return false; 278 278 } ··· 989 989 990 990 uint32_t w0 = (uint32_t)(xdev->hmd->views[0].display.w_pixels * scale); 991 991 uint32_t h0 = (uint32_t)(xdev->hmd->views[0].display.h_pixels * scale); 992 - uint32_t w1 = (uint32_t)(xdev->hmd->views[1].display.w_pixels * scale); 993 - uint32_t h1 = (uint32_t)(xdev->hmd->views[1].display.h_pixels * scale); 994 - 995 - uint32_t w0_2 = xdev->hmd->views[0].display.w_pixels * 2; 996 - uint32_t h0_2 = xdev->hmd->views[0].display.h_pixels * 2; 997 - uint32_t w1_2 = xdev->hmd->views[1].display.w_pixels * 2; 998 - uint32_t h1_2 = xdev->hmd->views[1].display.h_pixels * 2; 999 992 1000 993 c->view_extents.width = w0; 1001 994 c->view_extents.height = h0; ··· 1052 1045 /* 1053 1046 * Rest of info. 1054 1047 */ 1055 - // Hardcoded for now. 
1056 - uint32_t view_count = 2; 1057 1048 1058 1049 struct xrt_system_compositor_info sys_info_storage = {0}; 1059 1050 struct xrt_system_compositor_info *sys_info = &sys_info_storage; ··· 1066 1057 sys_info->client_d3d_deviceLUID_valid = c->settings.client_gpu_deviceLUID_valid; 1067 1058 1068 1059 // clang-format off 1069 - sys_info->views[0].recommended.width_pixels = w0; 1070 - sys_info->views[0].recommended.height_pixels = h0; 1071 - sys_info->views[0].recommended.sample_count = 1; 1072 - sys_info->views[0].max.width_pixels = w0_2; 1073 - sys_info->views[0].max.height_pixels = h0_2; 1074 - sys_info->views[0].max.sample_count = 1; 1060 + uint32_t view_count = xdev->hmd->view_count; 1061 + for (uint32_t i = 0; i < view_count; ++i) { 1062 + uint32_t w = (uint32_t)(xdev->hmd->views[i].display.w_pixels * scale); 1063 + uint32_t h = (uint32_t)(xdev->hmd->views[i].display.h_pixels * scale); 1064 + uint32_t w_2 = xdev->hmd->views[i].display.w_pixels * 2; 1065 + uint32_t h_2 = xdev->hmd->views[i].display.h_pixels * 2; 1075 1066 1076 - sys_info->views[1].recommended.width_pixels = w1; 1077 - sys_info->views[1].recommended.height_pixels = h1; 1078 - sys_info->views[1].recommended.sample_count = 1; 1079 - sys_info->views[1].max.width_pixels = w1_2; 1080 - sys_info->views[1].max.height_pixels = h1_2; 1081 - sys_info->views[1].max.sample_count = 1; 1067 + sys_info->views[i].recommended.width_pixels = w; 1068 + sys_info->views[i].recommended.height_pixels = h; 1069 + sys_info->views[i].recommended.sample_count = 1; 1070 + sys_info->views[i].max.width_pixels = w_2; 1071 + sys_info->views[i].max.height_pixels = h_2; 1072 + sys_info->views[i].max.sample_count = 1; 1073 + } 1082 1074 // clang-format on 1083 1075 1084 1076 // If we can add e.g. video pass-through capabilities, we may need to change (augment) this list.
+63 -79
src/xrt/compositor/main/comp_renderer.c
··· 114 114 { 115 115 //! Targets for rendering to the scratch buffer. 116 116 struct render_gfx_target_resources targets[COMP_SCRATCH_NUM_IMAGES]; 117 - } views[2]; 117 + } views[XRT_MAX_VIEWS]; 118 118 } scratch; 119 119 120 120 //! @} ··· 217 217 218 218 static void 219 219 calc_viewport_data(struct comp_renderer *r, 220 - struct render_viewport_data *out_l_viewport_data, 221 - struct render_viewport_data *out_r_viewport_data) 220 + struct render_viewport_data out_viewport_data[XRT_MAX_VIEWS], 221 + size_t view_count) 222 222 { 223 223 struct comp_compositor *c = r->c; 224 224 ··· 235 235 float scale_x = (float)r->c->target->width / (float)w_i32; 236 236 float scale_y = (float)r->c->target->height / (float)h_i32; 237 237 238 - struct xrt_view *l_v = &r->c->xdev->hmd->views[0]; 239 - struct xrt_view *r_v = &r->c->xdev->hmd->views[1]; 240 - 241 - struct render_viewport_data l_viewport_data; 242 - struct render_viewport_data r_viewport_data; 243 - 244 - if (pre_rotate) { 245 - l_viewport_data = (struct render_viewport_data){ 246 - .x = (uint32_t)(l_v->viewport.y_pixels * scale_x), 247 - .y = (uint32_t)(l_v->viewport.x_pixels * scale_y), 248 - .w = (uint32_t)(l_v->viewport.h_pixels * scale_x), 249 - .h = (uint32_t)(l_v->viewport.w_pixels * scale_y), 250 - }; 251 - r_viewport_data = (struct render_viewport_data){ 252 - .x = (uint32_t)(r_v->viewport.y_pixels * scale_x), 253 - .y = (uint32_t)(r_v->viewport.x_pixels * scale_y), 254 - .w = (uint32_t)(r_v->viewport.h_pixels * scale_x), 255 - .h = (uint32_t)(r_v->viewport.w_pixels * scale_y), 256 - }; 257 - } else { 258 - l_viewport_data = (struct render_viewport_data){ 259 - .x = (uint32_t)(l_v->viewport.x_pixels * scale_x), 260 - .y = (uint32_t)(l_v->viewport.y_pixels * scale_y), 261 - .w = (uint32_t)(l_v->viewport.w_pixels * scale_x), 262 - .h = (uint32_t)(l_v->viewport.h_pixels * scale_y), 263 - }; 264 - r_viewport_data = (struct render_viewport_data){ 265 - .x = (uint32_t)(r_v->viewport.x_pixels * scale_x), 266 - .y = 
(uint32_t)(r_v->viewport.y_pixels * scale_y), 267 - .w = (uint32_t)(r_v->viewport.w_pixels * scale_x), 268 - .h = (uint32_t)(r_v->viewport.h_pixels * scale_y), 269 - }; 238 + for (uint32_t i = 0; i < view_count; ++i) { 239 + struct xrt_view *v = &r->c->xdev->hmd->views[i]; 240 + if (pre_rotate) { 241 + out_viewport_data[i] = (struct render_viewport_data){ 242 + .x = (uint32_t)(v->viewport.y_pixels * scale_x), 243 + .y = (uint32_t)(v->viewport.x_pixels * scale_y), 244 + .w = (uint32_t)(v->viewport.h_pixels * scale_x), 245 + .h = (uint32_t)(v->viewport.w_pixels * scale_y), 246 + }; 247 + } else { 248 + out_viewport_data[i] = (struct render_viewport_data){ 249 + .x = (uint32_t)(v->viewport.x_pixels * scale_x), 250 + .y = (uint32_t)(v->viewport.y_pixels * scale_y), 251 + .w = (uint32_t)(v->viewport.w_pixels * scale_x), 252 + .h = (uint32_t)(v->viewport.h_pixels * scale_y), 253 + }; 254 + } 270 255 } 271 - 272 - *out_l_viewport_data = l_viewport_data; 273 - *out_r_viewport_data = r_viewport_data; 274 256 } 275 257 276 258 static void 277 - calc_vertex_rot_data(struct comp_renderer *r, struct xrt_matrix_2x2 out_vertex_rots[2]) 259 + calc_vertex_rot_data(struct comp_renderer *r, struct xrt_matrix_2x2 out_vertex_rots[XRT_MAX_VIEWS], size_t view_count) 278 260 { 279 261 bool pre_rotate = false; 280 262 if (r->c->target->surface_transform & VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR || ··· 291 273 }, 292 274 }}; 293 275 294 - for (uint32_t i = 0; i < 2; i++) { 276 + for (uint32_t i = 0; i < view_count; i++) { 295 277 // Get the view. 
296 278 struct xrt_view *v = &r->c->xdev->hmd->views[i]; 297 279 ··· 310 292 static void 311 293 calc_pose_data(struct comp_renderer *r, 312 294 enum comp_target_fov_source fov_source, 313 - struct xrt_fov out_fovs[2], 314 - struct xrt_pose out_world[2], 315 - struct xrt_pose out_eye[2]) 295 + struct xrt_fov out_fovs[XRT_MAX_VIEWS], 296 + struct xrt_pose out_world[XRT_MAX_VIEWS], 297 + struct xrt_pose out_eye[XRT_MAX_VIEWS], 298 + uint32_t view_count) 316 299 { 317 300 COMP_TRACE_MARKER(); 318 301 ··· 323 306 }; 324 307 325 308 struct xrt_space_relation head_relation = XRT_SPACE_RELATION_ZERO; 326 - struct xrt_fov xdev_fovs[2] = XRT_STRUCT_INIT; 327 - struct xrt_pose xdev_poses[2] = XRT_STRUCT_INIT; 309 + struct xrt_fov xdev_fovs[XRT_MAX_VIEWS] = XRT_STRUCT_INIT; 310 + struct xrt_pose xdev_poses[XRT_MAX_VIEWS] = XRT_STRUCT_INIT; 328 311 329 312 xrt_device_get_view_poses( // 330 313 r->c->xdev, // xdev 331 314 &default_eye_relation, // default_eye_relation 332 315 r->c->frame.rendering.predicted_display_time_ns, // at_timestamp_ns 333 - 2, // view_count 316 + view_count, // view_count 334 317 &head_relation, // out_head_relation 335 318 xdev_fovs, // out_fovs 336 319 xdev_poses); // out_poses 337 320 338 - struct xrt_fov dist_fov[2] = XRT_STRUCT_INIT; 339 - for (uint32_t i = 0; i < 2; i++) { 321 + struct xrt_fov dist_fov[XRT_MAX_VIEWS] = XRT_STRUCT_INIT; 322 + for (uint32_t i = 0; i < view_count; i++) { 340 323 dist_fov[i] = r->c->xdev->hmd->distortion.fov[i]; 341 324 } 342 325 ··· 347 330 case COMP_TARGET_FOV_SOURCE_DEVICE_VIEWS: use_xdev = true; break; 348 331 } 349 332 350 - for (uint32_t i = 0; i < 2; i++) { 333 + for (uint32_t i = 0; i < view_count; i++) { 351 334 const struct xrt_fov fov = use_xdev ? 
xdev_fovs[i] : dist_fov[i]; 352 335 const struct xrt_pose eye_pose = xdev_poses[i]; 353 336 ··· 588 571 VK_ATTACHMENT_LOAD_OP_CLEAR, // load_op 589 572 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); // final_layout 590 573 591 - for (uint32_t i = 0; i < ARRAY_SIZE(r->scratch.views); i++) { 574 + for (uint32_t i = 0; i < c->nr.view_count; i++) { 592 575 bret = comp_scratch_single_images_ensure(&r->c->scratch.views[i], &r->c->base.vk, scratch_extent); 593 576 if (!bret) { 594 577 COMP_ERROR(c, "comp_scratch_single_images_ensure: false"); ··· 838 821 comp_mirror_fini(&r->mirror_to_debug_gui, vk); 839 822 840 823 // Do this after the layer renderer. 841 - for (uint32_t i = 0; i < ARRAY_SIZE(r->scratch.views); i++) { 824 + for (uint32_t i = 0; i < r->c->nr.view_count; i++) { 842 825 for (uint32_t k = 0; k < COMP_SCRATCH_NUM_IMAGES; k++) { 843 826 render_gfx_target_resources_close(&r->scratch.views[i].targets[k]); 844 827 } ··· 883 866 assert(!fast_path || c->base.slot.layer_count >= 1); 884 867 885 868 // Viewport information. 886 - struct render_viewport_data viewport_datas[2]; 887 - calc_viewport_data(r, &viewport_datas[0], &viewport_datas[1]); 869 + struct render_viewport_data viewport_datas[XRT_MAX_VIEWS]; 870 + calc_viewport_data(r, viewport_datas, rr->r->view_count); 888 871 889 872 // Vertex rotation information. 890 - struct xrt_matrix_2x2 vertex_rots[2]; 891 - calc_vertex_rot_data(r, vertex_rots); 873 + struct xrt_matrix_2x2 vertex_rots[XRT_MAX_VIEWS]; 874 + calc_vertex_rot_data(r, vertex_rots, rr->r->view_count); 892 875 893 876 // Device view information. 
894 - struct xrt_fov fovs[2]; 895 - struct xrt_pose world_poses[2]; 896 - struct xrt_pose eye_poses[2]; 897 - calc_pose_data( // 898 - r, // r 899 - fov_source, // fov_source 900 - fovs, // fovs[2] 901 - world_poses, // world_poses[2] 902 - eye_poses); // eye_poses[2] 877 + struct xrt_fov fovs[XRT_MAX_VIEWS]; 878 + struct xrt_pose world_poses[XRT_MAX_VIEWS]; 879 + struct xrt_pose eye_poses[XRT_MAX_VIEWS]; 880 + calc_pose_data( // 881 + r, // r 882 + fov_source, // fov_source 883 + fovs, // fovs 884 + world_poses, // world_poses 885 + eye_poses, // eye_poses 886 + rr->r->view_count); // view_count 903 887 904 888 905 889 // The arguments for the dispatch function. ··· 909 893 rtr, // rtr 910 894 fast_path, // fast_path 911 895 do_timewarp); // do_timewarp 912 - 913 - for (uint32_t i = 0; i < 2; i++) { 896 + for (uint32_t i = 0; i < rr->r->view_count; i++) { 914 897 // Which image of the scratch images for this view are we using. 915 898 uint32_t scratch_index = crss->views[i].index; 916 899 ··· 1003 986 bool do_timewarp = !c->debug.atw_off; 1004 987 1005 988 // Device view information. 1006 - struct xrt_fov fovs[2]; 1007 - struct xrt_pose world_poses[2]; 1008 - struct xrt_pose eye_poses[2]; 1009 - calc_pose_data( // 1010 - r, // r 1011 - fov_source, // fov_source 1012 - fovs, // fovs[2] 1013 - world_poses, // world_poses[2] 1014 - eye_poses); // eye_poses[2] 989 + struct xrt_fov fovs[XRT_MAX_VIEWS]; 990 + struct xrt_pose world_poses[XRT_MAX_VIEWS]; 991 + struct xrt_pose eye_poses[XRT_MAX_VIEWS]; 992 + calc_pose_data( // 993 + r, // r 994 + fov_source, // fov_source 995 + fovs, // fovs 996 + world_poses, // world_poses 997 + eye_poses, // eye_poses 998 + crc->r->view_count); // view_count 1015 999 1016 1000 // Target Vulkan resources.. 1017 1001 VkImage target_image = r->c->target->images[r->acquired_buffer].handle; 1018 1002 VkImageView target_image_view = r->c->target->images[r->acquired_buffer].view; 1019 1003 1020 1004 // Target view information. 
1021 - struct render_viewport_data views[2]; 1022 - calc_viewport_data(r, &views[0], &views[1]); 1005 + struct render_viewport_data views[XRT_MAX_VIEWS]; 1006 + calc_viewport_data(r, views, crc->r->view_count); 1023 1007 1024 1008 // The arguments for the dispatch function. 1025 1009 struct comp_render_dispatch_data data; ··· 1030 1014 fast_path, // fast_path 1031 1015 do_timewarp); // do_timewarp 1032 1016 1033 - for (uint32_t i = 0; i < 2; i++) { 1017 + for (uint32_t i = 0; i < crc->r->view_count; i++) { 1034 1018 // Which image of the scratch images for this view are we using. 1035 1019 uint32_t scratch_index = crss->views[i].index; 1036 1020 ··· 1139 1123 comp_target_update_timings(ct); 1140 1124 1141 1125 // Hardcoded for now. 1142 - uint32_t view_count = 2; 1126 + const uint32_t view_count = c->nr.view_count; 1143 1127 enum comp_target_fov_source fov_source = COMP_TARGET_FOV_SOURCE_DISTORTION; 1144 1128 1145 1129 // For scratch image debugging.
+1 -1
src/xrt/compositor/mock/mock_compositor.cpp
··· 188 188 // mc->base.base.begin_frame = mock_compositor_begin_frame; 189 189 // mc->base.base.discard_frame = mock_compositor_discard_frame; 190 190 // mc->base.base.layer_begin = mock_compositor_layer_begin; 191 - // mc->base.base.layer_stereo_projection = mock_compositor_layer_stereo_projection; 191 + // mc->base.base.layer_projection = mock_compositor_layer_projection; 192 192 // mc->base.base.layer_stereo_projection_depth = mock_compositor_layer_stereo_projection_depth; 193 193 // mc->base.base.layer_quad = mock_compositor_layer_quad; 194 194 // mc->base.base.layer_cube = mock_compositor_layer_cube;
+6 -6
src/xrt/compositor/mock/mock_compositor.h
··· 154 154 enum xrt_blend_mode env_blend_mode); 155 155 156 156 /*! 157 - * Optional function pointer for mock compositor, called during @ref xrt_comp_layer_stereo_projection 157 + * Optional function pointer for mock compositor, called during @ref xrt_comp_layer_projection 158 158 */ 159 - xrt_result_t (*layer_stereo_projection)(struct mock_compositor *mc, 160 - struct xrt_device *xdev, 161 - struct xrt_swapchain *l_xsc, 162 - struct xrt_swapchain *r_xsc, 163 - const struct xrt_layer_data *data); 159 + xrt_result_t (*layer_projection)(struct mock_compositor *mc, 160 + struct xrt_device *xdev, 161 + struct xrt_swapchain *l_xsc, 162 + struct xrt_swapchain *r_xsc, 163 + const struct xrt_layer_data *data); 164 164 165 165 /*! 166 166 * Optional function pointer for mock compositor, called during @ref
+8 -8
src/xrt/compositor/multi/comp_multi_compositor.c
··· 645 645 } 646 646 647 647 static xrt_result_t 648 - multi_compositor_layer_stereo_projection(struct xrt_compositor *xc, 649 - struct xrt_device *xdev, 650 - struct xrt_swapchain *l_xsc, 651 - struct xrt_swapchain *r_xsc, 652 - const struct xrt_layer_data *data) 648 + multi_compositor_layer_projection(struct xrt_compositor *xc, 649 + struct xrt_device *xdev, 650 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 651 + const struct xrt_layer_data *data) 653 652 { 654 653 struct multi_compositor *mc = multi_compositor(xc); 655 654 (void)mc; 656 655 657 656 size_t index = mc->progress.layer_count++; 658 657 mc->progress.layers[index].xdev = xdev; 659 - xrt_swapchain_reference(&mc->progress.layers[index].xscs[0], l_xsc); 660 - xrt_swapchain_reference(&mc->progress.layers[index].xscs[1], r_xsc); 658 + for (uint32_t i = 0; i < data->proj.view_count; ++i) { 659 + xrt_swapchain_reference(&mc->progress.layers[index].xscs[i], xsc[i]); 660 + } 661 661 mc->progress.layers[index].data = *data; 662 662 663 663 return XRT_SUCCESS; ··· 969 969 mc->base.base.begin_frame = multi_compositor_begin_frame; 970 970 mc->base.base.discard_frame = multi_compositor_discard_frame; 971 971 mc->base.base.layer_begin = multi_compositor_layer_begin; 972 - mc->base.base.layer_stereo_projection = multi_compositor_layer_stereo_projection; 972 + mc->base.base.layer_projection = multi_compositor_layer_projection; 973 973 mc->base.base.layer_stereo_projection_depth = multi_compositor_layer_stereo_projection_depth; 974 974 mc->base.base.layer_quad = multi_compositor_layer_quad; 975 975 mc->base.base.layer_cube = multi_compositor_layer_cube;
+1 -1
src/xrt/compositor/multi/comp_multi_private.h
··· 65 65 * 66 66 * How many are actually used depends on the value of @p data.type 67 67 */ 68 - struct xrt_swapchain *xscs[4]; 68 + struct xrt_swapchain *xscs[2 * XRT_MAX_VIEWS]; 69 69 70 70 /*! 71 71 * All basic (trivially-serializable) data associated with a layer,
+11 -10
src/xrt/compositor/multi/comp_multi_system.c
··· 52 52 do_projection_layer(struct xrt_compositor *xc, struct multi_compositor *mc, struct multi_layer_entry *layer, uint32_t i) 53 53 { 54 54 struct xrt_device *xdev = layer->xdev; 55 - struct xrt_swapchain *l_xcs = layer->xscs[0]; 56 - struct xrt_swapchain *r_xcs = layer->xscs[1]; 55 + 56 + // Cast away 57 + struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data; 57 58 58 - if (l_xcs == NULL || r_xcs == NULL) { 59 - U_LOG_E("Invalid swap chain for projection layer #%u!", i); 60 - return; 59 + // Do not need to copy the reference, but should verify the pointers for consistency 60 + for (uint32_t j = 0; j < data->proj.view_count; j++) { 61 + if (layer->xscs[j] == NULL) { 62 + U_LOG_E("Invalid swap chain for projection layer #%u!", i); 63 + return; 64 + } 61 65 } 62 66 63 67 if (xdev == NULL) { ··· 65 69 return; 66 70 } 67 71 68 - // Cast away 69 - struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data; 70 - 71 - xrt_comp_layer_stereo_projection(xc, xdev, l_xcs, r_xcs, data); 72 + xrt_comp_layer_projection(xc, xdev, layer->xscs, data); 72 73 } 73 74 74 75 static void ··· 282 283 struct multi_layer_entry *layer = &mc->delivered.layers[i]; 283 284 284 285 switch (layer->data.type) { 285 - case XRT_LAYER_STEREO_PROJECTION: do_projection_layer(xc, mc, layer, i); break; 286 + case XRT_LAYER_PROJECTION: do_projection_layer(xc, mc, layer, i); break; 286 287 case XRT_LAYER_STEREO_PROJECTION_DEPTH: do_projection_layer_depth(xc, mc, layer, i); break; 287 288 case XRT_LAYER_QUAD: do_quad_layer(xc, mc, layer, i); break; 288 289 case XRT_LAYER_CUBE: do_cube_layer(xc, mc, layer, i); break;
+9 -10
src/xrt/compositor/null/null_compositor.c
··· 270 270 (void)sys_info->client_vk_deviceUUID; 271 271 (void)sys_info->client_d3d_deviceLUID; 272 272 (void)sys_info->client_d3d_deviceLUID_valid; 273 - 273 + uint32_t view_count = xdev->hmd->view_count; 274 274 // clang-format off 275 - sys_info->views[0].recommended.width_pixels = RECOMMENDED_VIEW_WIDTH; 276 - sys_info->views[0].recommended.height_pixels = RECOMMENDED_VIEW_HEIGHT; 277 - sys_info->views[0].recommended.sample_count = 1; 278 - sys_info->views[0].max.width_pixels = MAX_VIEW_WIDTH; 279 - sys_info->views[0].max.height_pixels = MAX_VIEW_HEIGHT; 280 - sys_info->views[0].max.sample_count = 1; 275 + for (uint32_t i = 0; i < view_count; ++i) { 276 + sys_info->views[i].recommended.width_pixels = RECOMMENDED_VIEW_WIDTH; 277 + sys_info->views[i].recommended.height_pixels = RECOMMENDED_VIEW_HEIGHT; 278 + sys_info->views[i].recommended.sample_count = 1; 279 + sys_info->views[i].max.width_pixels = MAX_VIEW_WIDTH; 280 + sys_info->views[i].max.height_pixels = MAX_VIEW_HEIGHT; 281 + sys_info->views[i].max.sample_count = 1; 282 + } 281 283 // clang-format on 282 - 283 - // Assumes the two views (eyes) are similarly configured 284 - sys_info->views[1] = sys_info->views[0]; 285 284 286 285 // Copy the list directly. 287 286 assert(xdev->hmd->blend_mode_count <= XRT_MAX_DEVICE_BLEND_MODES);
+107 -122
src/xrt/compositor/render/render_compute.c
··· 51 51 * For dispatching compute to the view, calculate the number of groups. 52 52 */ 53 53 static void 54 - calc_dispatch_dims_2_views(const struct render_viewport_data views[2], uint32_t *out_w, uint32_t *out_h) 54 + calc_dispatch_dims_views(const struct render_viewport_data views[XRT_MAX_VIEWS], 55 + uint32_t view_count, 56 + uint32_t *out_w, 57 + uint32_t *out_h) 55 58 { 56 59 #define IMAX(a, b) ((a) > (b) ? (a) : (b)) 57 - uint32_t w = IMAX(views[0].w, views[1].w); 58 - uint32_t h = IMAX(views[0].h, views[1].h); 60 + uint32_t w = 0; 61 + uint32_t h = 0; 62 + for (uint32_t i = 0; i < view_count; ++i) { 63 + w = IMAX(w, views[i].w); 64 + h = IMAX(h, views[i].h); 65 + } 59 66 #undef IMAX 60 67 61 68 // Power of two divide and round up. ··· 76 83 XRT_MAYBE_UNUSED static void 77 84 update_compute_layer_descriptor_set(struct vk_bundle *vk, 78 85 uint32_t src_binding, 79 - VkSampler src_samplers[RENDER_MAX_IMAGES], 80 - VkImageView src_image_views[RENDER_MAX_IMAGES], 86 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], 87 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], 81 88 uint32_t image_count, 82 89 uint32_t target_binding, 83 90 VkImageView target_image_view, ··· 86 93 VkDeviceSize ubo_size, 87 94 VkDescriptorSet descriptor_set) 88 95 { 89 - assert(image_count <= RENDER_MAX_IMAGES); 90 - 91 - VkDescriptorImageInfo src_image_info[RENDER_MAX_IMAGES]; 96 + VkDescriptorImageInfo src_image_info[RENDER_MAX_IMAGES_SIZE]; 92 97 for (uint32_t i = 0; i < image_count; i++) { 93 98 src_image_info[i].sampler = src_samplers[i]; 94 99 src_image_info[i].imageView = src_image_views[i]; ··· 144 149 XRT_MAYBE_UNUSED static void 145 150 update_compute_shared_descriptor_set(struct vk_bundle *vk, 146 151 uint32_t src_binding, 147 - VkSampler src_samplers[2], 148 - VkImageView src_image_views[2], 152 + VkSampler src_samplers[XRT_MAX_VIEWS], 153 + VkImageView src_image_views[XRT_MAX_VIEWS], 149 154 uint32_t distortion_binding, 150 - VkSampler distortion_samplers[6], 151 - 
VkImageView distortion_image_views[6], 155 + VkSampler distortion_samplers[3 * XRT_MAX_VIEWS], 156 + VkImageView distortion_image_views[3 * XRT_MAX_VIEWS], 152 157 uint32_t target_binding, 153 158 VkImageView target_image_view, 154 159 uint32_t ubo_binding, 155 160 VkBuffer ubo_buffer, 156 161 VkDeviceSize ubo_size, 157 - VkDescriptorSet descriptor_set) 162 + VkDescriptorSet descriptor_set, 163 + uint32_t view_count) 158 164 { 159 - VkDescriptorImageInfo src_image_info[2] = { 160 - { 161 - .sampler = src_samplers[0], 162 - .imageView = src_image_views[0], 163 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 164 - }, 165 - { 166 - .sampler = src_samplers[1], 167 - .imageView = src_image_views[1], 168 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 169 - }, 170 - }; 165 + VkDescriptorImageInfo src_image_info[XRT_MAX_VIEWS]; 166 + for (uint32_t i = 0; i < view_count; ++i) { 167 + src_image_info[i].sampler = src_samplers[i]; 168 + src_image_info[i].imageView = src_image_views[i]; 169 + src_image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; 170 + } 171 171 172 - VkDescriptorImageInfo distortion_image_info[6] = { 173 - { 174 - .sampler = distortion_samplers[0], 175 - .imageView = distortion_image_views[0], 176 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 177 - }, 178 - { 179 - .sampler = distortion_samplers[1], 180 - .imageView = distortion_image_views[1], 181 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 182 - }, 183 - { 184 - .sampler = distortion_samplers[2], 185 - .imageView = distortion_image_views[2], 186 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 187 - }, 188 - { 189 - .sampler = distortion_samplers[3], 190 - .imageView = distortion_image_views[3], 191 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 192 - }, 193 - { 194 - .sampler = distortion_samplers[4], 195 - .imageView = distortion_image_views[4], 196 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 197 - }, 
198 - { 199 - .sampler = distortion_samplers[5], 200 - .imageView = distortion_image_views[5], 201 - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 202 - }, 203 - }; 172 + VkDescriptorImageInfo distortion_image_info[3 * XRT_MAX_VIEWS]; 173 + for (uint32_t i = 0; i < 3 * view_count; ++i) { 174 + distortion_image_info[i].sampler = distortion_samplers[i]; 175 + distortion_image_info[i].imageView = distortion_image_views[i]; 176 + distortion_image_info[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; 177 + } 204 178 205 179 VkDescriptorImageInfo target_image_info = { 206 180 .imageView = target_image_view, ··· 218 192 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, 219 193 .dstSet = descriptor_set, 220 194 .dstBinding = src_binding, 221 - .descriptorCount = ARRAY_SIZE(src_image_info), 195 + .descriptorCount = view_count, 222 196 .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 223 197 .pImageInfo = src_image_info, 224 198 }, ··· 226 200 .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, 227 201 .dstSet = descriptor_set, 228 202 .dstBinding = distortion_binding, 229 - .descriptorCount = ARRAY_SIZE(distortion_image_info), 203 + .descriptorCount = 3 * view_count, 230 204 .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 231 205 .pImageInfo = distortion_image_info, 232 206 }, ··· 263 237 uint32_t ubo_binding, 264 238 VkBuffer ubo_buffer, 265 239 VkDeviceSize ubo_size, 266 - VkDescriptorSet descriptor_set) 240 + VkDescriptorSet descriptor_set, 241 + uint32_t view_count) 267 242 { 268 243 VkDescriptorImageInfo target_image_info = { 269 244 .imageView = target_image_view, ··· 320 295 struct vk_bundle *vk = r->vk; 321 296 crc->r = r; 322 297 323 - for (uint32_t i = 0; i < ARRAY_SIZE(crc->layer_descriptor_sets); i++) { 298 + for (uint32_t i = 0; i < RENDER_MAX_LAYER_RUNS_COUNT; i++) { 324 299 ret = vk_create_descriptor_set( // 325 300 vk, // vk_bundle 326 301 r->compute.descriptor_pool, // descriptor_pool ··· 417 392 
render_compute_layers(struct render_compute *crc, 418 393 VkDescriptorSet descriptor_set, 419 394 VkBuffer ubo, 420 - VkSampler src_samplers[RENDER_MAX_IMAGES], 421 - VkImageView src_image_views[RENDER_MAX_IMAGES], 395 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], 396 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], 422 397 uint32_t num_srcs, 423 398 VkImageView target_image_view, 424 399 const struct render_viewport_data *view, ··· 477 452 478 453 void 479 454 render_compute_projection_timewarp(struct render_compute *crc, 480 - VkSampler src_samplers[2], 481 - VkImageView src_image_views[2], 482 - const struct xrt_normalized_rect src_norm_rects[2], 483 - const struct xrt_pose src_poses[2], 484 - const struct xrt_fov src_fovs[2], 485 - const struct xrt_pose new_poses[2], 455 + VkSampler src_samplers[XRT_MAX_VIEWS], 456 + VkImageView src_image_views[XRT_MAX_VIEWS], 457 + const struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS], 458 + const struct xrt_pose src_poses[XRT_MAX_VIEWS], 459 + const struct xrt_fov src_fovs[XRT_MAX_VIEWS], 460 + const struct xrt_pose new_poses[XRT_MAX_VIEWS], 486 461 VkImage target_image, 487 462 VkImageView target_image_view, 488 - const struct render_viewport_data views[2]) 463 + const struct render_viewport_data views[XRT_MAX_VIEWS]) 489 464 { 490 465 assert(crc->r != NULL); 491 466 ··· 497 472 * UBO 498 473 */ 499 474 500 - struct xrt_matrix_4x4 time_warp_matrix[2]; 501 - render_calc_time_warp_matrix( // 502 - &src_poses[0], // 503 - &src_fovs[0], // 504 - &new_poses[0], // 505 - &time_warp_matrix[0]); // 506 - render_calc_time_warp_matrix( // 507 - &src_poses[1], // 508 - &src_fovs[1], // 509 - &new_poses[1], // 510 - &time_warp_matrix[1]); // 475 + struct xrt_matrix_4x4 time_warp_matrix[XRT_MAX_VIEWS]; 476 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 477 + render_calc_time_warp_matrix( // 478 + &src_poses[i], // 479 + &src_fovs[i], // 480 + &new_poses[i], // 481 + &time_warp_matrix[i]); // 482 + } 511 483 512 
484 struct render_compute_distortion_ubo_data *data = 513 485 (struct render_compute_distortion_ubo_data *)r->compute.distortion.ubo.mapped; 514 - data->views[0] = views[0]; 515 - data->views[1] = views[1]; 516 - data->pre_transforms[0] = r->distortion.uv_to_tanangle[0]; 517 - data->pre_transforms[1] = r->distortion.uv_to_tanangle[1]; 518 - data->transforms[0] = time_warp_matrix[0]; 519 - data->transforms[1] = time_warp_matrix[1]; 520 - data->post_transforms[0] = src_norm_rects[0]; 521 - data->post_transforms[1] = src_norm_rects[1]; 522 - 486 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 487 + data->views[i] = views[i]; 488 + data->pre_transforms[i] = r->distortion.uv_to_tanangle[i]; 489 + data->transforms[i] = time_warp_matrix[i]; 490 + data->post_transforms[i] = src_norm_rects[i]; 491 + } 523 492 524 493 /* 525 494 * Source, target and distortion images. ··· 544 513 subresource_range); // 545 514 546 515 VkSampler sampler = r->samplers.clamp_to_edge; 547 - VkSampler distortion_samplers[6] = { 548 - sampler, sampler, sampler, sampler, sampler, sampler, 549 - }; 516 + VkSampler distortion_samplers[3 * XRT_MAX_VIEWS]; 517 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 518 + distortion_samplers[3 * i + 0] = sampler; 519 + distortion_samplers[3 * i + 1] = sampler; 520 + distortion_samplers[3 * i + 2] = sampler; 521 + } 550 522 551 523 update_compute_shared_descriptor_set( // 552 524 vk, // ··· 561 533 r->compute.ubo_binding, // 562 534 r->compute.distortion.ubo.buffer, // 563 535 VK_WHOLE_SIZE, // 564 - crc->shared_descriptor_set); // 536 + crc->shared_descriptor_set, // 537 + crc->r->view_count); // 565 538 566 539 vk->vkCmdBindPipeline( // 567 540 r->cmd, // commandBuffer ··· 580 553 581 554 582 555 uint32_t w = 0, h = 0; 583 - calc_dispatch_dims_2_views(views, &w, &h); 556 + calc_dispatch_dims_views(views, crc->r->view_count, &w, &h); 584 557 assert(w != 0 && h != 0); 585 558 586 559 vk->vkCmdDispatch( // ··· 616 589 617 590 void 618 591 
render_compute_projection(struct render_compute *crc, 619 - VkSampler src_samplers[2], 620 - VkImageView src_image_views[2], 621 - const struct xrt_normalized_rect src_norm_rects[2], 592 + VkSampler src_samplers[XRT_MAX_VIEWS], 593 + VkImageView src_image_views[XRT_MAX_VIEWS], 594 + const struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS], 622 595 VkImage target_image, 623 596 VkImageView target_image_view, 624 - const struct render_viewport_data views[2]) 597 + const struct render_viewport_data views[XRT_MAX_VIEWS]) 625 598 { 626 599 assert(crc->r != NULL); 627 600 ··· 635 608 636 609 struct render_compute_distortion_ubo_data *data = 637 610 (struct render_compute_distortion_ubo_data *)r->compute.distortion.ubo.mapped; 638 - data->views[0] = views[0]; 639 - data->views[1] = views[1]; 640 - data->post_transforms[0] = src_norm_rects[0]; 641 - data->post_transforms[1] = src_norm_rects[1]; 611 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 612 + data->views[i] = views[i]; 613 + data->post_transforms[i] = src_norm_rects[i]; 614 + } 642 615 643 616 644 617 /* ··· 664 637 subresource_range); // 665 638 666 639 VkSampler sampler = r->samplers.clamp_to_edge; 667 - VkSampler distortion_samplers[6] = { 668 - sampler, sampler, sampler, sampler, sampler, sampler, 669 - }; 640 + VkSampler distortion_samplers[3 * XRT_MAX_VIEWS]; 641 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 642 + distortion_samplers[3 * i + 0] = sampler; 643 + distortion_samplers[3 * i + 1] = sampler; 644 + distortion_samplers[3 * i + 2] = sampler; 645 + } 670 646 671 647 update_compute_shared_descriptor_set( // 672 648 vk, // ··· 681 657 r->compute.ubo_binding, // 682 658 r->compute.distortion.ubo.buffer, // 683 659 VK_WHOLE_SIZE, // 684 - crc->shared_descriptor_set); // 660 + crc->shared_descriptor_set, // 661 + crc->r->view_count); // 685 662 686 663 vk->vkCmdBindPipeline( // 687 664 r->cmd, // commandBuffer ··· 700 677 701 678 702 679 uint32_t w = 0, h = 0; 703 - 
calc_dispatch_dims_2_views(views, &w, &h); 680 + calc_dispatch_dims_views(views, crc->r->view_count, &w, &h); 704 681 assert(w != 0 && h != 0); 705 682 706 683 vk->vkCmdDispatch( // ··· 735 712 } 736 713 737 714 void 738 - render_compute_clear(struct render_compute *crc, // 739 - VkImage target_image, // 740 - VkImageView target_image_view, // 741 - const struct render_viewport_data views[2]) // 715 + render_compute_clear(struct render_compute *crc, // 716 + VkImage target_image, // 717 + VkImageView target_image_view, // 718 + const struct render_viewport_data views[XRT_MAX_VIEWS]) // 742 719 { 743 720 assert(crc->r != NULL); 744 721 ··· 751 728 */ 752 729 753 730 // Calculate transforms. 754 - struct xrt_matrix_4x4 transforms[2]; 755 - for (uint32_t i = 0; i < 2; i++) { 731 + struct xrt_matrix_4x4 transforms[XRT_MAX_VIEWS]; 732 + for (uint32_t i = 0; i < crc->r->view_count; i++) { 756 733 math_matrix_4x4_identity(&transforms[i]); 757 734 } 758 735 759 736 struct render_compute_distortion_ubo_data *data = 760 737 (struct render_compute_distortion_ubo_data *)r->compute.clear.ubo.mapped; 761 - data->views[0] = views[0]; 762 - data->views[1] = views[1]; 763 - 738 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 739 + data->views[i] = views[i]; 740 + } 764 741 765 742 /* 766 743 * Source, target and distortion images. 
··· 785 762 subresource_range); // 786 763 787 764 VkSampler sampler = r->samplers.mock; 788 - VkSampler src_samplers[2] = {sampler, sampler}; 789 - VkImageView src_image_views[2] = {r->mock.color.image_view, r->mock.color.image_view}; 790 - VkSampler distortion_samplers[6] = {sampler, sampler, sampler, sampler, sampler, sampler}; 765 + VkSampler src_samplers[XRT_MAX_VIEWS]; 766 + VkImageView src_image_views[XRT_MAX_VIEWS]; 767 + VkSampler distortion_samplers[3 * XRT_MAX_VIEWS]; 768 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 769 + src_samplers[i] = sampler; 770 + src_image_views[i] = r->mock.color.image_view; 771 + distortion_samplers[3 * i + 0] = sampler; 772 + distortion_samplers[3 * i + 1] = sampler; 773 + distortion_samplers[3 * i + 2] = sampler; 774 + } 791 775 792 776 update_compute_shared_descriptor_set( // 793 777 vk, // vk_bundle ··· 802 786 r->compute.ubo_binding, // ubo_binding 803 787 r->compute.clear.ubo.buffer, // ubo_buffer 804 788 VK_WHOLE_SIZE, // ubo_size 805 - crc->shared_descriptor_set); // descriptor_set 789 + crc->shared_descriptor_set, // descriptor_set 790 + crc->r->view_count); // view_count 806 791 807 792 vk->vkCmdBindPipeline( // 808 793 r->cmd, // commandBuffer ··· 821 806 822 807 823 808 uint32_t w = 0, h = 0; 824 - calc_dispatch_dims_2_views(views, &w, &h); 809 + calc_dispatch_dims_views(views, crc->r->view_count, &w, &h); 825 810 assert(w != 0 && h != 0); 826 811 827 812 vk->vkCmdDispatch( //
+19 -26
src/xrt/compositor/render/render_distortion.c
··· 285 285 struct xrt_device *xdev, 286 286 bool pre_rotate) 287 287 { 288 - struct render_buffer bufs[RENDER_DISTORTION_NUM_IMAGES]; 289 - VkDeviceMemory device_memories[RENDER_DISTORTION_NUM_IMAGES]; 290 - VkImage images[RENDER_DISTORTION_NUM_IMAGES]; 291 - VkImageView image_views[RENDER_DISTORTION_NUM_IMAGES]; 288 + struct render_buffer bufs[RENDER_DISTORTION_IMAGES_SIZE]; 289 + VkDeviceMemory device_memories[RENDER_DISTORTION_IMAGES_SIZE]; 290 + VkImage images[RENDER_DISTORTION_IMAGES_SIZE]; 291 + VkImageView image_views[RENDER_DISTORTION_IMAGES_SIZE]; 292 292 VkCommandBuffer upload_buffer = VK_NULL_HANDLE; 293 293 VkResult ret; 294 294 ··· 297 297 * Basics 298 298 */ 299 299 300 - static_assert(RENDER_DISTORTION_NUM_IMAGES == 6, "Wrong number of distortion images!"); 301 - 302 - render_calc_uv_to_tangent_lengths_rect(&xdev->hmd->distortion.fov[0], &r->distortion.uv_to_tanangle[0]); 303 - render_calc_uv_to_tangent_lengths_rect(&xdev->hmd->distortion.fov[1], &r->distortion.uv_to_tanangle[1]); 304 - 300 + for (uint32_t i = 0; i < r->view_count; ++i) { 301 + render_calc_uv_to_tangent_lengths_rect(&xdev->hmd->distortion.fov[i], &r->distortion.uv_to_tanangle[i]); 302 + } 305 303 306 304 /* 307 305 * Buffers with data to upload. 
306 + * view_count=2,RRGGBB 307 + * view_count=3,RRRGGGBBB 308 308 */ 309 - 310 - ret = create_and_fill_in_distortion_buffer_for_view(vk, xdev, &bufs[0], &bufs[2], &bufs[4], 0, pre_rotate); 311 - VK_CHK_WITH_GOTO(ret, "create_and_fill_in_distortion_buffer_for_view", err_resources); 312 - 313 - ret = create_and_fill_in_distortion_buffer_for_view(vk, xdev, &bufs[1], &bufs[3], &bufs[5], 1, pre_rotate); 314 - VK_CHK_WITH_GOTO(ret, "create_and_fill_in_distortion_buffer_for_view", err_resources); 315 - 309 + for (uint32_t i = 0; i < r->view_count; ++i) { 310 + ret = create_and_fill_in_distortion_buffer_for_view(vk, xdev, &bufs[i], &bufs[r->view_count + i], 311 + &bufs[2 * r->view_count + i], i, pre_rotate); 312 + VK_CHK_WITH_GOTO(ret, "create_and_fill_in_distortion_buffer_for_view", err_resources); 313 + } 316 314 317 315 /* 318 316 * Command submission. ··· 326 324 VK_CHK_WITH_GOTO(ret, "vk_cmd_pool_create_and_begin_cmd_buffer_locked", err_unlock); 327 325 VK_NAME_COMMAND_BUFFER(vk, upload_buffer, "render_resources distortion command buffer"); 328 326 329 - for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) { 327 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 330 328 ret = create_and_queue_upload_locked( // 331 329 vk, // vk_bundle 332 330 pool, // pool ··· 349 347 350 348 r->distortion.pre_rotated = pre_rotate; 351 349 352 - for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) { 350 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 353 351 r->distortion.device_memories[i] = device_memories[i]; 354 352 r->distortion.images[i] = images[i]; 355 353 r->distortion.image_views[i] = image_views[i]; ··· 360 358 * Tidy 361 359 */ 362 360 363 - for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) { 361 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 364 362 render_buffer_close(vk, &bufs[i]); 365 363 } 366 364 ··· 374 372 vk_cmd_pool_unlock(pool); 375 373 376 374 err_resources: 377 - for (uint32_t i = 0; 
i < RENDER_DISTORTION_NUM_IMAGES; i++) { 375 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 378 376 D(ImageView, image_views[i]); 379 377 D(Image, images[i]); 380 378 DF(Memory, device_memories[i]); ··· 396 394 { 397 395 struct vk_bundle *vk = r->vk; 398 396 399 - static_assert(RENDER_DISTORTION_NUM_IMAGES == ARRAY_SIZE(r->distortion.image_views), "Array size is wrong!"); 400 - static_assert(RENDER_DISTORTION_NUM_IMAGES == ARRAY_SIZE(r->distortion.images), "Array size is wrong!"); 401 - static_assert(RENDER_DISTORTION_NUM_IMAGES == ARRAY_SIZE(r->distortion.device_memories), 402 - "Array size is wrong!"); 403 - 404 - for (uint32_t i = 0; i < RENDER_DISTORTION_NUM_IMAGES; i++) { 397 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 405 398 D(ImageView, r->distortion.image_views[i]); 406 399 D(Image, r->distortion.images[i]); 407 400 DF(Memory, r->distortion.device_memories[i]);
+55 -49
src/xrt/compositor/render/render_interface.h
··· 61 61 * Max number of images that can be given at a single time to the layer 62 62 * squasher in a single dispatch. 63 63 */ 64 - #define RENDER_MAX_IMAGES (RENDER_MAX_LAYERS * 2) 64 + #define RENDER_MAX_IMAGES_SIZE (RENDER_MAX_LAYERS * XRT_MAX_VIEWS) 65 + #define RENDER_MAX_IMAGES_COUNT (RENDER_MAX_LAYERS * r->view_count) 65 66 66 67 /*! 67 68 * Maximum number of times that the layer squasher shader can run per ··· 70 71 * two or more different compositions it's not the maximum number of views per 71 72 * composition (which is this number divided by number of composition). 72 73 */ 73 - #define RENDER_MAX_LAYER_RUNS (2) 74 + #define RENDER_MAX_LAYER_RUNS_SIZE (XRT_MAX_VIEWS) 75 + #define RENDER_MAX_LAYER_RUNS_COUNT (r->view_count) 74 76 75 77 //! How large in pixels the distortion image is. 76 78 #define RENDER_DISTORTION_IMAGE_DIMENSIONS (128) 77 79 78 - //! How many distortion images we have, one for each channel (3 rgb) and per view, total 6. 79 - #define RENDER_DISTORTION_NUM_IMAGES (6) 80 + //! How many distortion images we have, one for each channel (3 rgb) and per view. 81 + #define RENDER_DISTORTION_IMAGES_SIZE (3 * XRT_MAX_VIEWS) 82 + #define RENDER_DISTORTION_IMAGES_COUNT (3 * r->view_count) 80 83 81 84 //! Which binding does the layer projection and quad shader has it's UBO on. 82 85 #define RENDER_BINDING_LAYER_SHARED_UBO 0 ··· 347 350 */ 348 351 struct render_resources 349 352 { 353 + //! The count of views that we are rendering to. 354 + uint32_t view_count; 355 + 350 356 //! Vulkan resources. 351 357 struct vk_bundle *vk; 352 358 ··· 440 446 struct render_buffer ibo; 441 447 442 448 uint32_t vertex_count; 443 - uint32_t index_counts[2]; 449 + uint32_t index_counts[XRT_MAX_VIEWS]; 444 450 uint32_t stride; 445 - uint32_t index_offsets[2]; 451 + uint32_t index_offsets[XRT_MAX_VIEWS]; 446 452 uint32_t index_count_total; 447 453 448 454 //! Info ubos, only supports two views currently. 
449 - struct render_buffer ubos[2]; 455 + struct render_buffer ubos[XRT_MAX_VIEWS]; 450 456 } mesh; 451 457 452 458 /*! ··· 498 504 uint32_t image_array_size; 499 505 500 506 //! Target info. 501 - struct render_buffer ubos[RENDER_MAX_LAYER_RUNS]; 507 + struct render_buffer ubos[RENDER_MAX_LAYER_RUNS_SIZE]; 502 508 } layer; 503 509 504 510 struct ··· 534 540 struct 535 541 { 536 542 //! Transform to go from UV to tangle angles. 537 - struct xrt_normalized_rect uv_to_tanangle[2]; 543 + struct xrt_normalized_rect uv_to_tanangle[XRT_MAX_VIEWS]; 538 544 539 545 //! Backing memory to distortion images. 540 - VkDeviceMemory device_memories[RENDER_DISTORTION_NUM_IMAGES]; 546 + VkDeviceMemory device_memories[RENDER_DISTORTION_IMAGES_SIZE]; 541 547 542 548 //! Distortion images. 543 - VkImage images[RENDER_DISTORTION_NUM_IMAGES]; 549 + VkImage images[RENDER_DISTORTION_IMAGES_SIZE]; 544 550 545 551 //! The views into the distortion images. 546 - VkImageView image_views[RENDER_DISTORTION_NUM_IMAGES]; 552 + VkImageView image_views[RENDER_DISTORTION_IMAGES_SIZE]; 547 553 548 554 //! Whether distortion images have been pre-rotated 90 degrees. 549 555 bool pre_rotated; ··· 642 648 { 643 649 VkExtent2D extent; 644 650 645 - struct render_scratch_color_image color[2]; 651 + struct render_scratch_color_image color[XRT_MAX_VIEWS]; 646 652 }; 647 653 648 654 /*! ··· 1096 1102 struct render_resources *r; 1097 1103 1098 1104 //! Layer descriptor set. 1099 - VkDescriptorSet layer_descriptor_sets[RENDER_MAX_LAYER_RUNS]; 1105 + VkDescriptorSet layer_descriptor_sets[RENDER_MAX_LAYER_RUNS_SIZE]; 1100 1106 1101 1107 /*! 1102 1108 * Shared descriptor set, used for the clear and distortion shaders. It ··· 1138 1144 { 1139 1145 uint32_t val; 1140 1146 uint32_t unpremultiplied; 1141 - uint32_t padding[2]; 1147 + uint32_t padding[XRT_MAX_VIEWS]; 1142 1148 } layer_type[RENDER_MAX_LAYERS]; 1143 1149 1144 1150 //! Which image/sampler(s) correspond to each layer. 
1145 1151 struct 1146 1152 { 1147 - uint32_t images[2]; 1153 + uint32_t images[XRT_MAX_VIEWS]; 1148 1154 //! @todo Implement separated samplers and images (and change to samplers[2]) 1149 - uint32_t padding[2]; 1155 + uint32_t padding[XRT_MAX_VIEWS]; 1150 1156 } images_samplers[RENDER_MAX_LAYERS]; 1151 1157 1152 1158 //! Shared between cylinder and equirect2. ··· 1206 1212 struct 1207 1213 { 1208 1214 struct xrt_vec2 val; 1209 - float padding[2]; 1215 + float padding[XRT_MAX_VIEWS]; 1210 1216 } quad_extent[RENDER_MAX_LAYERS]; 1211 1217 }; 1212 1218 ··· 1217 1223 */ 1218 1224 struct render_compute_distortion_ubo_data 1219 1225 { 1220 - struct render_viewport_data views[2]; 1221 - struct xrt_normalized_rect pre_transforms[2]; 1222 - struct xrt_normalized_rect post_transforms[2]; 1223 - struct xrt_matrix_4x4 transforms[2]; 1226 + struct render_viewport_data views[XRT_MAX_VIEWS]; 1227 + struct xrt_normalized_rect pre_transforms[XRT_MAX_VIEWS]; 1228 + struct xrt_normalized_rect post_transforms[XRT_MAX_VIEWS]; 1229 + struct xrt_matrix_4x4 transforms[XRT_MAX_VIEWS]; 1224 1230 }; 1225 1231 1226 1232 /*! ··· 1270 1276 * @public @memberof render_compute 1271 1277 */ 1272 1278 void 1273 - render_compute_layers(struct render_compute *crc, // 1274 - VkDescriptorSet descriptor_set, // 1275 - VkBuffer ubo, // 1276 - VkSampler src_samplers[RENDER_MAX_IMAGES], // 1277 - VkImageView src_image_views[RENDER_MAX_IMAGES], // 1278 - uint32_t num_srcs, // 1279 - VkImageView target_image_view, // 1280 - const struct render_viewport_data *view, // 1281 - bool timewarp); // 1279 + render_compute_layers(struct render_compute *crc, // 1280 + VkDescriptorSet descriptor_set, // 1281 + VkBuffer ubo, // 1282 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], // 1283 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], // 1284 + uint32_t num_srcs, // 1285 + VkImageView target_image_view, // 1286 + const struct render_viewport_data *view, // 1287 + bool timewarp); // 1282 1288 1283 1289 /*! 
1284 1290 * @public @memberof render_compute 1285 1291 */ 1286 1292 void 1287 1293 render_compute_projection_timewarp(struct render_compute *crc, 1288 - VkSampler src_samplers[2], 1289 - VkImageView src_image_views[2], 1290 - const struct xrt_normalized_rect src_rects[2], 1291 - const struct xrt_pose src_poses[2], 1292 - const struct xrt_fov src_fovs[2], 1293 - const struct xrt_pose new_poses[2], 1294 + VkSampler src_samplers[XRT_MAX_VIEWS], 1295 + VkImageView src_image_views[XRT_MAX_VIEWS], 1296 + const struct xrt_normalized_rect src_rects[XRT_MAX_VIEWS], 1297 + const struct xrt_pose src_poses[XRT_MAX_VIEWS], 1298 + const struct xrt_fov src_fovs[XRT_MAX_VIEWS], 1299 + const struct xrt_pose new_poses[XRT_MAX_VIEWS], 1294 1300 VkImage target_image, 1295 1301 VkImageView target_image_view, 1296 - const struct render_viewport_data views[2]); 1302 + const struct render_viewport_data views[XRT_MAX_VIEWS]); 1297 1303 1298 1304 /*! 1299 1305 * @public @memberof render_compute 1300 1306 */ 1301 1307 void 1302 - render_compute_projection(struct render_compute *crc, // 1303 - VkSampler src_samplers[2], // 1304 - VkImageView src_image_views[2], // 1305 - const struct xrt_normalized_rect src_rects[2], // 1306 - VkImage target_image, // 1307 - VkImageView target_image_view, // 1308 - const struct render_viewport_data views[2]); // 1308 + render_compute_projection(struct render_compute *crc, // 1309 + VkSampler src_samplers[XRT_MAX_VIEWS], // 1310 + VkImageView src_image_views[XRT_MAX_VIEWS], // 1311 + const struct xrt_normalized_rect src_rects[XRT_MAX_VIEWS], // 1312 + VkImage target_image, // 1313 + VkImageView target_image_view, // 1314 + const struct render_viewport_data views[XRT_MAX_VIEWS]); // 1309 1315 1310 1316 /*! 
1311 1317 * @public @memberof render_compute 1312 1318 */ 1313 1319 void 1314 - render_compute_clear(struct render_compute *crc, // 1315 - VkImage target_image, // 1316 - VkImageView target_image_view, // 1317 - const struct render_viewport_data views[2]); // 1320 + render_compute_clear(struct render_compute *crc, // 1321 + VkImage target_image, // 1322 + VkImageView target_image_view, // 1323 + const struct render_viewport_data views[XRT_MAX_VIEWS]); // 1318 1324 1319 1325 1320 1326
+42 -48
src/xrt/compositor/render/render_resources.c
··· 19 19 #include "render/render_interface.h" 20 20 21 21 22 + #include <stdio.h> 23 + 24 + 22 25 /* 23 26 * 24 27 * Gfx shared ··· 143 146 } 144 147 145 148 XRT_CHECK_RESULT static bool 146 - init_mesh_ubo_buffers(struct vk_bundle *vk, struct render_buffer *l_ubo, struct render_buffer *r_ubo) 149 + init_mesh_ubo_buffers(struct vk_bundle *vk, struct render_buffer ubo[XRT_MAX_VIEWS], uint32_t view_count) 147 150 { 148 151 VkResult ret; 149 152 ··· 154 157 155 158 // Distortion ubo size. 156 159 VkDeviceSize ubo_size = sizeof(struct render_gfx_mesh_ubo_data); 160 + for (uint32_t i = 0; i < view_count; ++i) { 161 + ret = render_buffer_init(vk, // 162 + &ubo[i], // 163 + ubo_usage_flags, // 164 + memory_property_flags, // 165 + ubo_size); // size 166 + VK_CHK_WITH_RET(ret, "render_buffer_init", false); 167 + char name[20]; 168 + snprintf(name, sizeof(name), "mesh ubo %d", i); 169 + VK_NAME_BUFFER(vk, ubo[i].buffer, name); 157 170 158 - ret = render_buffer_init(vk, // 159 - l_ubo, // 160 - ubo_usage_flags, // 161 - memory_property_flags, // 162 - ubo_size); // size 163 - VK_CHK_WITH_RET(ret, "render_buffer_init", false); 164 - VK_NAME_BUFFER(vk, l_ubo->buffer, "mesh l_ubo"); 165 - 166 - ret = render_buffer_map(vk, l_ubo); 167 - VK_CHK_WITH_RET(ret, "render_buffer_map", false); 168 - 169 - ret = render_buffer_init(vk, // 170 - r_ubo, // 171 - ubo_usage_flags, // 172 - memory_property_flags, // 173 - ubo_size); // size 174 - VK_CHK_WITH_RET(ret, "render_buffer_init", false); 175 - VK_NAME_BUFFER(vk, r_ubo->buffer, "mesh r_ubo"); 176 - 177 - ret = render_buffer_map(vk, r_ubo); 178 - VK_CHK_WITH_RET(ret, "render_buffer_map", false); 179 - 171 + ret = render_buffer_map(vk, &ubo[i]); 172 + VK_CHK_WITH_RET(ret, "render_buffer_map", false); 173 + } 180 174 return true; 181 175 } 182 176 ··· 532 526 * Constants 533 527 */ 534 528 529 + r->view_count = xdev->hmd->view_count; 535 530 r->mesh.src_binding = 0; 536 531 r->mesh.ubo_binding = 1; 537 532 struct xrt_hmd_parts *parts = 
xdev->hmd; 538 533 r->mesh.vertex_count = parts->distortion.mesh.vertex_count; 539 534 r->mesh.stride = parts->distortion.mesh.stride; 540 - r->mesh.index_counts[0] = parts->distortion.mesh.index_counts[0]; 541 - r->mesh.index_counts[1] = parts->distortion.mesh.index_counts[1]; 542 535 r->mesh.index_count_total = parts->distortion.mesh.index_count_total; 543 - r->mesh.index_offsets[0] = parts->distortion.mesh.index_offsets[0]; 544 - r->mesh.index_offsets[1] = parts->distortion.mesh.index_offsets[1]; 545 - 536 + for (uint32_t i = 0; i < r->view_count; ++i) { 537 + r->mesh.index_counts[i] = parts->distortion.mesh.index_counts[i]; 538 + r->mesh.index_offsets[i] = parts->distortion.mesh.index_offsets[i]; 539 + } 546 540 r->compute.src_binding = 0; 547 541 r->compute.distortion_binding = 1; 548 542 r->compute.target_binding = 2; 549 543 r->compute.ubo_binding = 3; 550 544 551 545 r->compute.layer.image_array_size = vk->features.max_per_stage_descriptor_sampled_images; 552 - if (r->compute.layer.image_array_size > RENDER_MAX_IMAGES) { 553 - r->compute.layer.image_array_size = RENDER_MAX_IMAGES; 546 + if (r->compute.layer.image_array_size > RENDER_MAX_IMAGES_COUNT) { 547 + r->compute.layer.image_array_size = RENDER_MAX_IMAGES_COUNT; 554 548 } 555 549 556 550 ··· 703 697 704 698 { 705 699 // Number of layer shader runs (views) times number of layers. 706 - const uint32_t layer_shader_count = RENDER_MAX_LAYER_RUNS * RENDER_MAX_LAYERS; 700 + const uint32_t layer_shader_count = RENDER_MAX_LAYER_RUNS_COUNT * RENDER_MAX_LAYERS; 707 701 708 702 // Two mesh distortion runs. 709 - const uint32_t mesh_shader_count = 2; 703 + const uint32_t mesh_shader_count = RENDER_MAX_LAYER_RUNS_COUNT; 710 704 711 705 struct vk_descriptor_pool_info mesh_pool_info = { 712 706 .uniform_per_descriptor_count = 1, ··· 737 731 buffer_count += layer_shader_count; 738 732 739 733 // One UBO per mesh shader. 
740 - buffer_count += 2; 734 + buffer_count += RENDER_MAX_LAYER_RUNS_COUNT; 741 735 742 736 // We currently use the aligmnent as max UBO size. 743 737 static_assert(sizeof(struct render_gfx_mesh_ubo_data) <= RENDER_ALWAYS_SAFE_UBO_ALIGNMENT, "MAX"); ··· 819 813 return false; 820 814 } 821 815 822 - bret = init_mesh_ubo_buffers( // 823 - vk, // 824 - &r->mesh.ubos[0], // 825 - &r->mesh.ubos[1]); // 816 + bret = init_mesh_ubo_buffers( // 817 + vk, // 818 + r->mesh.ubos, r->view_count); // 826 819 if (!bret) { 827 820 return false; 828 821 } ··· 838 831 839 832 const uint32_t compute_descriptor_count = // 840 833 1 + // Shared/distortion run(s). 841 - RENDER_MAX_LAYER_RUNS; // Layer shader run(s). 834 + RENDER_MAX_LAYER_RUNS_COUNT; // Layer shader run(s). 842 835 843 836 struct vk_descriptor_pool_info compute_pool_info = { 844 837 .uniform_per_descriptor_count = 1, 845 838 // layer images 846 - .sampler_per_descriptor_count = r->compute.layer.image_array_size + 6, 839 + .sampler_per_descriptor_count = r->compute.layer.image_array_size + RENDER_DISTORTION_IMAGES_COUNT, 847 840 .storage_image_per_descriptor_count = 1, 848 841 .storage_buffer_per_descriptor_count = 0, 849 842 .descriptor_count = compute_descriptor_count, ··· 921 914 922 915 size_t layer_ubo_size = sizeof(struct render_compute_layer_ubo_data); 923 916 924 - for (uint32_t i = 0; i < ARRAY_SIZE(r->compute.layer.ubos); i++) { 917 + for (uint32_t i = 0; i < r->view_count; i++) { 925 918 ret = render_buffer_init( // 926 919 vk, // vk_bundle 927 920 &r->compute.layer.ubos[i], // buffer ··· 1048 1041 * Compute distortion textures, not created until later. 
1049 1042 */ 1050 1043 1051 - for (uint32_t i = 0; i < ARRAY_SIZE(r->distortion.image_views); i++) { 1044 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 1052 1045 r->distortion.image_views[i] = VK_NULL_HANDLE; 1053 1046 } 1054 - for (uint32_t i = 0; i < ARRAY_SIZE(r->distortion.images); i++) { 1047 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 1055 1048 r->distortion.images[i] = VK_NULL_HANDLE; 1056 1049 } 1057 - for (uint32_t i = 0; i < ARRAY_SIZE(r->distortion.device_memories); i++) { 1050 + for (uint32_t i = 0; i < RENDER_DISTORTION_IMAGES_COUNT; i++) { 1058 1051 r->distortion.device_memories[i] = VK_NULL_HANDLE; 1059 1052 } 1060 1053 ··· 1120 1113 D(QueryPool, r->query_pool); 1121 1114 render_buffer_close(vk, &r->mesh.vbo); 1122 1115 render_buffer_close(vk, &r->mesh.ibo); 1123 - render_buffer_close(vk, &r->mesh.ubos[0]); 1124 - render_buffer_close(vk, &r->mesh.ubos[1]); 1116 + for (uint32_t i = 0; i < r->view_count; ++i) { 1117 + render_buffer_close(vk, &r->mesh.ubos[i]); 1118 + } 1125 1119 1126 1120 D(DescriptorPool, r->compute.descriptor_pool); 1127 1121 ··· 1139 1133 1140 1134 render_distortion_images_close(r); 1141 1135 render_buffer_close(vk, &r->compute.clear.ubo); 1142 - for (uint32_t i = 0; i < ARRAY_SIZE(r->compute.layer.ubos); i++) { 1136 + for (uint32_t i = 0; i < r->view_count; i++) { 1143 1137 render_buffer_close(vk, &r->compute.layer.ubos[i]); 1144 1138 } 1145 1139 render_buffer_close(vk, &r->compute.distortion.ubo); ··· 1263 1257 1264 1258 render_scratch_images_close(r, rsi); 1265 1259 1266 - for (uint32_t i = 0; i < ARRAY_SIZE(rsi->color); i++) { 1260 + for (uint32_t i = 0; i < r->view_count; i++) { 1267 1261 bret = create_scratch_image_and_view( // 1268 1262 r->vk, // 1269 1263 extent, // ··· 1288 1282 { 1289 1283 struct vk_bundle *vk = r->vk; 1290 1284 1291 - for (uint32_t i = 0; i < ARRAY_SIZE(rsi->color); i++) { 1285 + for (uint32_t i = 0; i < r->view_count; i++) { 1292 1286 
teardown_scratch_color_image(vk, &rsi->color[i]); 1293 1287 } 1294 1288
+2 -2
src/xrt/compositor/shaders/layer.comp
··· 9 9 #include "srgb.inc.glsl" 10 10 11 11 //! @todo should this be a spcialization const? 12 - #define XRT_LAYER_STEREO_PROJECTION 0 12 + #define XRT_LAYER_PROJECTION 0 13 13 #define XRT_LAYER_STEREO_PROJECTION_DEPTH 1 14 14 #define XRT_LAYER_QUAD 2 15 15 #define XRT_LAYER_CUBE 3 ··· 464 464 case XRT_LAYER_EQUIRECT2: 465 465 rgba = do_equirect2(view_uv, layer); 466 466 break; 467 - case XRT_LAYER_STEREO_PROJECTION: 467 + case XRT_LAYER_PROJECTION: 468 468 case XRT_LAYER_STEREO_PROJECTION_DEPTH: 469 469 rgba = do_projection(view_uv, layer); 470 470 break;
+9 -8
src/xrt/compositor/util/comp_base.c
··· 115 115 } 116 116 117 117 static xrt_result_t 118 - base_layer_stereo_projection(struct xrt_compositor *xc, 119 - struct xrt_device *xdev, 120 - struct xrt_swapchain *l_xsc, 121 - struct xrt_swapchain *r_xsc, 122 - const struct xrt_layer_data *data) 118 + base_layer_projection(struct xrt_compositor *xc, 119 + struct xrt_device *xdev, 120 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 121 + const struct xrt_layer_data *data) 123 122 { 124 123 struct comp_base *cb = comp_base(xc); 125 124 126 125 uint32_t layer_id = cb->slot.layer_count; 127 126 128 127 struct comp_layer *layer = &cb->slot.layers[layer_id]; 129 - layer->sc_array[0] = comp_swapchain(l_xsc); 130 - layer->sc_array[1] = comp_swapchain(r_xsc); 128 + assert(ARRAY_SIZE(layer->sc_array) >= data->proj.view_count); 129 + for (uint32_t i = 0; i < data->proj.view_count; ++i) { 130 + layer->sc_array[i] = comp_swapchain(xsc[i]); 131 + } 131 132 layer->data = *data; 132 133 133 134 cb->slot.layer_count++; ··· 256 257 cb->base.base.create_semaphore = base_create_semaphore; 257 258 cb->base.base.import_fence = base_import_fence; 258 259 cb->base.base.layer_begin = base_layer_begin; 259 - cb->base.base.layer_stereo_projection = base_layer_stereo_projection; 260 + cb->base.base.layer_projection = base_layer_projection; 260 261 cb->base.base.layer_stereo_projection_depth = base_layer_stereo_projection_depth; 261 262 cb->base.base.layer_quad = base_layer_quad; 262 263 cb->base.base.layer_cube = base_layer_cube;
+3 -3
src/xrt/compositor/util/comp_base.h
··· 33 33 * 34 34 * Unused elements should be set to null. 35 35 */ 36 - struct comp_swapchain *sc_array[4]; 36 + struct comp_swapchain *sc_array[XRT_MAX_VIEWS * 2]; 37 37 38 38 /*! 39 39 * All basic (trivially-serializable) data associated with a layer. ··· 62 62 bool one_projection_layer_fast_path; 63 63 64 64 //! fov as reported by device for the current submit. 65 - struct xrt_fov fovs[2]; 65 + struct xrt_fov fovs[XRT_MAX_VIEWS]; 66 66 //! absolute pose as reported by device for the current submit. 67 - struct xrt_pose poses[2]; 67 + struct xrt_pose poses[XRT_MAX_VIEWS]; 68 68 }; 69 69 70 70 /*!
+1 -2
src/xrt/compositor/util/comp_render.h
··· 102 102 */ 103 103 struct comp_render_dispatch_data 104 104 { 105 - struct comp_render_view_data views[2]; 105 + struct comp_render_view_data views[XRT_MAX_VIEWS]; 106 106 107 107 //! The number of views currently in this dispatch data. 108 108 uint32_t view_count; ··· 324 324 * layers should it not be possible to do a fast_path. Will insert barriers to 325 325 * change the scratch images and target images to the needed layout. 326 326 * 327 - * Currently limited to exactly two views. 328 327 * 329 328 * Expected layouts: 330 329 * * Layer images: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+67 -84
src/xrt/compositor/util/comp_render_cs.c
··· 44 44 uint32_t cur_image, 45 45 VkSampler clamp_to_edge, 46 46 VkSampler clamp_to_border_black, 47 - VkSampler src_samplers[RENDER_MAX_IMAGES], 48 - VkImageView src_image_views[RENDER_MAX_IMAGES], 47 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], 48 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], 49 49 struct render_compute_layer_ubo_data *ubo_data, 50 50 uint32_t *out_cur_image) 51 51 { ··· 106 106 uint32_t cur_image, 107 107 VkSampler clamp_to_edge, 108 108 VkSampler clamp_to_border_black, 109 - VkSampler src_samplers[RENDER_MAX_IMAGES], 110 - VkImageView src_image_views[RENDER_MAX_IMAGES], 109 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], 110 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], 111 111 struct render_compute_layer_ubo_data *ubo_data, 112 112 bool do_timewarp, 113 113 uint32_t *out_cur_image) ··· 115 115 const struct xrt_layer_projection_view_data *vd = NULL; 116 116 const struct xrt_layer_depth_data *dvd = NULL; 117 117 118 - if (data->type == XRT_LAYER_STEREO_PROJECTION) { 118 + if (data->type == XRT_LAYER_PROJECTION) { 119 119 view_index_to_projection_data(view_index, data, &vd); 120 120 } else { 121 121 view_index_to_depth_data(view_index, data, &vd, &dvd); ··· 169 169 uint32_t cur_image, 170 170 VkSampler clamp_to_edge, 171 171 VkSampler clamp_to_border_black, 172 - VkSampler src_samplers[RENDER_MAX_IMAGES], 173 - VkImageView src_image_views[RENDER_MAX_IMAGES], 172 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], 173 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], 174 174 struct render_compute_layer_ubo_data *ubo_data, 175 175 uint32_t *out_cur_image) 176 176 { ··· 252 252 uint32_t cur_image, 253 253 VkSampler clamp_to_edge, 254 254 VkSampler clamp_to_border_black, 255 - VkSampler src_samplers[RENDER_MAX_IMAGES], 256 - VkImageView src_image_views[RENDER_MAX_IMAGES], 255 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE], 256 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE], 257 257 struct 
render_compute_layer_ubo_data *ubo_data, 258 258 uint32_t *out_cur_image) 259 259 { ··· 317 317 static void 318 318 do_cs_clear(struct render_compute *crc, const struct comp_render_dispatch_data *d) 319 319 { 320 - // Hardcoded to two views. 321 - if (d->view_count != 2) { 322 - U_LOG_E("Only supports exactly 2 views!"); 323 - assert(d->view_count == 2); 320 + if (d->view_count > XRT_MAX_VIEWS) { 321 + U_LOG_E("Only supports max %d views!", XRT_MAX_VIEWS); 322 + assert(d->view_count < XRT_MAX_VIEWS); 324 323 return; 325 324 } 326 325 327 - const struct render_viewport_data target_viewport_datas[2] = { 328 - d->views[0].target_viewport_data, 329 - d->views[1].target_viewport_data, 330 - }; 326 + struct render_viewport_data target_viewport_datas[XRT_MAX_VIEWS]; 327 + for (uint32_t i = 0; i < crc->r->view_count; ++i) { 328 + target_viewport_datas[i] = d->views[i].target_viewport_data; 329 + } 330 + 331 331 332 332 render_compute_clear( // 333 333 crc, // crc ··· 339 339 static void 340 340 do_cs_distortion_from_scratch(struct render_compute *crc, const struct comp_render_dispatch_data *d) 341 341 { 342 - // Hardcoded to two views. 
343 - if (d->view_count != 2) { 344 - U_LOG_E("Only supports exactly 2 views!"); 345 - assert(d->view_count == 2); 342 + if (d->view_count > XRT_MAX_VIEWS) { 343 + U_LOG_E("Only supports max %d views!", XRT_MAX_VIEWS); 344 + assert(d->view_count < XRT_MAX_VIEWS); 346 345 return; 347 346 } 348 - 349 347 VkSampler clamp_to_border_black = crc->r->samplers.clamp_to_border_black; 350 348 351 - struct render_viewport_data target_viewport_datas[2]; 352 - VkImageView src_image_views[2]; 353 - VkSampler src_samplers[2]; 354 - struct xrt_normalized_rect src_norm_rects[2]; 349 + struct render_viewport_data target_viewport_datas[XRT_MAX_VIEWS]; 350 + VkImageView src_image_views[XRT_MAX_VIEWS]; 351 + VkSampler src_samplers[XRT_MAX_VIEWS]; 352 + struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS]; 355 353 356 354 for (uint32_t i = 0; i < d->view_count; i++) { 357 355 // Data to be filled in. ··· 382 380 } 383 381 384 382 static void 385 - do_cs_distortion_from_stereo_layer(struct render_compute *crc, 386 - const struct comp_layer *layer, 387 - const struct xrt_layer_projection_view_data *lvd, 388 - const struct xrt_layer_projection_view_data *rvd, 389 - const struct comp_render_dispatch_data *d) 383 + do_cs_distortion_for_layer(struct render_compute *crc, 384 + const struct comp_layer *layer, 385 + const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS], 386 + const struct comp_render_dispatch_data *d) 390 387 { 391 - // Hardcoded to two views. 392 - if (d->view_count != 2) { 393 - U_LOG_E("Only supports exactly 2 views!"); 394 - assert(d->view_count == 2); 388 + if (d->view_count > XRT_MAX_VIEWS) { 389 + U_LOG_E("Only supports max %d views!", XRT_MAX_VIEWS); 390 + assert(d->view_count < XRT_MAX_VIEWS); 395 391 return; 396 392 } 397 393 398 394 // Fetch from this data. 
399 395 const struct xrt_layer_data *data = &layer->data; 400 - uint32_t left_array_index = lvd->sub.array_index; 401 - uint32_t right_array_index = rvd->sub.array_index; 402 - const struct comp_swapchain_image *left = &layer->sc_array[0]->images[lvd->sub.image_index]; 403 - const struct comp_swapchain_image *right = &layer->sc_array[1]->images[rvd->sub.image_index]; 404 396 405 397 VkSampler clamp_to_border_black = crc->r->samplers.clamp_to_border_black; 406 398 407 399 // Data to fill in. 408 - struct xrt_pose world_poses[2]; 409 - struct render_viewport_data target_viewport_datas[2]; 410 - struct xrt_normalized_rect src_norm_rects[2]; 411 - struct xrt_pose src_poses[2]; 412 - struct xrt_fov src_fovs[2]; 413 - VkSampler src_samplers[2]; 414 - VkImageView src_image_views[2]; 400 + struct xrt_pose world_poses[XRT_MAX_VIEWS]; 401 + struct render_viewport_data target_viewport_datas[XRT_MAX_VIEWS]; 402 + struct xrt_normalized_rect src_norm_rects[XRT_MAX_VIEWS]; 403 + struct xrt_pose src_poses[XRT_MAX_VIEWS]; 404 + struct xrt_fov src_fovs[XRT_MAX_VIEWS]; 405 + VkSampler src_samplers[XRT_MAX_VIEWS]; 406 + VkImageView src_image_views[XRT_MAX_VIEWS]; 415 407 416 408 for (uint32_t i = 0; i < d->view_count; i++) { 417 - 418 409 struct xrt_pose world_pose; 419 410 struct render_viewport_data viewport_data; 420 411 struct xrt_pose src_pose; 421 412 struct xrt_fov src_fov; 422 413 struct xrt_normalized_rect src_norm_rect; 423 414 VkImageView src_image_view; 415 + uint32_t array_index = vds[i]->sub.array_index; 416 + const struct comp_swapchain_image *image = &layer->sc_array[i]->images[vds[i]->sub.image_index]; 424 417 425 418 // Gather data. 426 419 world_pose = d->views[i].world_pose; 427 420 viewport_data = d->views[i].target_viewport_data; 428 421 429 - if (!is_view_index_right(i)) { 430 - // Left, aka not right. 
431 - src_pose = lvd->pose; 432 - src_fov = lvd->fov; 433 - src_norm_rect = lvd->sub.norm_rect; 434 - src_image_view = get_image_view(left, data->flags, left_array_index); 435 - } else { 436 - // Right 437 - src_pose = rvd->pose; 438 - src_fov = rvd->fov; 439 - src_norm_rect = rvd->sub.norm_rect; 440 - src_image_view = get_image_view(right, data->flags, right_array_index); 441 - } 422 + src_pose = vds[i]->pose; 423 + src_fov = vds[i]->fov; 424 + src_norm_rect = vds[i]->sub.norm_rect; 425 + src_image_view = get_image_view(image, data->flags, array_index); 442 426 443 427 if (data->flip_y) { 444 428 src_norm_rect.h = -src_norm_rect.h; ··· 515 499 516 500 // Tightly pack color and optional depth images. 517 501 uint32_t cur_image = 0; 518 - VkSampler src_samplers[RENDER_MAX_IMAGES]; 519 - VkImageView src_image_views[RENDER_MAX_IMAGES]; 502 + VkSampler src_samplers[RENDER_MAX_IMAGES_SIZE]; 503 + VkImageView src_image_views[RENDER_MAX_IMAGES_SIZE]; 520 504 521 505 ubo_data->view = *target_view; 522 506 ubo_data->pre_transform = *pre_transform; ··· 540 524 switch (data->type) { 541 525 case XRT_LAYER_CYLINDER: required_image_samplers = 1; break; 542 526 case XRT_LAYER_EQUIRECT2: required_image_samplers = 1; break; 543 - case XRT_LAYER_STEREO_PROJECTION: required_image_samplers = 1; break; 527 + case XRT_LAYER_PROJECTION: required_image_samplers = 1; break; 544 528 case XRT_LAYER_STEREO_PROJECTION_DEPTH: required_image_samplers = 2; break; 545 529 case XRT_LAYER_QUAD: required_image_samplers = 1; break; 546 530 default: ··· 587 571 &cur_image); // out_cur_image 588 572 break; 589 573 case XRT_LAYER_STEREO_PROJECTION_DEPTH: 590 - case XRT_LAYER_STEREO_PROJECTION: { 574 + case XRT_LAYER_PROJECTION: { 591 575 do_cs_projection_layer( // 592 576 data, // data 593 577 layer, // layer ··· 722 706 // We want to read from the images afterwards. 
723 707 VkImageLayout transition_to = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; 724 708 725 - if (fast_path && layers[0].data.type == XRT_LAYER_STEREO_PROJECTION) { 709 + if (fast_path && layers[0].data.type == XRT_LAYER_PROJECTION) { 726 710 int i = 0; 727 711 const struct comp_layer *layer = &layers[i]; 728 - const struct xrt_layer_stereo_projection_data *stereo = &layer->data.stereo; 729 - const struct xrt_layer_projection_view_data *lvd = &stereo->l; 730 - const struct xrt_layer_projection_view_data *rvd = &stereo->r; 731 - 732 - do_cs_distortion_from_stereo_layer( // 733 - crc, // crc 734 - layer, // layer 735 - lvd, // lvd 736 - rvd, // rvd 737 - d); // d 712 + const struct xrt_layer_projection_data *proj = &layer->data.proj; 713 + const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS]; 714 + for (uint32_t view = 0; view < crc->r->view_count; ++view) { 715 + vds[view] = &proj->v[view]; 716 + } 717 + do_cs_distortion_for_layer( // 718 + crc, // crc 719 + layer, // layer 720 + vds, // vds 721 + d); // d 738 722 } else if (fast_path && layers[0].data.type == XRT_LAYER_STEREO_PROJECTION_DEPTH) { 739 723 int i = 0; 740 724 const struct comp_layer *layer = &layers[i]; 741 725 const struct xrt_layer_stereo_projection_depth_data *stereo = &layer->data.stereo_depth; 742 - const struct xrt_layer_projection_view_data *lvd = &stereo->l; 743 - const struct xrt_layer_projection_view_data *rvd = &stereo->r; 744 - 745 - do_cs_distortion_from_stereo_layer( // 746 - crc, // crc 747 - layer, // layer 748 - lvd, // lvd 749 - rvd, // rvd 750 - d); // d 726 + const struct xrt_layer_projection_view_data *vds[2]; 727 + vds[0] = &stereo->l; 728 + vds[1] = &stereo->r; 729 + do_cs_distortion_for_layer( // 730 + crc, // crc 731 + layer, // layer 732 + vds, // vds 733 + d); // d 751 734 } else if (layer_count > 0) { 752 735 comp_render_cs_layers( // 753 736 crc, //
+25 -39
src/xrt/compositor/util/comp_render_gfx.c
··· 69 69 */ 70 70 struct gfx_layer_state 71 71 { 72 - struct gfx_layer_view_state views[2]; 72 + struct gfx_layer_view_state views[XRT_MAX_VIEWS]; 73 73 }; 74 74 75 75 /* ··· 77 77 */ 78 78 struct gfx_mesh_state 79 79 { 80 - VkDescriptorSet descriptor_sets[2]; 80 + VkDescriptorSet descriptor_sets[XRT_MAX_VIEWS]; 81 81 }; 82 82 83 83 /* ··· 98 98 */ 99 99 struct gfx_mesh_data 100 100 { 101 - struct gfx_mesh_view_data views[2]; 101 + struct gfx_mesh_view_data views[XRT_MAX_VIEWS]; 102 102 }; 103 103 104 104 ··· 366 366 struct vk_bundle *vk = rr->r->vk; 367 367 VkResult ret; 368 368 369 - if (layer_data->type == XRT_LAYER_STEREO_PROJECTION) { 369 + if (layer_data->type == XRT_LAYER_PROJECTION) { 370 370 view_index_to_projection_data(view_index, layer_data, &vd); 371 371 } else { 372 372 view_index_to_depth_data(view_index, layer_data, &vd, &dvd); ··· 561 561 state); // state 562 562 VK_CHK_WITH_GOTO(ret, "do_equirect2_layer", err_layer); 563 563 break; 564 - case XRT_LAYER_STEREO_PROJECTION: 564 + case XRT_LAYER_PROJECTION: 565 565 case XRT_LAYER_STEREO_PROJECTION_DEPTH: 566 566 ret = do_projection_layer( // 567 567 rr, // rr ··· 626 626 state->premultiplied_alphas[i], // 627 627 state->descriptor_sets[i]); // 628 628 break; 629 - case XRT_LAYER_STEREO_PROJECTION: 629 + case XRT_LAYER_PROJECTION: 630 630 case XRT_LAYER_STEREO_PROJECTION_DEPTH: 631 631 render_gfx_layer_projection( // 632 632 rr, // ··· 750 750 do_mesh_from_proj(struct render_gfx *rr, 751 751 const struct comp_render_dispatch_data *d, 752 752 const struct comp_layer *layer, 753 - const struct xrt_layer_projection_view_data *lvd, 754 - const struct xrt_layer_projection_view_data *rvd) 753 + const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS]) 755 754 { 756 755 const struct xrt_layer_data *data = &layer->data; 757 - const uint32_t left_array_index = lvd->sub.array_index; 758 - const uint32_t right_array_index = rvd->sub.array_index; 759 - const struct comp_swapchain_image *left = 
&layer->sc_array[0]->images[lvd->sub.image_index]; 760 - const struct comp_swapchain_image *right = &layer->sc_array[1]->images[rvd->sub.image_index]; 761 756 762 - VkSampler clamp_to_border_black = rr->r->samplers.clamp_to_border_black; 757 + const VkSampler clamp_to_border_black = rr->r->samplers.clamp_to_border_black; 763 758 764 759 struct gfx_mesh_data md = XRT_STRUCT_INIT; 765 760 for (uint32_t i = 0; i < d->view_count; i++) { 761 + const uint32_t array_index = vds[i]->sub.array_index; 762 + const struct comp_swapchain_image *image = &layer->sc_array[i]->images[vds[i]->sub.image_index]; 766 763 767 764 struct xrt_pose src_pose; 768 765 struct xrt_fov src_fov; 769 766 struct xrt_normalized_rect src_norm_rect; 770 - VkImageView src_image_view; 771 767 772 - if (!is_view_index_right(i)) { 773 - // Left, aka not right. 774 - src_pose = lvd->pose; 775 - src_fov = lvd->fov; 776 - src_norm_rect = lvd->sub.norm_rect; 777 - src_image_view = get_image_view(left, data->flags, left_array_index); 778 - } else { 779 - // Right 780 - src_pose = rvd->pose; 781 - src_fov = rvd->fov; 782 - src_norm_rect = rvd->sub.norm_rect; 783 - src_image_view = get_image_view(right, data->flags, right_array_index); 784 - } 768 + src_pose = vds[i]->pose; 769 + src_fov = vds[i]->fov; 770 + src_norm_rect = vds[i]->sub.norm_rect; 771 + const VkImageView src_image_view = get_image_view(image, data->flags, array_index); 785 772 786 773 if (data->flip_y) { 787 774 src_norm_rect.h = -src_norm_rect.h; ··· 827 814 // Sanity check. 828 815 assert(!fast_path || layer_count >= 1); 829 816 830 - if (fast_path && layer->data.type == XRT_LAYER_STEREO_PROJECTION) { 817 + if (fast_path && layer->data.type == XRT_LAYER_PROJECTION) { 831 818 // Fast path. 
832 - const struct xrt_layer_stereo_projection_data *stereo = &layer->data.stereo; 833 - const struct xrt_layer_projection_view_data *lvd = &stereo->l; 834 - const struct xrt_layer_projection_view_data *rvd = &stereo->r; 835 - 819 + const struct xrt_layer_projection_data *proj = &layer->data.proj; 820 + const struct xrt_layer_projection_view_data *vds[XRT_MAX_VIEWS]; 821 + for (uint32_t j = 0; j < d->view_count; ++j) { 822 + vds[j] = &proj->v[j]; 823 + } 836 824 do_mesh_from_proj( // 837 825 rr, // 838 826 d, // 839 827 layer, // 840 - lvd, // 841 - rvd); // 828 + vds); // 842 829 843 830 } else if (fast_path && layer->data.type == XRT_LAYER_STEREO_PROJECTION_DEPTH) { 844 831 // Fast path. 845 832 const struct xrt_layer_stereo_projection_depth_data *stereo = &layer->data.stereo_depth; 846 - const struct xrt_layer_projection_view_data *lvd = &stereo->l; 847 - const struct xrt_layer_projection_view_data *rvd = &stereo->r; 848 - 833 + const struct xrt_layer_projection_view_data *vds[2]; 834 + vds[0] = &stereo->l; 835 + vds[1] = &stereo->r; 849 836 do_mesh_from_proj( // 850 837 rr, // 851 838 d, // 852 839 layer, // 853 - lvd, // 854 - rvd); // 840 + vds); // 855 841 856 842 } else { 857 843 if (fast_path) {
+4 -4
src/xrt/compositor/util/comp_render_helpers.h
··· 51 51 const struct xrt_layer_data *data, 52 52 const struct xrt_layer_projection_view_data **out_vd) 53 53 { 54 - const struct xrt_layer_stereo_projection_data *stereo = &data->stereo; 54 + const struct xrt_layer_projection_data *proj = &data->proj; 55 55 56 56 if (is_view_index_right(view_index)) { 57 - *out_vd = &stereo->r; 57 + *out_vd = &proj->v[view_index]; 58 58 } else { 59 - *out_vd = &stereo->l; 59 + *out_vd = &proj->v[view_index]; 60 60 } 61 61 } 62 62 ··· 94 94 case XRT_LAYER_EQUIRECT1: visibility = data->equirect1.visibility; break; 95 95 case XRT_LAYER_EQUIRECT2: visibility = data->equirect2.visibility; break; 96 96 case XRT_LAYER_QUAD: visibility = data->quad.visibility; break; 97 - case XRT_LAYER_STEREO_PROJECTION: 97 + case XRT_LAYER_PROJECTION: 98 98 case XRT_LAYER_STEREO_PROJECTION_DEPTH: return true; 99 99 default: return false; 100 100 };
+18 -20
src/xrt/include/xrt/xrt_compositor.h
··· 75 75 */ 76 76 enum xrt_layer_type 77 77 { 78 - XRT_LAYER_STEREO_PROJECTION, 78 + XRT_LAYER_PROJECTION, 79 79 XRT_LAYER_STEREO_PROJECTION_DEPTH, 80 80 XRT_LAYER_QUAD, 81 81 XRT_LAYER_CUBE, ··· 228 228 * The @ref xrt_swapchain references and @ref xrt_device are provided outside of 229 229 * this struct. 230 230 */ 231 - struct xrt_layer_stereo_projection_data 231 + struct xrt_layer_projection_data 232 232 { 233 - struct xrt_layer_projection_view_data l, r; 233 + uint32_t view_count; 234 + struct xrt_layer_projection_view_data v[XRT_MAX_VIEWS]; 234 235 }; 235 236 236 237 /*! ··· 458 459 * xrt_compositor::layer_commit where this data was passed. 459 460 */ 460 461 union { 461 - struct xrt_layer_stereo_projection_data stereo; 462 + struct xrt_layer_projection_data proj; 462 463 struct xrt_layer_stereo_projection_depth_data stereo_depth; 463 464 struct xrt_layer_quad_data quad; 464 465 struct xrt_layer_cube_data cube; ··· 1204 1205 * 1205 1206 * @param xc Self pointer 1206 1207 * @param xdev The device the layer is relative to. 1207 - * @param l_xsc Swapchain object containing left eye RGB data. 1208 - * @param r_xsc Swapchain object containing right eye RGB data. 1208 + * @param xsc Swapchain object containing eye RGB data. 1209 1209 * @param data All of the pure data bits (not pointers/handles), 1210 1210 * including what parts of the supplied swapchain 1211 1211 * objects to use for each view. 1212 1212 */ 1213 - xrt_result_t (*layer_stereo_projection)(struct xrt_compositor *xc, 1214 - struct xrt_device *xdev, 1215 - struct xrt_swapchain *l_xsc, 1216 - struct xrt_swapchain *r_xsc, 1217 - const struct xrt_layer_data *data); 1213 + xrt_result_t (*layer_projection)(struct xrt_compositor *xc, 1214 + struct xrt_device *xdev, 1215 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 1216 + const struct xrt_layer_data *data); 1218 1217 1219 1218 /*! 1220 1219 * @brief Adds a stereo projection layer for submission, has depth information. ··· 1680 1679 } 1681 1680 1682 1681 /*! 
1683 - * @copydoc xrt_compositor::layer_stereo_projection 1682 + * @copydoc xrt_compositor::layer_projection 1684 1683 * 1685 1684 * Helper for calling through the function pointer. 1686 1685 * 1687 1686 * @public @memberof xrt_compositor 1688 1687 */ 1689 1688 static inline xrt_result_t 1690 - xrt_comp_layer_stereo_projection(struct xrt_compositor *xc, 1691 - struct xrt_device *xdev, 1692 - struct xrt_swapchain *l_xsc, 1693 - struct xrt_swapchain *r_xsc, 1694 - const struct xrt_layer_data *data) 1689 + xrt_comp_layer_projection(struct xrt_compositor *xc, 1690 + struct xrt_device *xdev, 1691 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 1692 + const struct xrt_layer_data *data) 1695 1693 { 1696 - return xc->layer_stereo_projection(xc, xdev, l_xsc, r_xsc, data); 1694 + return xc->layer_projection(xc, xdev, xsc, data); 1697 1695 } 1698 1696 1699 1697 /*! ··· 2285 2283 uint32_t width_pixels; 2286 2284 uint32_t height_pixels; 2287 2285 uint32_t sample_count; 2288 - } max; //!< Maximums for this view. 2289 - } views[2]; //!< View configuration information. 2286 + } max; //!< Maximums for this view. 2287 + } views[XRT_MAX_VIEWS]; //!< View configuration information. 2290 2288 2291 2289 //! Maximum number of composition layers supported, never changes. 2292 2290 uint32_t max_layers;
+7 -5
src/xrt/include/xrt/xrt_device.h
··· 13 13 14 14 #include "xrt/xrt_defines.h" 15 15 #include "xrt/xrt_visibility_mask.h" 16 - 16 + #include "xrt/xrt_limits.h" 17 17 18 18 #ifdef __cplusplus 19 19 extern "C" { ··· 107 107 * 108 108 * For now hardcoded display to two. 109 109 */ 110 - struct xrt_view views[2]; 110 + struct xrt_view views[XRT_MAX_VIEWS]; 111 111 112 + size_t view_count; 112 113 /*! 113 114 * Array of supported blend modes. 114 115 */ ··· 139 140 //! Indices, for triangle strip. 140 141 int *indices; 141 142 //! Number of indices for the triangle strips (one per view). 142 - uint32_t index_counts[2]; 143 + uint32_t index_counts[XRT_MAX_VIEWS]; 143 144 //! Offsets for the indices (one offset per view). 144 - uint32_t index_offsets[2]; 145 + uint32_t index_offsets[XRT_MAX_VIEWS]; 145 146 //! Total number of elements in mesh::indices array. 146 147 uint32_t index_count_total; 147 148 } mesh; 148 149 149 150 //! distortion is subject to the field of view 150 - struct xrt_fov fov[2]; 151 + struct xrt_fov fov[XRT_MAX_VIEWS]; 151 152 } distortion; 152 153 }; 153 154 ··· 407 408 struct xrt_space_relation *out_head_relation, 408 409 struct xrt_fov *out_fovs, 409 410 struct xrt_pose *out_poses); 411 + 410 412 /** 411 413 * Compute the distortion at a single point. 412 414 *
+4
src/xrt/include/xrt/xrt_limits.h
··· 16 16 * @addtogroup xrt_iface 17 17 * @{ 18 18 */ 19 + /* 20 + * Max number of views supported by a compositor, artificial limit. 21 + */ 22 + #define XRT_MAX_VIEWS 2 19 23 20 24 /*! 21 25 * Maximum number of handles sent in one call.
+10 -15
src/xrt/ipc/client/ipc_client_compositor.c
··· 581 581 } 582 582 583 583 static xrt_result_t 584 - ipc_compositor_layer_stereo_projection(struct xrt_compositor *xc, 585 - struct xrt_device *xdev, 586 - struct xrt_swapchain *l_xsc, 587 - struct xrt_swapchain *r_xsc, 588 - const struct xrt_layer_data *data) 584 + ipc_compositor_layer_projection(struct xrt_compositor *xc, 585 + struct xrt_device *xdev, 586 + struct xrt_swapchain *xsc[XRT_MAX_VIEWS], 587 + const struct xrt_layer_data *data) 589 588 { 590 589 struct ipc_client_compositor *icc = ipc_client_compositor(xc); 591 590 592 - assert(data->type == XRT_LAYER_STEREO_PROJECTION); 591 + assert(data->type == XRT_LAYER_PROJECTION); 593 592 594 593 struct ipc_shared_memory *ism = icc->ipc_c->ism; 595 594 struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id]; 596 595 struct ipc_layer_entry *layer = &slot->layers[icc->layers.layer_count]; 597 - struct ipc_client_swapchain *l = ipc_client_swapchain(l_xsc); 598 - struct ipc_client_swapchain *r = ipc_client_swapchain(r_xsc); 599 - 600 596 layer->xdev_id = 0; //! @todo Real id. 601 - layer->swapchain_ids[0] = l->id; 602 - layer->swapchain_ids[1] = r->id; 603 - layer->swapchain_ids[2] = -1; 604 - layer->swapchain_ids[3] = -1; 605 597 layer->data = *data; 606 - 598 + for (uint32_t i = 0; i < data->proj.view_count; ++i) { 599 + struct ipc_client_swapchain *ics = ipc_client_swapchain(xsc[i]); 600 + layer->swapchain_ids[i] = ics->id; 601 + } 607 602 // Increment the number of layers. 
608 603 icc->layers.layer_count++; 609 604 ··· 889 884 icc->base.base.begin_frame = ipc_compositor_begin_frame; 890 885 icc->base.base.discard_frame = ipc_compositor_discard_frame; 891 886 icc->base.base.layer_begin = ipc_compositor_layer_begin; 892 - icc->base.base.layer_stereo_projection = ipc_compositor_layer_stereo_projection; 887 + icc->base.base.layer_projection = ipc_compositor_layer_projection; 893 888 icc->base.base.layer_stereo_projection_depth = ipc_compositor_layer_stereo_projection_depth; 894 889 icc->base.base.layer_quad = ipc_compositor_layer_quad; 895 890 icc->base.base.layer_cube = ipc_compositor_layer_cube;
+6 -5
src/xrt/ipc/client/ipc_client_hmd.c
··· 188 188 ich->device_id, // 189 189 default_eye_relation, // 190 190 at_timestamp_ns, // 191 + view_count, // 191 192 &info); // 192 193 IPC_CHK_ONLY_PRINT(ich->ipc_c, xret, "ipc_call_device_get_view_poses_2"); 193 194 ··· 352 353 for (int i = 0; i < XRT_MAX_DEVICE_BLEND_MODES; i++) { 353 354 ich->base.hmd->blend_modes[i] = ipc_c->ism->hmd.blend_modes[i]; 354 355 } 356 + ich->base.hmd->view_count = ism->hmd.view_count; 355 357 ich->base.hmd->blend_mode_count = ipc_c->ism->hmd.blend_mode_count; 356 - 357 - ich->base.hmd->views[0].display.w_pixels = ipc_c->ism->hmd.views[0].display.w_pixels; 358 - ich->base.hmd->views[0].display.h_pixels = ipc_c->ism->hmd.views[0].display.h_pixels; 359 - ich->base.hmd->views[1].display.w_pixels = ipc_c->ism->hmd.views[1].display.w_pixels; 360 - ich->base.hmd->views[1].display.h_pixels = ipc_c->ism->hmd.views[1].display.h_pixels; 358 + for (uint32_t i = 0; i < ich->base.hmd->view_count; ++i) { 359 + ich->base.hmd->views[i].display.w_pixels = ipc_c->ism->hmd.views[i].display.w_pixels; 360 + ich->base.hmd->views[i].display.h_pixels = ipc_c->ism->hmd.views[i].display.h_pixels; 361 + } 361 362 362 363 // Distortion information, fills in xdev->compute_distortion(). 363 364 u_distortion_mesh_set_none(&ich->base);
+17 -16
src/xrt/ipc/server/ipc_server_handler.c
··· 739 739 { 740 740 // xdev 741 741 uint32_t device_id = layer->xdev_id; 742 - // left 743 - uint32_t lxsci = layer->swapchain_ids[0]; 744 - // right 745 - uint32_t rxsci = layer->swapchain_ids[1]; 746 - 747 742 struct xrt_device *xdev = get_xdev(ics, device_id); 748 - struct xrt_swapchain *lxcs = ics->xscs[lxsci]; 749 - struct xrt_swapchain *rxcs = ics->xscs[rxsci]; 750 743 751 - if (lxcs == NULL || rxcs == NULL) { 752 - U_LOG_E("Invalid swap chain for projection layer!"); 744 + if (xdev == NULL) { 745 + U_LOG_E("Invalid xdev for projection layer!"); 753 746 return false; 754 747 } 755 748 756 - if (xdev == NULL) { 757 - U_LOG_E("Invalid xdev for projection layer!"); 758 - return false; 749 + uint32_t view_count = xdev->hmd->view_count; 750 + 751 + struct xrt_swapchain *xcs[XRT_MAX_VIEWS]; 752 + for (uint32_t k = 0; k < view_count; k++) { 753 + const uint32_t xsci = layer->swapchain_ids[k]; 754 + xcs[k] = ics->xscs[xsci]; 755 + if (xcs[k] == NULL) { 756 + U_LOG_E("Invalid swap chain for projection layer!"); 757 + return false; 758 + } 759 759 } 760 760 761 + 761 762 // Cast away volatile. 762 763 struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data; 763 764 764 - xrt_comp_layer_stereo_projection(xc, xdev, lxcs, rxcs, data); 765 + xrt_comp_layer_projection(xc, xdev, xcs, data); 765 766 766 767 return true; 767 768 } ··· 971 972 volatile struct ipc_layer_entry *layer = &slot->layers[i]; 972 973 973 974 switch (layer->data.type) { 974 - case XRT_LAYER_STEREO_PROJECTION: 975 + case XRT_LAYER_PROJECTION: 975 976 if (!_update_projection_layer(xc, ics, layer, i)) { 976 977 return false; 977 978 } ··· 1744 1745 uint32_t id, 1745 1746 const struct xrt_vec3 *default_eye_relation, 1746 1747 uint64_t at_timestamp_ns, 1748 + uint32_t view_count, 1747 1749 struct ipc_info_get_view_poses_2 *out_info) 1748 1750 { 1749 1751 // To make the code a bit more readable. 
1750 1752 uint32_t device_id = id; 1751 1753 struct xrt_device *xdev = get_xdev(ics, device_id); 1752 - 1753 1754 xrt_device_get_view_poses( // 1754 1755 xdev, // 1755 1756 default_eye_relation, // 1756 1757 at_timestamp_ns, // 1757 - 2, // 1758 + view_count, // 1758 1759 &out_info->head_relation, // 1759 1760 out_info->fovs, // 1760 1761 out_info->poses); //
+6 -4
src/xrt/ipc/server/ipc_server_process.c
··· 358 358 359 359 // Is this a HMD? 360 360 if (xdev->hmd != NULL) { 361 - ism->hmd.views[0].display.w_pixels = xdev->hmd->views[0].display.w_pixels; 362 - ism->hmd.views[0].display.h_pixels = xdev->hmd->views[0].display.h_pixels; 363 - ism->hmd.views[1].display.w_pixels = xdev->hmd->views[1].display.w_pixels; 364 - ism->hmd.views[1].display.h_pixels = xdev->hmd->views[1].display.h_pixels; 361 + // set view count 362 + ism->hmd.view_count = xdev->hmd->view_count; 363 + for (uint32_t view = 0; view < xdev->hmd->view_count; ++view) { 364 + ism->hmd.views[view].display.w_pixels = xdev->hmd->views[view].display.w_pixels; 365 + ism->hmd.views[view].display.h_pixels = xdev->hmd->views[view].display.h_pixels; 366 + } 365 367 366 368 for (size_t i = 0; i < xdev->hmd->blend_mode_count; i++) { 367 369 // Not super necessary, we also do this assert in oxr_system.c
+5 -4
src/xrt/ipc/shared/ipc_protocol.h
··· 155 155 * 156 156 * How many are actually used depends on the value of @p data.type 157 157 */ 158 - uint32_t swapchain_ids[4]; 158 + uint32_t swapchain_ids[XRT_MAX_VIEWS * 2]; 159 159 160 160 /*! 161 161 * All basic (trivially-serializable) data associated with a layer, ··· 260 260 uint32_t h_pixels; 261 261 } display; 262 262 } views[2]; 263 - 263 + // view count 264 + uint32_t view_count; 264 265 enum xrt_blend_mode blend_modes[XRT_MAX_DEVICE_BLEND_MODES]; 265 266 uint32_t blend_mode_count; 266 267 } hmd; ··· 328 329 */ 329 330 struct ipc_info_get_view_poses_2 330 331 { 331 - struct xrt_fov fovs[2]; 332 - struct xrt_pose poses[2]; 332 + struct xrt_fov fovs[XRT_MAX_VIEWS]; 333 + struct xrt_pose poses[XRT_MAX_VIEWS]; 333 334 struct xrt_space_relation head_relation; 334 335 };
+2 -1
src/xrt/ipc/shared/proto.json
··· 381 381 "in": [ 382 382 {"name": "id", "type": "uint32_t"}, 383 383 {"name": "fallback_eye_relation", "type": "struct xrt_vec3"}, 384 - {"name": "at_timestamp_ns", "type": "uint64_t"} 384 + {"name": "at_timestamp_ns", "type": "uint64_t"}, 385 + {"name": "view_count", "type": "uint32_t"} 385 386 ], 386 387 "out": [ 387 388 {"name": "info", "type": "struct ipc_info_get_view_poses_2"}
+2 -2
src/xrt/state_trackers/oxr/oxr_instance.c
··· 352 352 oxr_instance_destroy(log, &inst->handle); 353 353 return ret; 354 354 } 355 - 356 - ret = oxr_system_fill_in(log, inst, XRT_SYSTEM_ID, &inst->system); 355 + uint32_t view_count = dev->hmd->view_count; 356 + ret = oxr_system_fill_in(log, inst, XRT_SYSTEM_ID, view_count, &inst->system); 357 357 if (ret != XR_SUCCESS) { 358 358 oxr_instance_destroy(log, &inst->handle); 359 359 return ret;
+5 -1
src/xrt/state_trackers/oxr/oxr_objects.h
··· 913 913 struct oxr_system **out_selected); 914 914 915 915 XrResult 916 - oxr_system_fill_in(struct oxr_logger *log, struct oxr_instance *inst, XrSystemId systemId, struct oxr_system *sys); 916 + oxr_system_fill_in(struct oxr_logger *log, 917 + struct oxr_instance *inst, 918 + XrSystemId systemId, 919 + uint32_t view_count, 920 + struct oxr_system *sys); 917 921 918 922 XrResult 919 923 oxr_system_verify_id(struct oxr_logger *log, const struct oxr_instance *inst, XrSystemId systemId);
+4 -4
src/xrt/state_trackers/oxr/oxr_session.c
··· 455 455 bool print = sess->sys->inst->debug_views; 456 456 struct xrt_device *xdev = GET_XDEV_BY_ROLE(sess->sys, head); 457 457 struct oxr_space *baseSpc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_space *, viewLocateInfo->space); 458 - uint32_t view_count = 2; 458 + uint32_t view_count = xdev->hmd->view_count; 459 459 460 460 // Start two call handling. 461 461 if (viewCountOutput != NULL) { ··· 491 491 492 492 // The head pose as in the xdev's space, aka XRT_INPUT_GENERIC_HEAD_POSE. 493 493 struct xrt_space_relation T_xdev_head = XRT_SPACE_RELATION_ZERO; 494 - struct xrt_fov fovs[2] = {0}; 495 - struct xrt_pose poses[2] = {0}; 494 + struct xrt_fov fovs[XRT_MAX_VIEWS] = {0}; 495 + struct xrt_pose poses[XRT_MAX_VIEWS] = {0}; 496 496 497 497 xrt_device_get_view_poses( // 498 498 xdev, // 499 499 &default_eye_relation, // 500 500 xdisplay_time, // 501 - 2, // 501 + view_count, // 502 502 &T_xdev_head, // 503 503 fovs, // 504 504 poses);
+52 -49
src/xrt/state_trackers/oxr/oxr_session_frame_end.c
··· 591 591 return ret; 592 592 } 593 593 594 - if (proj->viewCount != 2) { 594 + if (proj->viewCount < 1 || proj->viewCount > XRT_MAX_VIEWS) { 595 595 return oxr_error(log, XR_ERROR_VALIDATION_FAILURE, 596 - "(frameEndInfo->layers[%u]->viewCount == %u) must be 2 for projection layers and the " 597 - "current view configuration", 598 - layer_index, proj->viewCount); 596 + "(frameEndInfo->layers[%u]->viewCount == %u) must be between 1 and %d for projection " 597 + "layers and the current view configuration", 598 + layer_index, proj->viewCount, XRT_MAX_VIEWS); 599 599 } 600 600 601 601 // number of depth layers must be 0 or proj->viewCount ··· 694 694 } 695 695 696 696 #ifdef OXR_HAVE_KHR_composition_layer_depth 697 - if (depth_layer_count > 0 && depth_layer_count != proj->viewCount) { 697 + if (depth_layer_count > 0 && depth_layer_count != proj->viewCount && proj->viewCount != 2) { 698 698 return oxr_error( 699 699 log, XR_ERROR_VALIDATION_FAILURE, 700 700 "(frameEndInfo->layers[%u] projection layer must have %u depth layers or none, but has: %u)", ··· 1259 1259 { 1260 1260 struct oxr_space *spc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_space *, proj->space); 1261 1261 struct oxr_swapchain *d_scs[2] = {NULL, NULL}; 1262 - struct oxr_swapchain *scs[2]; 1262 + struct oxr_swapchain *scs[XRT_MAX_VIEWS]; 1263 1263 struct xrt_pose *pose_ptr; 1264 - struct xrt_pose pose[2]; 1264 + struct xrt_pose pose[XRT_MAX_VIEWS]; 1265 + struct xrt_swapchain *swapchains[XRT_MAX_VIEWS]; 1265 1266 1266 1267 enum xrt_layer_composition_flags flags = convert_layer_flags(proj->layerFlags); 1267 1268 1268 - uint32_t swapchain_count = ARRAY_SIZE(scs); 1269 - for (uint32_t i = 0; i < swapchain_count; i++) { 1269 + for (uint32_t i = 0; i < proj->viewCount; i++) { 1270 1270 scs[i] = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, proj->views[i].subImage.swapchain); 1271 1271 pose_ptr = (struct xrt_pose *)&proj->views[i].pose; 1272 1272 ··· 1279 1279 flags |= XRT_LAYER_COMPOSITION_VIEW_SPACE_BIT; 
1280 1280 } 1281 1281 1282 - struct xrt_fov *l_fov = (struct xrt_fov *)&proj->views[0].fov; 1283 - struct xrt_fov *r_fov = (struct xrt_fov *)&proj->views[1].fov; 1284 1282 1285 1283 struct xrt_layer_data data; 1286 1284 U_ZERO(&data); 1287 - data.type = XRT_LAYER_STEREO_PROJECTION; 1285 + data.type = XRT_LAYER_PROJECTION; 1288 1286 data.name = XRT_INPUT_GENERIC_HEAD_POSE; 1289 1287 data.timestamp = xrt_timestamp; 1290 1288 data.flags = flags; 1291 - data.stereo.l.fov = *l_fov; 1292 - data.stereo.l.pose = pose[0]; 1293 - data.stereo.r.fov = *r_fov; 1294 - data.stereo.r.pose = pose[1]; 1295 - fill_in_sub_image(scs[0], &proj->views[0].subImage, &data.stereo.l.sub); 1296 - fill_in_sub_image(scs[1], &proj->views[1].subImage, &data.stereo.r.sub); 1289 + data.proj.view_count = proj->viewCount; 1290 + for (size_t i = 0; i < proj->viewCount; ++i) { 1291 + struct xrt_fov *fov = (struct xrt_fov *)&proj->views[i].fov; 1292 + data.proj.v[i].fov = *fov; 1293 + data.proj.v[i].pose = pose[i]; 1294 + fill_in_sub_image(scs[i], &proj->views[i].subImage, &data.proj.v[i].sub); 1295 + swapchains[i] = scs[i]->swapchain; 1296 + } 1297 1297 fill_in_color_scale_bias(sess, (XrCompositionLayerBaseHeader *)proj, &data); 1298 1298 fill_in_y_flip(sess, (XrCompositionLayerBaseHeader *)proj, &data); 1299 1299 fill_in_blend_factors(sess, (XrCompositionLayerBaseHeader *)proj, &data); 1300 1300 fill_in_layer_settings(sess, (XrCompositionLayerBaseHeader *)proj, &data); 1301 1301 1302 + 1302 1303 #ifdef OXR_HAVE_KHR_composition_layer_depth 1303 - const XrCompositionLayerDepthInfoKHR *d_l = OXR_GET_INPUT_FROM_CHAIN( 1304 - &proj->views[0], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR); 1305 - if (d_l) { 1306 - data.stereo_depth.l_d.far_z = d_l->farZ; 1307 - data.stereo_depth.l_d.near_z = d_l->nearZ; 1308 - data.stereo_depth.l_d.max_depth = d_l->maxDepth; 1309 - data.stereo_depth.l_d.min_depth = d_l->minDepth; 1304 + if (proj->viewCount == 2) { 1305 + const 
XrCompositionLayerDepthInfoKHR *d_l = OXR_GET_INPUT_FROM_CHAIN( 1306 + &proj->views[0], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR); 1307 + if (d_l) { 1308 + data.stereo_depth.l_d.far_z = d_l->farZ; 1309 + data.stereo_depth.l_d.near_z = d_l->nearZ; 1310 + data.stereo_depth.l_d.max_depth = d_l->maxDepth; 1311 + data.stereo_depth.l_d.min_depth = d_l->minDepth; 1310 1312 1311 - struct oxr_swapchain *sc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_l->subImage.swapchain); 1313 + struct oxr_swapchain *sc = 1314 + XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_l->subImage.swapchain); 1312 1315 1313 - fill_in_sub_image(sc, &d_l->subImage, &data.stereo_depth.l_d.sub); 1316 + fill_in_sub_image(sc, &d_l->subImage, &data.stereo_depth.l_d.sub); 1314 1317 1315 - // Need to pass this in. 1316 - d_scs[0] = sc; 1317 - } 1318 + // Need to pass this in. 1319 + d_scs[0] = sc; 1320 + } 1318 1321 1319 - const XrCompositionLayerDepthInfoKHR *d_r = OXR_GET_INPUT_FROM_CHAIN( 1320 - &proj->views[1], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR); 1322 + const XrCompositionLayerDepthInfoKHR *d_r = OXR_GET_INPUT_FROM_CHAIN( 1323 + &proj->views[1], XR_TYPE_COMPOSITION_LAYER_DEPTH_INFO_KHR, XrCompositionLayerDepthInfoKHR); 1321 1324 1322 - if (d_r) { 1323 - data.stereo_depth.r_d.far_z = d_r->farZ; 1324 - data.stereo_depth.r_d.near_z = d_r->nearZ; 1325 - data.stereo_depth.r_d.max_depth = d_r->maxDepth; 1326 - data.stereo_depth.r_d.min_depth = d_r->minDepth; 1325 + if (d_r) { 1326 + data.stereo_depth.r_d.far_z = d_r->farZ; 1327 + data.stereo_depth.r_d.near_z = d_r->nearZ; 1328 + data.stereo_depth.r_d.max_depth = d_r->maxDepth; 1329 + data.stereo_depth.r_d.min_depth = d_r->minDepth; 1327 1330 1328 - struct oxr_swapchain *sc = XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_r->subImage.swapchain); 1331 + struct oxr_swapchain *sc = 1332 + XRT_CAST_OXR_HANDLE_TO_PTR(struct oxr_swapchain *, d_r->subImage.swapchain); 1329 
1333 1330 - fill_in_sub_image(sc, &d_r->subImage, &data.stereo_depth.r_d.sub); 1334 + fill_in_sub_image(sc, &d_r->subImage, &data.stereo_depth.r_d.sub); 1331 1335 1332 - // Need to pass this in. 1333 - d_scs[1] = sc; 1336 + // Need to pass this in. 1337 + d_scs[1] = sc; 1338 + } 1334 1339 } 1335 1340 #endif // OXR_HAVE_KHR_composition_layer_depth 1336 - 1337 1341 if (d_scs[0] != NULL && d_scs[1] != NULL) { 1338 1342 #ifdef OXR_HAVE_KHR_composition_layer_depth 1339 1343 fill_in_depth_test(sess, (XrCompositionLayerBaseHeader *)proj, &data); ··· 1351 1355 assert(false && "Should not get here"); 1352 1356 #endif // OXR_HAVE_KHR_composition_layer_depth 1353 1357 } else { 1354 - xrt_result_t xret = xrt_comp_layer_stereo_projection( // 1355 - xc, // compositor 1356 - head, // xdev 1357 - scs[0]->swapchain, // left 1358 - scs[1]->swapchain, // right 1359 - &data); // data 1360 - OXR_CHECK_XRET(log, sess, xret, xrt_comp_layer_stereo_projection); 1358 + xrt_result_t xret = xrt_comp_layer_projection( // 1359 + xc, // compositor 1360 + head, // xdev 1361 + swapchains, // swapchains 1362 + &data); // data 1363 + OXR_CHECK_XRET(log, sess, xret, xrt_comp_layer_projection); 1361 1364 } 1362 1365 1363 1366 return XR_SUCCESS;
+32 -35
src/xrt/state_trackers/oxr/oxr_system.c
··· 107 107 108 108 109 109 XrResult 110 - oxr_system_fill_in(struct oxr_logger *log, struct oxr_instance *inst, XrSystemId systemId, struct oxr_system *sys) 110 + oxr_system_fill_in( 111 + struct oxr_logger *log, struct oxr_instance *inst, XrSystemId systemId, uint32_t view_count, struct oxr_system *sys) 111 112 { 112 113 //! @todo handle other subaction paths? 113 114 114 115 sys->inst = inst; 115 116 sys->systemId = systemId; 116 117 sys->form_factor = XR_FORM_FACTOR_HEAD_MOUNTED_DISPLAY; 117 - sys->view_config_type = XR_VIEW_CONFIGURATION_TYPE_PRIMARY_STEREO; 118 + if (view_count == 1) { 119 + sys->view_config_type = XR_VIEW_CONFIGURATION_TYPE_PRIMARY_MONO; 120 + } else if (view_count == 2) { 121 + sys->view_config_type = XR_VIEW_CONFIGURATION_TYPE_PRIMARY_STEREO; 122 + } else { 123 + assert(false && "view_count must be 1 or 2"); 124 + } 125 + U_LOG_D("sys->view_config_type = %d", sys->view_config_type); 118 126 sys->dynamic_roles_cache = (struct xrt_system_roles)XRT_SYSTEM_ROLES_INIT; 119 127 120 128 #ifdef XR_USE_GRAPHICS_API_VULKAN ··· 141 149 142 150 struct xrt_system_compositor_info *info = &sys->xsysc->info; 143 151 144 - uint32_t w0 = (uint32_t)(info->views[0].recommended.width_pixels * scale); 145 - uint32_t h0 = (uint32_t)(info->views[0].recommended.height_pixels * scale); 146 - uint32_t w1 = (uint32_t)(info->views[1].recommended.width_pixels * scale); 147 - uint32_t h1 = (uint32_t)(info->views[1].recommended.height_pixels * scale); 152 + #define imin(a, b) (a < b ? 
a : b) 153 + for (uint32_t i = 0; i < view_count; ++i) { 154 + uint32_t w = (uint32_t)(info->views[i].recommended.width_pixels * scale); 155 + uint32_t h = (uint32_t)(info->views[i].recommended.height_pixels * scale); 156 + uint32_t w_2 = info->views[i].max.width_pixels; 157 + uint32_t h_2 = info->views[i].max.height_pixels; 148 158 149 - uint32_t w0_2 = info->views[0].max.width_pixels; 150 - uint32_t h0_2 = info->views[0].max.height_pixels; 151 - uint32_t w1_2 = info->views[1].max.width_pixels; 152 - uint32_t h1_2 = info->views[1].max.height_pixels; 159 + w = imin(w, w_2); 160 + h = imin(h, h_2); 153 161 154 - #define imin(a, b) (a < b ? a : b) 155 - 156 - w0 = imin(w0, w0_2); 157 - h0 = imin(h0, h0_2); 158 - w1 = imin(w1, w1_2); 159 - h1 = imin(h1, h1_2); 162 + sys->views[i].recommendedImageRectWidth = w; 163 + sys->views[i].maxImageRectWidth = w_2; 164 + sys->views[i].recommendedImageRectHeight = h; 165 + sys->views[i].maxImageRectHeight = h_2; 166 + sys->views[i].recommendedSwapchainSampleCount = info->views[i].recommended.sample_count; 167 + sys->views[i].maxSwapchainSampleCount = info->views[i].max.sample_count; 168 + } 160 169 161 170 #undef imin 162 - 163 - // clang-format off 164 - sys->views[0].recommendedImageRectWidth = w0; 165 - sys->views[0].maxImageRectWidth = w0_2; 166 - sys->views[0].recommendedImageRectHeight = h0; 167 - sys->views[0].maxImageRectHeight = h0_2; 168 - sys->views[0].recommendedSwapchainSampleCount = info->views[0].recommended.sample_count; 169 - sys->views[0].maxSwapchainSampleCount = info->views[0].max.sample_count; 170 - 171 - sys->views[1].recommendedImageRectWidth = w1; 172 - sys->views[1].maxImageRectWidth = w1_2; 173 - sys->views[1].recommendedImageRectHeight = h1; 174 - sys->views[1].maxImageRectHeight = h1_2; 175 - sys->views[1].recommendedSwapchainSampleCount = info->views[1].recommended.sample_count; 176 - sys->views[1].maxSwapchainSampleCount = info->views[1].max.sample_count; 177 - // clang-format on 178 171 179 172 180 
173 /* ··· 459 452 if (viewConfigurationType != sys->view_config_type) { 460 453 return oxr_error(log, XR_ERROR_VIEW_CONFIGURATION_TYPE_UNSUPPORTED, "Invalid view configuration type"); 461 454 } 462 - 463 - OXR_TWO_CALL_FILL_IN_HELPER(log, viewCapacityInput, viewCountOutput, views, 2, view_configuration_view_fill_in, 464 - sys->views, XR_SUCCESS); 455 + if (sys->view_config_type == XR_VIEW_CONFIGURATION_TYPE_PRIMARY_MONO) { 456 + OXR_TWO_CALL_FILL_IN_HELPER(log, viewCapacityInput, viewCountOutput, views, 1, 457 + view_configuration_view_fill_in, sys->views, XR_SUCCESS); 458 + } else { 459 + OXR_TWO_CALL_FILL_IN_HELPER(log, viewCapacityInput, viewCountOutput, views, 2, 460 + view_configuration_view_fill_in, sys->views, XR_SUCCESS); 461 + } 465 462 }
+5 -1
src/xrt/state_trackers/prober/p_prober.c
··· 852 852 853 853 u_pp(dg, "\n\tIn roles:"); 854 854 855 + #define PH(IDENT) \ 856 + u_pp(dg, "\n\t\t%s: %s, view count: %d", #IDENT, \ 857 + xsysd->static_roles.IDENT ? xsysd->static_roles.IDENT->str : "<none>", \ 858 + xsysd->static_roles.IDENT ? xsysd->static_roles.IDENT->hmd->view_count : "<none>") 855 859 #define P(IDENT) u_pp(dg, "\n\t\t%s: %s", #IDENT, xsysd->static_roles.IDENT ? xsysd->static_roles.IDENT->str : "<none>") 856 860 #define PD(IDENT) u_pp(dg, "\n\t\t%s: %s", #IDENT, roles.IDENT >= 0 ? xsysd->xdevs[roles.IDENT]->str : "<none>") 857 861 858 - P(head); 862 + PH(head); 859 863 P(eyes); 860 864 P(face); 861 865 PD(left);
+2 -2
src/xrt/targets/sdl_test/sdl_program.cpp
··· 115 115 if (spp.c.base.slot.layer_count == 0) { 116 116 glClearColor(0.2f, 0.2f, 0.2f, 0.0f); 117 117 glClear(GL_COLOR_BUFFER_BIT); 118 - } else if (spp.c.base.slot.layers[0].data.type == XRT_LAYER_STEREO_PROJECTION || 118 + } else if (spp.c.base.slot.layers[0].data.type == XRT_LAYER_PROJECTION || 119 119 spp.c.base.slot.layers[0].data.type == XRT_LAYER_STEREO_PROJECTION_DEPTH) { 120 120 121 121 auto &l = spp.c.base.slot.layers[0]; 122 122 auto &ssc = *(sdl_swapchain *)l.sc_array[0]; 123 - GLuint tex = ssc.textures[l.data.stereo.l.sub.image_index]; 123 + GLuint tex = ssc.textures[l.data.proj.v[0].sub.image_index]; 124 124 125 125 glClearColor(0.2f, 0.0f, 0.0f, 0.0f); 126 126 glClear(GL_COLOR_BUFFER_BIT);