This repository has no description.
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

Made padding macros work. Fixed some other warnings to try to speed up the build. BREAKING: GPU_ErrorObject members changed order due to packing.

+73 -43
+53 -37
include/SDL_gpu.h
··· 81 81 82 82 // Struct padding for 32 or 64 bit alignment 83 83 #if SDL_GPU_BITNESS == 32 84 - #define GPU_PAD_1_TO_32 1 85 - #define GPU_PAD_2_TO_32 2 86 - #define GPU_PAD_3_TO_32 3 87 - #define GPU_PAD_1_TO_64 1 88 - #define GPU_PAD_2_TO_64 2 89 - #define GPU_PAD_3_TO_64 3 90 - #define GPU_PAD_4_TO_64 0 91 - #define GPU_PAD_5_TO_64 1 92 - #define GPU_PAD_6_TO_64 2 93 - #define GPU_PAD_7_TO_64 3 84 + #define GPU_PAD_1_TO_32 char _padding[1]; 85 + #define GPU_PAD_2_TO_32 char _padding[2]; 86 + #define GPU_PAD_3_TO_32 char _padding[3]; 87 + #define GPU_PAD_1_TO_64 char _padding[1]; 88 + #define GPU_PAD_2_TO_64 char _padding[2]; 89 + #define GPU_PAD_3_TO_64 char _padding[3]; 90 + #define GPU_PAD_4_TO_64 91 + #define GPU_PAD_5_TO_64 char _padding[1]; 92 + #define GPU_PAD_6_TO_64 char _padding[2]; 93 + #define GPU_PAD_7_TO_64 char _padding[3]; 94 94 #elif SDL_GPU_BITNESS == 64 95 - #define GPU_PAD_1_TO_32 1 96 - #define GPU_PAD_2_TO_32 2 97 - #define GPU_PAD_3_TO_32 3 98 - #define GPU_PAD_1_TO_64 1 99 - #define GPU_PAD_2_TO_64 2 100 - #define GPU_PAD_3_TO_64 3 101 - #define GPU_PAD_4_TO_64 4 102 - #define GPU_PAD_5_TO_64 5 103 - #define GPU_PAD_6_TO_64 6 104 - #define GPU_PAD_7_TO_64 7 95 + #define GPU_PAD_1_TO_32 char _padding[1]; 96 + #define GPU_PAD_2_TO_32 char _padding[2]; 97 + #define GPU_PAD_3_TO_32 char _padding[3]; 98 + #define GPU_PAD_1_TO_64 char _padding[1]; 99 + #define GPU_PAD_2_TO_64 char _padding[2]; 100 + #define GPU_PAD_3_TO_64 char _padding[3]; 101 + #define GPU_PAD_4_TO_64 char _padding[4]; 102 + #define GPU_PAD_5_TO_64 char _padding[5]; 103 + #define GPU_PAD_6_TO_64 char _padding[6]; 104 + #define GPU_PAD_7_TO_64 char _padding[7]; 105 105 #endif 106 106 107 107 #define GPU_FALSE 0 ··· 167 167 GPU_RendererEnum renderer; 168 168 int major_version; 169 169 int minor_version; 170 + 171 + GPU_PAD_4_TO_64 170 172 } GPU_RendererID; 171 173 172 174 ··· 330 332 struct GPU_Renderer* renderer; 331 333 GPU_Target* context_target; 332 334 GPU_Target* target; 
335 + void* data; 336 + 333 337 Uint16 w, h; 334 338 GPU_FormatEnum format; 335 339 int num_layers; ··· 347 351 GPU_WrapEnum wrap_mode_x; 348 352 GPU_WrapEnum wrap_mode_y; 349 353 350 - void* data; 351 354 int refcount; 352 355 353 356 GPU_bool using_virtual_resolution; ··· 377 380 float zoom_x, zoom_y; 378 381 float z_near, z_far; // z clipping planes 379 382 GPU_bool use_centered_origin; // move rotation/scaling origin to the center of the camera's view 380 - char _padding[GPU_PAD_7_TO_64]; 383 + 384 + GPU_PAD_7_TO_64 381 385 } GPU_Camera; 382 386 383 387 ··· 420 424 { 421 425 /*! SDL_GLContext */ 422 426 void* context; 427 + 428 + /*! Last target used */ 429 + GPU_Target* active_target; 430 + 431 + GPU_ShaderBlock current_shader_block; 432 + GPU_ShaderBlock default_textured_shader_block; 433 + GPU_ShaderBlock default_untextured_shader_block; 434 + 423 435 424 436 /*! SDL window ID */ 425 437 Uint32 windowID; ··· 437 449 int stored_window_h; 438 450 439 451 440 - /*! Last target used */ 441 - GPU_Target* active_target; 442 452 443 453 /*! Internal state */ 444 454 Uint32 current_shader_program; 445 455 Uint32 default_textured_shader_program; 446 456 Uint32 default_untextured_shader_program; 447 - 448 - GPU_ShaderBlock current_shader_block; 449 - GPU_ShaderBlock default_textured_shader_block; 450 - GPU_ShaderBlock default_untextured_shader_block; 451 457 452 458 GPU_BlendMode shapes_blend_mode; 453 459 float line_thickness; ··· 459 465 GPU_bool failed; 460 466 GPU_bool use_texturing; 461 467 GPU_bool shapes_use_blending; 462 - char _padding[GPU_PAD_5_TO_64]; 468 + 469 + GPU_PAD_5_TO_64 463 470 } GPU_Context; 464 471 465 472 ··· 509 516 GPU_bool use_depth_test; 510 517 GPU_bool use_depth_write; 511 518 GPU_bool is_alias; 512 - char _padding[GPU_PAD_1_TO_64]; 519 + 520 + GPU_PAD_1_TO_64 513 521 }; 514 522 515 523 /*! 
\ingroup Initialization ··· 674 682 int offset_bytes; // Number of bytes to skip at the beginning of 'values' 675 683 GPU_bool is_per_sprite; // Per-sprite values are expanded to 4 vertices 676 684 GPU_bool normalize; 677 - char _padding[GPU_PAD_2_TO_32]; 685 + 686 + GPU_PAD_2_TO_32 678 687 } GPU_AttributeFormat; 679 688 680 689 /*! \ingroup ShaderInterface */ 681 690 typedef struct GPU_Attribute 682 691 { 683 - int location; 684 692 void* values; // Expect 4 values for each sprite 685 693 GPU_AttributeFormat format; 694 + int location; 695 + 696 + GPU_PAD_4_TO_64 686 697 } GPU_Attribute; 687 698 688 699 /*! \ingroup ShaderInterface */ 689 700 typedef struct GPU_AttributeSource 690 701 { 691 - int num_values; 692 702 void* next_value; 703 + void* per_vertex_storage; // Could point to the attribute's values or to allocated storage 704 + 705 + int num_values; 693 706 // Automatic storage format 694 707 int per_vertex_storage_stride_bytes; 695 708 int per_vertex_storage_offset_bytes; 696 709 int per_vertex_storage_size; // Over 0 means that the per-vertex storage has been automatically allocated 697 - void* per_vertex_storage; // Could point to the attribute's values or to allocated storage 698 710 GPU_Attribute attribute; 699 711 GPU_bool enabled; 700 - char _padding[GPU_PAD_7_TO_64]; 712 + 713 + GPU_PAD_7_TO_64 701 714 } GPU_AttributeSource; 702 715 703 716 ··· 720 733 typedef struct GPU_ErrorObject 721 734 { 722 735 char* function; 723 - GPU_ErrorEnum error; 724 736 char* details; 737 + GPU_ErrorEnum error; 738 + 739 + GPU_PAD_4_TO_64 725 740 } GPU_ErrorObject; 726 741 727 742 ··· 778 793 779 794 /*! 0 for inverted, 1 for mathematical */ 780 795 GPU_bool coordinate_mode; 781 - char _padding[GPU_PAD_7_TO_64]; 796 + 797 + GPU_PAD_7_TO_64 782 798 }; 783 799 784 800
+1 -1
src/SDL_gpu.c
··· 788 788 GPU_ErrorObject GPU_PopErrorCode(void) 789 789 { 790 790 unsigned int i; 791 - GPU_ErrorObject result = {NULL, GPU_ERROR_NONE, NULL}; 791 + GPU_ErrorObject result = {NULL, NULL, GPU_ERROR_NONE}; 792 792 793 793 gpu_init_error_queue(); 794 794
+2 -2
src/SDL_gpu_matrix.c
··· 94 94 95 95 void GPU_CopyMatrixStack(const GPU_MatrixStack* source, GPU_MatrixStack* dest) 96 96 { 97 - int i; 97 + unsigned int i; 98 98 unsigned int matrix_size = sizeof(float) * 16; 99 99 if (source == NULL || dest == NULL) 100 100 return; ··· 111 111 112 112 void GPU_ClearMatrixStack(GPU_MatrixStack* stack) 113 113 { 114 - int i; 114 + unsigned int i; 115 115 for (i = 0; i < stack->storage_size; ++i) 116 116 { 117 117 SDL_free(stack->matrix[i]);
+17 -3
src/renderer_GL_common.inl
··· 3 3 4 4 #ifdef _MSC_VER 5 5 // Disable warning: selection for inlining 6 - #pragma warning(disable: 4514 4711) 6 + #pragma warning(disable: 4514 4711 4710) 7 7 // Disable warning: Spectre mitigation 8 8 #pragma warning(disable: 5045) 9 9 #endif ··· 25 25 26 26 // Check for C99 support 27 27 // We'll use it for intptr_t which is used to suppress warnings about converting an int to a ptr for GL calls. 28 - #if __STDC_VERSION__ >= 199901L 28 + #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L 29 29 #include <stdint.h> 30 30 #else 31 31 #define intptr_t long ··· 400 400 #else 401 401 static void GLAPIENTRY glBindFramebufferNOOP(GLenum target, GLuint framebuffer) 402 402 { 403 + (void)target; 404 + (void)framebuffer; 403 405 GPU_LogError("%s: Unsupported operation\n", __func__); 404 406 } 405 407 static GLenum GLAPIENTRY glCheckFramebufferStatusNOOP(GLenum target) 406 408 { 409 + (void)target; 407 410 GPU_LogError("%s: Unsupported operation\n", __func__); 408 411 return 0; 409 412 } 410 413 static void GLAPIENTRY glDeleteFramebuffersNOOP(GLsizei n, const GLuint* framebuffers) 411 414 { 415 + (void)n; 416 + (void)framebuffers; 412 417 GPU_LogError("%s: Unsupported operation\n", __func__); 413 418 } 414 419 static void GLAPIENTRY glFramebufferTexture2DNOOP(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level) 415 420 { 421 + (void)target; 422 + (void)attachment; 423 + (void)textarget; 424 + (void)texture; 425 + (void)level; 416 426 GPU_LogError("%s: Unsupported operation\n", __func__); 417 427 } 418 428 static void GLAPIENTRY glGenFramebuffersNOOP(GLsizei n, GLuint *ids) 419 429 { 430 + (void)n; 431 + (void)ids; 420 432 GPU_LogError("%s: Unsupported operation\n", __func__); 421 433 } 422 434 static void GLAPIENTRY glGenerateMipmapNOOP(GLenum target) 423 435 { 436 + (void)target; 424 437 GPU_LogError("%s: Unsupported operation\n", __func__); 425 438 } 426 439 ··· 5511 5524 { 5512 5525 GPU_CONTEXT_DATA* cdata = 
(GPU_CONTEXT_DATA*)context->data; 5513 5526 (void)renderer; 5527 + (void)num_vertices; 5514 5528 #ifdef SDL_GPU_USE_ARRAY_PIPELINE 5515 5529 glEnableClientState(GL_VERTEX_ARRAY); 5516 5530 glEnableClientState(GL_TEXTURE_COORD_ARRAY); ··· 5616 5630 { 5617 5631 GPU_CONTEXT_DATA* cdata = (GPU_CONTEXT_DATA*)context->data; 5618 5632 (void)renderer; 5619 - 5633 + (void)num_vertices; 5620 5634 #ifdef SDL_GPU_USE_ARRAY_PIPELINE 5621 5635 glEnableClientState(GL_VERTEX_ARRAY); 5622 5636 glEnableClientState(GL_COLOR_ARRAY);