<?xml version="1.0" encoding="UTF-8"?>
<?oxygen RNGSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng" type="xml"?>
<?oxygen SCHSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng"?>
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
    <?dbhtml filename="Tutorial 07.html" ?>
    <title>World in Motion</title>
    <para>In this tutorial, we will show how to build a world of objects with a dynamic, moving
        camera.</para>
    <section>
        <title>World Space</title>
        <para>In the perspective projection tutorial, we defined a projection matrix that transforms
            objects from a specific camera space to clip-space. This camera space was defined
            primarily to make our perspective transformation as simple as possible. The camera
            itself sits immobile at the origin (0, 0, 0). The camera always looks down the Z axis,
            with objects that have a negative Z being considered in front of the camera.</para>
        <para>All of the tutorials we have seen since then have had model transformations that go
            directly to camera space. While this functions, it is not as useful as it could be.
            Camera space is not a particularly flexible space. If we want to have a moving camera,
            obviously something needs to change.</para>
        <para>We could modify our perspective matrix generation functions, so that we can project
            onto a camera that has an arbitrary position and orientation. But really, that's too
            much work; camera space itself works just fine for our needs. It would be easier to just
            introduce an additional transformation.</para>
        <section>
            <title>Defining the World</title>
            <para>Right now, the problem is that we transform all of the objects from their
                individual model spaces to camera space directly. The only time the objects are in
                the same space relative to one another is when they are in camera space. So instead,
                we will introduce an intermediate space between model and camera space; let us call
                this space <glossterm>world space.</glossterm></para>
            <para>All objects will be transformed into world space. The camera itself will also have
                a particular position and orientation in world space. And since the camera has a
                known space, with a known position and orientation relative to world space, we have
                a transformation from world space to camera space.</para>
            <para>So, how do we define world space? Well, we defined model space by fiat: it's the
                space the vertex positions are in. Clip-space was defined for us. The only space
                thus far that we have had a real choice about is camera space. And we defined that
                in a way that gave us the simplest perspective projection matrix.</para>
            <para>The last part gives us a hint. What defines a space is not the matrix that
                transforms to that space, but the matrix that transforms <emphasis>from</emphasis>
                that space. And this makes sense; a transformation matrix contains the basis vectors
                and origin of the source space, as expressed in the destination coordinate system.
                Defining world space means defining the world-to-camera transform.</para>
            <para>We can define this transform with a matrix. But something said earlier gives us a
                more user-friendly mechanism. We stated that one of the properties of world space is
                that the camera itself has a position and orientation in world space. That position
                and orientation, expressed in world space, comprises the camera-to-world transform;
                do note the order: <quote>camera-to-world.</quote> We want the opposite:
                world-to-camera.</para>
            <para>The positioning is quite simple. Given the position of the camera in world space,
                the translation component of the world-to-camera matrix is the negation of that.
                This translates world space positions to be relative to the camera's position. So if
                the camera's position in world space is (3, 15, 4), then the translation component
                of the world-to-camera matrix is (-3, -15, -4).</para>
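            <para>In GLM terms, that translation could be built like this (a minimal sketch;
                    <varname>camPos</varname> stands in for the camera's world-space
                position):</para>
            <programlisting language="cpp">//Sketch: the translation component of the world-to-camera matrix
//is the negation of the camera's world-space position.
glm::mat4 camTranslate = glm::translate(glm::mat4(1.0f), -camPos);</programlisting>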
            <para>The orientation is a bit more troublesome. There are many ways to express an
                orientation. In the last tutorial, we expressed it as a rotation about an axis. For
                a camera, it is much more natural to express the orientation relative to something
                more basic: a set of directions.</para>
            <para>What a user most wants to do with a camera is look at something. So the direction
                that is dead center in camera space, that is directly along the -Z axis, is one
                direction vector. Another thing users want to do with cameras is rotate them around
                the viewing direction. So the second direction is the direction that is
                    <quote>up</quote> in camera space. In camera space, the up direction is
                +Y.</para>
            <para>We could specify a third direction, but that is unnecessary; it is implicit based
                on the other two and a single assumption. Because we want this to be a pure
                orientation matrix, the three basis vectors must be perpendicular to one another.
                Therefore, the third direction is the direction perpendicular to the other two. Of
                course, there are two directions perpendicular to these two vectors. One goes left
                relative to the camera's orientation and the other goes right. By convention, we
                pick the direction that goes right.</para>
            <para>So we define the camera's orientation (in world space) as being the viewing
                direction and the up direction. However, a view direction is often not the most
                useful way to orient a camera; it is frequently easier to select a point in world
                space to look at.</para>
            <para>Therefore, we can define the camera-to-world (again, note the order) transform
                based on the camera's position in the world, a target point to look at in the world,
                and an up direction in the world. To get the world-to-camera transform, we need to
                expend some effort.</para>
            <para>For the sake of reference, here is a diagram of the full transform for vertex
                positions, from the initial attribute loaded from the buffer object, to the final
                window-space position.</para>
            <figure>
                <title>Full Vertex Transformation Pipeline</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="TransformPipeline.svg"/>
                    </imageobject>
                </mediaobject>
            </figure>
        </section>
        <section>
            <title>Aerial View</title>
            <para>The tutorial project <phrase role="propername">World Space</phrase> demonstrates
                the use of a mobile camera in a world-space scene.</para>
            <figure>
                <title>World Space Scene</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="World%20Scene.png"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>The controls for this tutorial are as follows:</para>
            <table frame="all">
                <title>World Space Controls</title>
                <tgroup cols="3">
                    <colspec colname="c1" colnum="1" colwidth="1.0*"/>
                    <colspec colname="c2" colnum="2" colwidth="1.0*"/>
                    <colspec colname="c3" colnum="3" colwidth="1.0*"/>
                    <thead>
                        <row>
                            <entry>Function</entry>
                            <entry>Increase/Left</entry>
                            <entry>Decrease/Right</entry>
                        </row>
                    </thead>
                    <tbody>
                        <row>
                            <entry>Move camera target up/down</entry>
                            <entry><keycap>E</keycap></entry>
                            <entry><keycap>Q</keycap></entry>
                        </row>
                        <row>
                            <entry>Move camera target horizontally</entry>
                            <entry><keycap>A</keycap></entry>
                            <entry><keycap>D</keycap></entry>
                        </row>
                        <row>
                            <entry>Move camera target vertically</entry>
                            <entry><keycap>W</keycap></entry>
                            <entry><keycap>S</keycap></entry>
                        </row>
                        <row>
                            <entry>Rotate camera horizontally around target</entry>
                            <entry><keycap>L</keycap></entry>
                            <entry><keycap>J</keycap></entry>
                        </row>
                        <row>
                            <entry>Rotate camera vertically around target</entry>
                            <entry><keycap>I</keycap></entry>
                            <entry><keycap>K</keycap></entry>
                        </row>
                        <row>
                            <entry>Move camera towards/away from target</entry>
                            <entry><keycap>U</keycap></entry>
                            <entry><keycap>O</keycap></entry>
                        </row>
                    </tbody>
                </tgroup>
            </table>
            <para>In addition, if you hold down the shift key while pressing any of these keys, then
                the affected control will be much slower. This allows for more precise movements.
                The spacebar will toggle the appearance of an object indicating the position of the
                camera target point.</para>
            <para>This world is more complicated than anything we've seen up until now. There are a
                lot of objects being rendered, and most of them are composed of multiple
                objects.</para>
            <para>This tutorial is the first to incorporate a number of the tutorial framework's
                features. The <classname>Framework::MatrixStack</classname> class implements a
                matrix stack very much like we saw in the last tutorial. The main difference is that
                the stack class does not have public push/pop functions. To push a matrix onto the
                stack, we use a stack object, <classname>Framework::MatrixStackPusher</classname>.
                The constructor pushes the matrix and the destructor automatically pops. This way,
                we can never stack overflow or underflow.<footnote>
                    <para>This technique, using constructors and destructors to do this kind of
                        scope-bounded work, is called Resource Acquisition Is Initialization
                            (<acronym>RAII</acronym>). It is a common C++ resource management
                        technique. You can find more information about it <link
                            xlink:href="http://www.hackcraft.net/raii/">online</link>. If you are
                        unfamiliar with it, I suggest you become familiar with it.</para>
                </footnote></para>
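            <para>A pusher class needs nothing more than a reference to the stack it guards. Here
                is a minimal sketch of the idea (not the framework's actual source; it assumes the
                pusher is granted access, via friendship, to the stack's non-public
                    <function>Push</function> and <function>Pop</function> functions):</para>
            <programlisting language="cpp">class MatrixStackPusher
{
public:
    //Push on construction...
    explicit MatrixStackPusher(MatrixStack &amp;stack)
        : m_stack(stack)
    { m_stack.Push(); }
    
    //...pop on destruction, even if the scope is exited by an exception.
    ~MatrixStackPusher()
    { m_stack.Pop(); }
    
private:
    MatrixStack &amp;m_stack;
    
    //Not copyable, so the stack can never be popped twice for one push.
    MatrixStackPusher(const MatrixStackPusher &amp;);
    MatrixStackPusher &amp;operator=(const MatrixStackPusher &amp;);
};</programlisting>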
            <para>The <classname>Framework::Mesh</classname> class is much more complicated. It
                implements mesh loading from an XML-based file format. We will discuss some of the
                functioning of this class in detail in the next section. For now, let us say that
                this class's <function>Mesh::Render</function> function is equivalent to binding a
                vertex array object, rendering with one or more <function>glDraw*</function> calls,
                and then unbinding the VAO. It expects a suitable program object to be bound to the
                context.</para>
        </section>
        <section>
            <title>Multiple Programs</title>
            <para>Speaking of suitable program objects, this will be the first tutorial that uses
                more than one program object. This is the perfect time to bring up an important
                issue.</para>
            <para>Separate programs do not share uniform locations. That is, if you call
                    <function>glGetUniformLocation</function> on one program object, it will not
                necessarily return the same value as it would for a different program object. This is
                regardless of any other circumstance. You can declare the uniforms with the same
                name, with the same types, in the same order, but OpenGL will not
                    <emphasis>guarantee</emphasis> that you get the same uniform locations. It
                does not even guarantee that you get the same uniform locations on different
                run-throughs of the same executable.</para>
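            <para>As a short illustration (hypothetical names; <varname>programA</varname> and
                    <varname>programB</varname> are two separately-linked program objects), each
                program must be queried individually:</para>
            <programlisting language="cpp">//The same uniform name, queried from two different programs.
GLint locA = glGetUniformLocation(programA, "cameraToClipMatrix");
GLint locB = glGetUniformLocation(programB, "cameraToClipMatrix");
//locA and locB are not guaranteed to be equal. Never use a location
//queried from one program when another program is bound.</programlisting>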
            <para>This means that uniform locations are local to a program object. Uniform data is
                likewise local to each program object. For example:</para>
            <example>
                <title>Window Resizing</title>
                <programlisting language="cpp">void reshape (int w, int h)
{
    Framework::MatrixStack persMatrix;
    persMatrix.Perspective(45.0f, (w / (float)h), g_fzNear, g_fzFar);
    
    glUseProgram(UniformColor.theProgram);
    glUniformMatrix4fv(UniformColor.cameraToClipMatrixUnif, 1, GL_FALSE,
        glm::value_ptr(persMatrix.Top()));
    glUseProgram(ObjectColor.theProgram);
    glUniformMatrix4fv(ObjectColor.cameraToClipMatrixUnif, 1, GL_FALSE,
        glm::value_ptr(persMatrix.Top()));
    glUseProgram(UniformColorTint.theProgram);
    glUniformMatrix4fv(UniformColorTint.cameraToClipMatrixUnif, 1, GL_FALSE,
        glm::value_ptr(persMatrix.Top()));
    glUseProgram(0);
    
    glViewport(0, 0, (GLsizei) w, (GLsizei) h);
    glutPostRedisplay();
}</programlisting>
            </example>
            <para>Here is the new version of the window reshaping function, using the
                    <function>MatrixStack::Perspective</function> function to generate the correct
                perspective projection matrix. Notice that we must bind the 3 separate programs and
                individually update each one's uniform for the camera-to-clip matrix.</para>
        </section>
        <section>
            <title>Attributes and Programs</title>
            <para>Our three programs are made from 2 vertex shaders and 3 fragment shaders. The
                differences between these shaders are based on where they get their color
                information from.</para>
            <para>We create three programs. One that expects a per-vertex color and uses that to
                write the fragment color. One that expects a per-vertex color and multiplies that
                with a uniform color to determine the fragment color. And one that does not take a
                per-vertex color; it simply uses the uniform color as the fragment's color. All of
                these do the same positional transformation, which is a series of three matrix
                multiplications:</para>
            <example>
                <title>Position-only Vertex Shader</title>
                <programlisting language="glsl">#version 330

layout(location = 0) in vec4 position;

uniform mat4 cameraToClipMatrix;
uniform mat4 worldToCameraMatrix;
uniform mat4 modelToWorldMatrix;

void main()
{
    vec4 temp = modelToWorldMatrix * position;
    temp = worldToCameraMatrix * temp;
    gl_Position = cameraToClipMatrix * temp;
}</programlisting>
            </example>
            <sidebar>
                <title>Mismatched Attributes and Programs</title>
                <para>You may be wondering what happens if there is a mismatch between the
                    attributes provided by a VAO and the vertex shader inputs. For example, we could
                    use the position-only vertex shader with a mesh that provides attributes 0 and
                    1, with 0 being the position and 1 being the color.</para>
                <para>OpenGL is actually very lenient about this sort of thing. It also goes through
                    some effort to fully define what information the vertex shader gets in the event
                    of a mismatch.</para>
                <para>A VAO can provide attributes that a vertex shader does not use without
                    penalty. Well, there may be a performance penalty for reading unused
                    information, but it will still render correctly.</para>
                <para>If a vertex shader takes attributes that the VAO does not provide, then the
                    value the vertex shader gets will be the vector (0, 0, 0, 1). If the vertex
                    shader input vector has fewer than 4 elements, then it is filled in from that
                    vector, in order. A vec3 input that is not provided by the VAO will be (0, 0,
                    0).</para>
                <para>Speaking of which, if a VAO provides more components of an attribute vector
                    than the vertex shader expects (the VAO provides 4 elements, but the vertex
                    shader input is a vec2), then the vertex shader input will be filled in as much
                    as it can be. If the reverse is true, if the VAO does not provide enough
                    components of the vector, then the unfilled values are always filled in from the
                    (0, 0, 0, 1) vector.</para>
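                <para>In short, missing components are always taken from the (0, 0, 0, 1) vector.
                    For a vec4 vertex shader input, that works out as follows:</para>
                <programlisting>VAO provides:    vec4 input receives:
nothing          (0, 0, 0, 1)
float x          (x, 0, 0, 1)
vec2 (x, y)      (x, y, 0, 1)
vec3 (x, y, z)   (x, y, z, 1)</programlisting>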
            </sidebar>
        </section>
        <section>
            <title>Camera of the World</title>
            <para>The main rendering function implements the world-space and camera code. It begins
                by updating the world-to-camera matrix.</para>
            <example>
                <title>Upload World to Camera Matrix</title>
                <programlisting language="cpp">const glm::vec3 &amp;camPos = ResolveCamPosition();

Framework::MatrixStack camMatrix;
camMatrix.SetMatrix(CalcLookAtMatrix(camPos, g_camTarget,
    glm::vec3(0.0f, 1.0f, 0.0f)));

glUseProgram(UniformColor.theProgram);
glUniformMatrix4fv(UniformColor.worldToCameraMatrixUnif,
    1, GL_FALSE, glm::value_ptr(camMatrix.Top()));
glUseProgram(ObjectColor.theProgram);
glUniformMatrix4fv(ObjectColor.worldToCameraMatrixUnif,
    1, GL_FALSE, glm::value_ptr(camMatrix.Top()));
glUseProgram(UniformColorTint.theProgram);
glUniformMatrix4fv(UniformColorTint.worldToCameraMatrixUnif,
    1, GL_FALSE, glm::value_ptr(camMatrix.Top()));
glUseProgram(0);</programlisting>
            </example>
            <para>The function <function>ResolveCamPosition</function> computes the camera position,
                based on the user's input. <function>CalcLookAtMatrix</function> is the function
                that takes a camera position in the world, a point in the world to look at, and an
                up vector, and uses them to compute the world-to-camera matrix. We will look at that a
                bit later.</para>
            <para>Speaking of which, let's look at how <function>ResolveCamPosition</function>
                works. The basic idea of this camera system is that there is a target point, which
                is mobile. The camera's position is computed relative to this target point, so if
                the target moves, the camera will follow it perfectly.</para>
            <para>To do this, we use a special coordinate system trick. Instead of storing the
                relative position of the camera in a normal coordinate system, we instead use a
                    <glossterm>spherical coordinate system</glossterm>, also known as
                    <glossterm>polar coordinates</glossterm>.</para>
            <para>Previously, we said that a coordinate system was defined by a series of vectors
                and an origin point. This was a useful simplification of the possibilities; this is
                true of any coordinate system that follows the rules of <glossterm>Euclidean
                    geometry</glossterm>. Spherical coordinates (among many others) are
                non-Euclidean. For example, in Euclidean geometry, the sum of the angles of any
                triangle will add up to 180 degrees exactly. This is not true of spherical
                geometries or spherical coordinates. This is because <quote>lines</quote> in
                spherical geometries are curves when seen relative to Euclidean geometries.</para>
            <para>Spherical coordinates are three dimensional, so they have 3 values. One value,
                commonly given the name <quote>r</quote> (for radius) represents the distance of the
                coordinate from the center of the coordinate system. This value is on the range [0,
                ∞). The second value, called <quote>φ</quote> (phi), represents the angle in the
                elliptical plane. This value is on the range [0, 360). The third value, called
                    <quote>θ</quote> (theta), represents the angle above and below the elliptical
                plane. This value is on the range [0, 180], where 0 means straight up and 180 means
                straight down.</para>
            <para>This is much easier to see in diagram form:</para>
            <figure>
                <title>Spherical Coordinates</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Coord_system_SZ_0.svg" />
                    </imageobject>
                </mediaobject>
            </figure>
            <para>This is a very convenient coordinate system for positioning an object around
                another object, particularly if you want to move along spheres relative to another
                object. The transformation from spherical coordinates back to Euclidean geometric
                coordinates is implemented in <function>ResolveCamPosition.</function></para>
            <example>
                <title>Spherical to Euclidean Transform</title>
                <programlisting language="cpp">glm::vec3 ResolveCamPosition()
{
    float phi = Framework::DegToRad(g_sphereCamRelPos.x);
    float theta = Framework::DegToRad(g_sphereCamRelPos.y + 90.0f);
    
    float fSinTheta = sinf(theta);
    float fCosTheta = cosf(theta);
    float fCosPhi = cosf(phi);
    float fSinPhi = sinf(phi);
    
    glm::vec3 dirToCamera(fSinTheta * fCosPhi, fCosTheta, fSinTheta * fSinPhi);
    return (dirToCamera * g_sphereCamRelPos.z) + g_camTarget;
}</programlisting>
            </example>
            <para>The global variable <varname>g_sphereCamRelPos</varname> contains the spherical
                coordinates. The X value contains φ, the Y value contains θ, and the Z value is the
                radius.</para>
            <para>The Theta value used in our spherical coordinates is slightly different from the
                usual. Instead of being on the range [0, 180], it is on the range [-90, 90]; this is
                why there is an addition of 90 degrees before computing the theta angle in
                radians.</para>
            <para>The <varname>dirToCamera</varname> is just a direction vector. Only by scaling it
                by the radius (<varname>g_sphereCamRelPos.z</varname>) do we get the full
                conversion from spherical coordinates to Euclidean. Applying the camera target as
                an offset is what keeps the camera's position relative to the target.</para>
            <para>All of the above simply gets us a position for the camera and a location where the
                camera is looking. The matrix is computed by feeding these values into
                    <function>CalcLookAtMatrix</function>. It takes a position for the camera, a
                point in the world that the camera should be looking at, and a direction in
                world-space that should be considered <quote>up</quote> based on where the camera is
                looking.</para>
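            <para>Incidentally, GLM ships with an equivalent function:
                    <function>glm::lookAt</function> takes the same three arguments (the eye
                position, the target point, and the up vector) and likewise produces a
                world-to-camera matrix:</para>
            <programlisting language="cpp">//GLM's built-in equivalent of CalcLookAtMatrix.
glm::mat4 worldToCam = glm::lookAt(camPos, g_camTarget,
    glm::vec3(0.0f, 1.0f, 0.0f));</programlisting>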
            <para>The implementation of <function>CalcLookAtMatrix</function> is non-trivial. We
                will not go into detail explaining how it works, as it involves a lot of complex
                math concepts that have not been introduced. Using the function is much easier than
                understanding how it works. Even so, there is one major caveat with this function
                (and any function of the like).</para>
            <para>It is very important that the <quote>up</quote> direction is not along the same
                line as the direction from the camera position to the look at target. If up is very
                close to that direction then the generated matrix is no longer valid and unpleasant
                things will happen.</para>
            <para>Since it does not make physical sense for <quote>up</quote> to be directly behind
                or in front of the viewer, it makes a degree of sense that this would likewise
                produce a nonsensical matrix. This problem usually crops up in camera systems like
                the one devised here, where the camera is facing a certain point and is rotating
                around that point, without rotating the up direction at the same time. In the case
                of this code, the up/down angle is clamped to never get high enough to cause a
                problem.</para>
        </section>
        <section>
            <title>World Rendering</title>
            <para>Once the camera matrix is computed, it is farmed out to each of the programs.
                After that, rendering is pretty simple.</para>
            <para>The meshes we have loaded for this tutorial are unit sized. That is, they are one
                unit across in their major axes. They also are usually centered at the origin in
                their local coordinate system. This makes it easy to scale them to arbitrary sizes
                for any particular use.</para>
            <para>The ground is based on the unit plane mesh. This is just a square with the sides
                being unit length. This is rendered by the following code:</para>
            <example>
                <title>Draw the Ground</title>
                <programlisting language="cpp">Framework::MatrixStackPusher push(modelMatrix);

modelMatrix.Scale(glm::vec3(100.0f, 1.0f, 100.0f));

glUseProgram(UniformColor.theProgram);
glUniformMatrix4fv(UniformColor.modelToWorldMatrixUnif, 1, GL_FALSE,
    glm::value_ptr(modelMatrix.Top()));
glUniform4f(UniformColor.baseColorUnif, 0.302f, 0.416f, 0.0589f, 1.0f);
g_pPlaneMesh->Render();
glUseProgram(0);</programlisting>
            </example>
            <para>The unit plane mesh has no color attribute, so we use the
                    <varname>UniformColor</varname> program. We apply a scale matrix to the model
                stack, so that the 1x1 plane becomes 100x100 in size. After setting the color, the
                plane is rendered.</para>
            <para>All of the trees are drawn from the <function>DrawForest</function>
                function.</para>
            <example>
                <title>DrawForest Function</title>
                <programlisting language="cpp">void DrawForest(Framework::MatrixStack &amp;modelMatrix)
{
    for(int iTree = 0; iTree &lt; ARRAY_COUNT(g_forest); iTree++)
    {
        const TreeData &amp;currTree = g_forest[iTree];
        
        Framework::MatrixStackPusher push(modelMatrix);
        modelMatrix.Translate(glm::vec3(currTree.fXPos, 0.0f, currTree.fZPos));
        DrawTree(modelMatrix, currTree.fTrunkHeight, currTree.fConeHeight);
    }
}</programlisting>
            </example>
            <para>This function iterates over a large table and draws a tree for each element in
                that table. The table entries determine where in world space the tree is drawn and
                how tall it is. The location is stored as a translation in the matrix stack (after
                pushing), and the tree attributes are passed to the <function>DrawTree</function>
                function to render.</para>
            <para>The Parthenon is drawn from the <function>DrawParthenon</function> function. Since
                this draw function, like <function>DrawTree</function>, expects the matrix stack to
                transform it to its world-space position, the first step we see is applying a
                translation matrix to the stack.</para>
            <example>
                <title>Call to DrawParthenon</title>
                <programlisting language="cpp">Framework::MatrixStackPusher push(modelMatrix);
modelMatrix.Translate(glm::vec3(20.0f, 0.0f, -10.0f));

DrawParthenon(modelMatrix);</programlisting>
            </example>
            <para>The actual <function>DrawParthenon</function> function is pretty simple. It uses
                    <function>DrawColumn</function> to draw all of the columns at the various
                locations around the building. It draws scaled cubes for the base and ceiling, and
                uses the colored version of the cube for the headpiece at the front and the interior
                of the building. Columns are scaled cubes and cylinders.</para>
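            <para>We will not reproduce <function>DrawColumn</function> here, but a simplified,
                hypothetical sketch of the pattern it follows (the real code also sets uniforms and
                chooses its own scale values) looks like this: each piece pushes the stack, applies
                its own transform to a unit mesh, draws, and lets the pusher restore the
                stack.</para>
            <programlisting language="cpp">void DrawColumnSketch(Framework::MatrixStack &amp;modelMatrix, float fHeight)
{
    //The base: a unit cube, scaled flat and wide.
    {
        Framework::MatrixStackPusher push(modelMatrix);
        modelMatrix.Scale(glm::vec3(1.0f, 0.25f, 1.0f));
        //...set uniforms from modelMatrix.Top() and render the cube mesh.
    }
    //The shaft: a unit cylinder, stretched to the column's height.
    {
        Framework::MatrixStackPusher push(modelMatrix);
        modelMatrix.Translate(glm::vec3(0.0f, fHeight / 2.0f, 0.0f));
        modelMatrix.Scale(glm::vec3(0.8f, fHeight, 0.8f));
        //...set uniforms from modelMatrix.Top() and render the cylinder mesh.
    }
}</programlisting>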
        </section>
        <section>
            <title>Non-World Rendering</title>
            <para>The last part of the <function>display</function> function is more interesting.
                Pressing the <keycap>Spacebar</keycap> toggles the drawing of a representation of
                the camera target point. Here is how it gets drawn:</para>
            <example>
                <title>Draw Camera Target</title>
                <programlisting language="cpp">glDisable(GL_DEPTH_TEST);
glm::mat4 identity(1.0f);

Framework::MatrixStackPusher push(modelMatrix);

glm::vec3 cameraAimVec = g_camTarget - camPos;
modelMatrix.Translate(0.0f, 0.0f, -glm::length(cameraAimVec));
modelMatrix.Scale(1.0f, 1.0f, 1.0f);

glUseProgram(ObjectColor.theProgram);
glUniformMatrix4fv(ObjectColor.modelToWorldMatrixUnif, 1, GL_FALSE,
    glm::value_ptr(modelMatrix.Top()));
glUniformMatrix4fv(ObjectColor.worldToCameraMatrixUnif, 1, GL_FALSE,
    glm::value_ptr(identity));
g_pCubeColorMesh->Render();
glUseProgram(0);
glEnable(GL_DEPTH_TEST);</programlisting>
            </example>
            <para>The first thing that happens is that the depth test is turned off. This means that
                the camera target point will always be seen, no matter where it is. So if you move
                the target point inside the building or a tree, you will still see it. This is a
                useful technique for UI-type objects like this.</para>
            <para>The next important thing is that the world-to-camera matrix is set to identity.
                This means that the model-to-world matrix functions as a model-to-camera matrix. We
                are going back to positioning objects in front of the camera, which is what we
                actually want. The cube is translated down the -Z axis, which positions it directly
                in front of the camera. It positions the cube at the same distance from the camera
                as the camera would be from the target point.</para>
            <para>For the last few tutorials, we have been building up a transformation framework
                and hierarchy. Model space to world space to camera space to clip space. But the
                important thing to remember is that this framework is only useful to you if it does
                what you want. If you need to position an object directly in front of the camera,
                then simply remove world space from the equation entirely.</para>
            <para>We could even turn the depth test back on, and the camera target would interact
                correctly with the rest of the world. It is a part of the world, even though it
                seems like it goes through a different transform pipe.</para>
            <para>Indeed, you could render part of a scene with one perspective matrix and part with
                another. This is a common technique for first-person shooter games. The main world
                is rendered with one perspective, and the part of the first-person character that is
                visible is rendered with another matrix.</para>
            <para>Do not get so caught up in <quote>the way things ought to be done</quote> that you
                forget what you could have done if you broke free of the framework. Never hold so
                tightly to one way of doing something that it prevents you from seeing a much
                easier way to do what you need. For example, we could have applied the inverse of
                the camera matrix to the model-to-world matrix. Or we could just get rid of that
                matrix altogether and make everything work very easily and simply.</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut07 Primitive Drawing.html" ?>
        <title>Primitive Drawing</title>
        <para>We skipped over how the <function>Mesh::Render</function> function and mesh loading
            work. So let's cover that now.</para>
        <para>The XML-based mesh files define a number of vertex attribute arrays, followed by a
            number of rendering commands. The format fully supports all features of OpenGL,
            including options not previously discussed. One of these options deals with how vertex
            data is interpreted by OpenGL.</para>
        <para>The <function>glDraw*</function> commands, whether using indexed rendering or array
            rendering, establish a <glossterm>vertex stream.</glossterm> A vertex stream is an
            ordered list of vertices, with each vertex having a specific set of vertex attributes. A
            vertex stream is processed by the vertex shader in order.</para>
        <para>In array rendering, the order is determined by the order of the vertices in the
            attribute arrays. In indexed rendering, the order is determined by the order of the
            indices.</para>
        <para>Once the stream is processed by the vertex shader, it must be interpreted into
            something meaningful by OpenGL. Every <function>glDraw*</function> command takes, as its
            first parameter, a value that tells OpenGL how to interpret the stream. Thus far, we
            have used <literal>GL_TRIANGLES</literal>, but there are many options. This parameter is
            called the <glossterm>rendering mode</glossterm> or
            <glossterm>primitive</glossterm>.</para>
        <para>The parameter actually determines two things. The first is what kind of thing the
            vertex stream refers to; this is the <glossterm>primitive type</glossterm>.
            OpenGL can render points and lines in addition to triangles. These are all different
            primitive types.</para>
        <para>The other thing the parameter determines is how to interpret the vertex stream for
            that primitive type. This is the <glossterm>primitive representation</glossterm>.
                <literal>GL_TRIANGLES</literal> says more than simply that the primitive type is
            triangles.</para>
        <para>What <literal>GL_TRIANGLES</literal> means is that a vertex stream will generate
            triangles as follows: (0, 1, 2), (3, 4, 5), (6, 7, 8), …. The numbers represent vertices
            in the vertex stream, not indexed rendering indices. Among other things, this means that
            the vertex stream must have a length divisible by 3. For N vertices in the stream, this
            representation will generate N / 3 triangles.</para>
        <para>There are two other triangular primitive representations. They are both used in the
            cylinder mesh, so let's take a look at that.</para>
        <example>
            <title>Cylinder Mesh File</title>
            <programlisting language="xml">&lt;indices cmd="tri-fan" type="ushort" >0 1 3 5 7 9 11 ...&lt;/indices>
&lt;indices cmd="tri-fan" type="ushort" >61 60 58 56 54 ...&lt;/indices>
&lt;indices cmd="tri-strip" type="ushort" >1 2 3 4 5 6 7 8 ...&lt;/indices></programlisting>
        </example>
        <para>Each <quote>indices</quote> element maps to a call to
                <function>glDrawElements</function> with the given index array. The
                <quote>cmd</quote> attribute determines the primitive that will be passed to
                <function>glDrawElements</function>. The value <quote>triangles</quote> means to use
            the <literal>GL_TRIANGLES</literal> primitive.</para>
        <para>The <quote>tri-fan</quote> used above means to use the
                <literal>GL_TRIANGLE_FAN</literal> primitive. This primitive has the triangle
            primitive type, so this vertex stream will generate triangles. But it will generate them
            using a different representation.</para>
        <para><literal>GL_TRIANGLES</literal> takes each independent set of 3 vertices as a single
            triangle. <literal>GL_TRIANGLE_FAN</literal> takes the first vertex and holds on to it.
            Then, for every vertex and the vertex after it, a triangle is made out of these two plus
            the initial vertex. So <literal>GL_TRIANGLE_FAN</literal> will generate triangles as
            follows: (0, 1, 2), (0, 2, 3), (0, 3, 4), …. Visually, a triangle fan looks like
            this:</para>
        <figure>
            <title>Triangle Fan</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="TriangleFan.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>The numbers represent the order in which the vertices appear in the vertex stream. The red
            line shows the triangle edges that are directly specified by the vertex stream. All
            other edges are generated automatically by the primitive representation.</para>
        <para>This is why it is called a <quote>fan</quote>. The number of vertices in a triangle
            fan vertex stream must be 3 or greater, but can be otherwise any number. For N vertices
            in a stream, triangle fans will generate N-2 triangles.</para>
        <para>The cylinder mesh uses two fans to render the end caps of the cylinder.</para>
        <para>The <quote>tri-strip</quote> in the cylinder mesh represents the
                <literal>GL_TRIANGLE_STRIP</literal> primitive. As the name suggests, it has a
            triangle primitive type. The primitive representation means that every 3 adjacent
            vertices will generate a triangle, in order. So strips generate triangles as follows:
            (0, 1, 2), (1, 2, 3), (2, 3, 4), …. Visually, a triangle strip looks like this:</para>
        <figure>
            <title>Triangle Strip</title>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="TriangleStrip.svg"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>Like with triangle fans, the number of vertices must be 3 or greater, but can be any
            number otherwise. For N vertices in a stream, triangle strips will generate N-2
            triangles.</para>
        <para>The cylinder mesh uses a single triangle strip to render the sides of the
            cylinder.</para>
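        <para>To summarize the three representations, here is a sketch (plain C++, not OpenGL
            calls; <function>EmitTriangle</function> is a hypothetical stand-in for producing one
            triangle from three stream positions) of how each mode assembles triangles from an
            N-vertex stream:</para>
        <programlisting language="cpp">//GL_TRIANGLES: each independent set of 3 vertices.
for(int i = 0; i + 2 &lt; N; i += 3)
    EmitTriangle(i, i + 1, i + 2);    //N / 3 triangles

//GL_TRIANGLE_FAN: the first vertex, plus each adjacent pair.
for(int i = 1; i + 1 &lt; N; i++)
    EmitTriangle(0, i, i + 1);        //N - 2 triangles

//GL_TRIANGLE_STRIP: every 3 adjacent vertices, in order
//(ignoring the winding-order flip discussed below).
for(int i = 0; i + 2 &lt; N; i++)
    EmitTriangle(i, i + 1, i + 2);    //N - 2 triangles</programlisting>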
        <formalpara>
            <title>Winding Order</title>
            <para>There is one other issue with triangle strips. This has to do with the winding
                order of the triangles.</para>
        </formalpara>
        <para>The winding order for the triangles in a strip looks like this:</para>
        <figure>
            <title>Triangle Strips with Winding Order</title>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="StripWindingOrder.svg"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>Notice how it alternates between clockwise and counter-clockwise. This means that,
            regardless of what face you consider front, and what face you cull, you'll always lose
            about half of the faces.</para>
        <para>However, OpenGL is rather intelligent about this. Triangle strips do face culling
            differently. For every second triangle, the one whose winding order is opposite from the
            first triangle's order, the winding order is considered backwards for culling
            purposes.</para>
        <para>So if you have set the front face to be clockwise, and have face culling cull
            back-facing triangles, everything will work exactly as you expect so long as the order
            of the first triangle is correct. Every even numbered triangle will be culled if it has
            a clockwise winding, and every odd numbered triangle will be culled if it has a
            counter-clockwise winding.</para>
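        <para>For reference, this is the face-culling state under discussion (standard OpenGL
            calls, which the tutorials issue during initialization):</para>
        <programlisting language="cpp">glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);    //Cull back-facing triangles.
glFrontFace(GL_CW);     //Clockwise-wound triangles are front faces.</programlisting>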
    </section>
    <section>
        <?dbhtml filename="Tut07 Shared Uniforms.html" ?>
        <title>Shared Uniforms</title>
        <para>The <phrase role="propername">World Space</phrase> example had a few annoyances in it.
            Of particular pain was the fact that, whenever the perspective projection matrix or the
            world-to-camera matrix changed, we had to change uniforms in 3 programs. They all used
            the same value; it seems strange that we should have to go through so much trouble to
            change these uniforms.</para>
        <para>Also, 3 programs is a relatively simple case. When dealing with real examples, the
            number of programs can get quite large.</para>
        <para>There is a way to share uniforms between programs. To do this, we use a buffer object
            to store uniform data, and then tell our programs to use this particular buffer object
            to find their uniform data. A buffer object that stores uniforms is commonly called a
                <glossterm>uniform buffer object</glossterm>.</para>
        <para>It is important to understand that there is nothing special about a uniform buffer.
            Any of the things you could do with a regular buffer object can be done with a uniform
            buffer object. You can bind it to the <literal>GL_ARRAY_BUFFER</literal> and use it for
            vertex data, you can use it for indexed rendering with
                <literal>GL_ELEMENT_ARRAY_BUFFER</literal>, and many other things that buffer
            objects can be used for. Now granted, that doesn't mean that you should, only that you
            can.</para>
        <para>The example <phrase role="propername">World with UBO</phrase> uses a uniform buffer
            object to store the camera and perspective matrices.</para>
        <section>
            <title>Uniform Blocks</title>
            <para>This begins with how the vertex shaders are defined.</para>
            <example>
                <title>UBO-based Vertex Shader</title>
                <programlisting language="glsl">#version 330

layout(location = 0) in vec4 position;

layout(std140) uniform GlobalMatrices
{
    mat4 cameraToClipMatrix;
    mat4 worldToCameraMatrix;
};

uniform mat4 modelToWorldMatrix;

void main()
{
    vec4 temp = modelToWorldMatrix * position;
    temp = worldToCameraMatrix * temp;
    gl_Position = cameraToClipMatrix * temp;
}</programlisting>
            </example>
            <para>The definition of <type>GlobalMatrices</type> looks like a struct definition, but
                it is not. It defines a <glossterm>uniform block</glossterm>. A uniform block is a
                series of uniform definitions whose data is not stored in the program object, but
                instead must come from a uniform buffer.</para>
            <para>The name <type>GlobalMatrices</type> is used to identify this particular uniform
                block. This block has two members, both of the mat4 type. The order of the
                components in a uniform block is very important.</para>
            <para>Notice that nothing else needs to change in the vertex shader. The
                    <varname>modelToWorldMatrix</varname> is unchanged, and the components of the
                uniform block do not even need to be scoped with the <type>GlobalMatrices</type>
                name.</para>
            <para>The <quote>layout(std140)</quote> part modifies the definition of the uniform
                block. Specifically, it specifies the <glossterm>uniform block
                layout</glossterm>.</para>
            <para>Buffer objects are unformatted arrays of bytes. Therefore, something must
                determine how the shader interprets a uniform buffer object's contents. OpenGL
                itself defines this to a degree, but the layout qualifier modifies the
                definition.</para>
            <para>OpenGL is very clear about how each element within a uniform block is laid out.
                Floating-point values are just the C++ representation of floats, so you can copy
                them directly from objects like <type>glm::vec4</type>.</para>
            <para>Matrices are slightly trickier due to the column-major vs. row-major issue. The
                    <function>glUniformMatrix*</function> functions all had a parameter that defines
                what order the matrix data given to the function is in. Similarly, a
                    <quote>layout</quote> qualifier can specify <quote>row-major</quote> or
                    <quote>column-major</quote>; these tell OpenGL how the matrices are stored in
                the buffer object. The default is <quote>column-major,</quote> and since GLM stores
                its matrices in column-major order, we can use the defaults.</para>
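            <para>If we ever needed to override the default, the qualifier would simply be added to
                the layout list (a hypothetical variation; the tutorials never need it):</para>
            <programlisting language="glsl">layout(std140, row_major) uniform GlobalMatrices
{
    mat4 cameraToClipMatrix;
    mat4 worldToCameraMatrix;
};</programlisting>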
            <para>What OpenGL does not directly specify is the spacing <emphasis>between</emphasis>
                elements in the uniform block. This allows different hardware to position elements
                where it is most efficient for them. Some shader hardware can place 2
                    <type>vec3</type>'s directly adjacent to one another, so that they only take up
                6 floats. Other hardware cannot handle that, and must pad each <type>vec3</type> out
                to 4 floats.</para>
            <para>Normally, this would mean that, in order to set any values into the buffer object,
                you would have to query the program object for the byte offsets for each element in
                the uniform block.</para>
            <para>However, by using the <quote>std140</quote> layout, this is not necessary. The
                    <quote>std140</quote> layout has an explicit layout specification set down by
                OpenGL itself. It is basically a kind of lowest-common-denominator among the various
                different kinds of graphics hardware. The upside is that it allows you to easily
                know what the layout is without having to query it from OpenGL. The downside is that
                some space-saving optimizations may not be possible on certain hardware.</para>
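            <para>For example, under <quote>std140</quote> a mat4 is stored as four vec4 columns,
                16-byte aligned, for a total of 64 bytes. So the two members of
                    <type>GlobalMatrices</type> sit at offsets 0 and 64, and we can fill the buffer
                without querying anything (a sketch; the matrix variables are assumed to hold the
                current transforms, and <varname>g_GlobalMatricesUBO</varname> is the buffer object
                created below):</para>
            <programlisting language="cpp">//Under std140, the layout is known in advance:
//cameraToClipMatrix at offset 0, worldToCameraMatrix at offset 64.
glBindBuffer(GL_UNIFORM_BUFFER, g_GlobalMatricesUBO);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(glm::mat4),
    glm::value_ptr(cameraToClipMatrix));
glBufferSubData(GL_UNIFORM_BUFFER, sizeof(glm::mat4), sizeof(glm::mat4),
    glm::value_ptr(worldToCameraMatrix));
glBindBuffer(GL_UNIFORM_BUFFER, 0);</programlisting>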
            <para>One additional feature of <quote>std140</quote> is that the uniform block is
                sharable. Normally, OpenGL allows the GLSL compiler considerable leeway to make
                optimizations. In this instance, if a GLSL compiler detects that a uniform is unused
                in a program, it is allowed to mark it as unused.
                    <function>glGetUniformLocation</function> will return -1. It's actually legal to
                set a value to a location that is -1, but no data will actually be set.</para>
            <para>If a uniform block is marked with the <quote>std140</quote> layout, then the
                ability to disable uniforms within that block is entirely removed. All uniforms
                must have storage, even if this particular program does not use them. This means
                that, as long as you declare the same uniforms in the same order within a block, the
                storage for that uniform block will have the same layout in <emphasis>any</emphasis>
                program. This means that multiple different programs can use the same uniform
                buffer.</para>
            <para>The other two alternatives to <quote>std140</quote> are <quote>packed</quote> and
                    <quote>shared</quote>. The default, <quote>shared,</quote> prevents the uniform
                optimization, thus allowing the block's uniform buffer data to be shared among
                multiple programs. However, the user must still query layout information about where
                each uniform is stored. <quote>packed</quote> allows uniform optimization, so these
                blocks cannot be shared between programs at all.</para>
            <para>For our needs, <quote>std140</quote> is sufficient. It's also a good first step in
                any implementation; moving to <quote>packed</quote> or <quote>shared</quote> as
                needed should generally be done only as an optimization. The rules for the
                    <quote>std140</quote> layout are spelled out explicitly in the OpenGL
                Specification.</para>
        </section>
        <section>
            <title>Uniform Block Indices</title>
            <para>Uniforms inside a uniform block do not have individual uniform locations. After
                all, they do not have storage within a program object; their data comes from a buffer
                object.</para>
            <para>So instead of calling <function>glGetUniformLocation</function>, we have a new
                function:</para>
            <programlisting language="cpp">data.globalUniformBlockIndex =
    glGetUniformBlockIndex(data.theProgram, "GlobalMatrices");</programlisting>
            <para>The function <function>glGetUniformBlockIndex</function> takes a program object
                and the name of a uniform block. It returns a <glossterm>uniform block
                    index</glossterm> that is used to refer to this uniform block. This is similar
                to how a uniform location value is used to refer to a uniform, rather than directly
                using its string name.</para>
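            <para>One practical note: if the named block is not an active uniform block in the
                program, <function>glGetUniformBlockIndex</function> returns the special value
                    <literal>GL_INVALID_INDEX</literal>. A minimal sketch of checking for
                this:</para>
            <programlisting language="cpp">GLuint blockIndex = glGetUniformBlockIndex(data.theProgram, "GlobalMatrices");
if(blockIndex == GL_INVALID_INDEX)
{
    //The block name was misspelled, or the block does not exist in this program.
    //Report the error however the application prefers.
}</programlisting>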
        </section>
        <section>
            <title>Uniform Buffer Creation</title>
            <para>Now that the programs have a uniform block, we need to create a buffer object to
                store our uniforms in.</para>
            <example>
                <title>Uniform Buffer Creation</title>
                <programlisting language="cpp">glGenBuffers(1, &amp;g_GlobalMatricesUBO);
glBindBuffer(GL_UNIFORM_BUFFER, g_GlobalMatricesUBO);
glBufferData(GL_UNIFORM_BUFFER, sizeof(glm::mat4) * 2, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);</programlisting>
            </example>
            <para>For all intents and purposes, this is identical to the way we created other buffer
                objects. The only difference is the use of the <literal>GL_UNIFORM_BUFFER</literal>
                binding target.</para>
            <para>The <literal>GL_ARRAY_BUFFER</literal> target has a specific meaning. When
                something is bound to that target, calling
                    <function>glVertexAttribPointer</function> will cause the buffer object bound to
                that target to become the source for that particular attribute, as defined by the
                function call. The <literal>GL_ELEMENT_ARRAY_BUFFER</literal> target also has a
                meaning; it specifies where indices come from for indexed rendering. The element
                array binding is even stored as part of a VAO's data (recall that the array buffer
                binding is <emphasis>not</emphasis> stored in the VAO).</para>
            <para><literal>GL_UNIFORM_BUFFER</literal> does not really have an intrinsic meaning
                like these other two. Having something bound to this binding means nothing as far as
                any other function of OpenGL is concerned. Oh, you can call buffer object functions
                on it, like <function>glBufferData</function> as above. But it does not have any other role to play in
                rendering. The main reason to use it is to preserve the contents of more useful
                binding points. It also communicates to someone reading your code that this buffer
                object is going to be used to store uniform data.</para>
            <note>
                <para>This is not entirely 100% correct. OpenGL is technically allowed to infer
                    something about your intended use of a buffer object based on what target you
                        <emphasis>first</emphasis> use to bind it. So by allocating storage for this
                    buffer in <literal>GL_UNIFORM_BUFFER</literal>, we are signaling something to
                    OpenGL, which can change how it allocates storage for the buffer.</para>
                <para>However, OpenGL is <emphasis>not</emphasis> allowed to make any behavioral
                    changes based on this. It is still legal to use a buffer allocated on the
                        <literal>GL_UNIFORM_BUFFER</literal> target as a
                        <literal>GL_ARRAY_BUFFER</literal> or in any other buffer object usage. It
                    just may not be as fast as you might want.</para>
            </note>
            <para>We know that the size of this buffer needs to be two <type>glm::mat4</type>'s in
                size. The <quote>std140</quote> layout guarantees this. That and the size of
                    <type>glm::mat4</type>, which just so happens to correspond to how large a GLSL
                    <type>mat4</type> is when stored in a uniform buffer.</para>
            <para>The <function>reshape</function> function is guaranteed to be called after our
                    <function>init</function> function. That's why we can allocate this buffer
                without filling in a default matrix. The reshape function is as follows:</para>
            <example>
                <title>UBO-based Perspective Matrix</title>
                <programlisting language="cpp">void reshape (int w, int h)
{
    Framework::MatrixStack persMatrix;
    persMatrix.Perspective(45.0f, (h / (float)w), g_fzNear, g_fzFar);
    
    glBindBuffer(GL_UNIFORM_BUFFER, g_GlobalMatricesUBO);
    glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(glm::mat4),
        glm::value_ptr(persMatrix.Top()));
    glBindBuffer(GL_UNIFORM_BUFFER, 0);
    
    glViewport(0, 0, (GLsizei) w, (GLsizei) h);
    glutPostRedisplay();
}</programlisting>
            </example>
            <para>This function just uses <function>glBufferSubData</function> to upload the matrix
                data to the buffer object. Since we defined the perspective matrix as the first
                matrix in our uniform block, it is uploaded to byte 0.</para>
            <para>The <function>display</function> function is what uploads the world-to-camera
                matrix to the buffer object. It is quite similar to what it used to be:</para>
            <example>
                <title>UBO-based Camera Matrix</title>
                <programlisting language="cpp">const glm::vec3 &amp;camPos = ResolveCamPosition();

Framework::MatrixStack camMatrix;
camMatrix.SetMatrix(CalcLookAtMatrix(camPos, g_camTarget, glm::vec3(0.0f, 1.0f, 0.0f)));

glBindBuffer(GL_UNIFORM_BUFFER, g_GlobalMatricesUBO);
glBufferSubData(GL_UNIFORM_BUFFER, sizeof(glm::mat4), sizeof(glm::mat4),
    glm::value_ptr(camMatrix.Top()));
glBindBuffer(GL_UNIFORM_BUFFER, 0);</programlisting>
            </example>
            <para>The world-to-camera matrix is the second matrix, so we start the upload at the end
                of the previous matrix.</para>
        </section>
        <section>
            <title>Uniform Buffer Binding</title>
            <para>Thus far, we have a uniform buffer object into which we store matrices. And we
                have programs with a uniform block that needs a uniform buffer to get its
                uniform data from. Now, the final step is to create the association between the uniform
                block in the programs and the uniform buffer object itself.</para>
            <para>Your first thought might be that there would be a function like glUniformBuffer
                that takes a program, a uniform block index, and the uniform buffer to associate
                that block with. But this is not the case; attaching a uniform buffer to a program's
                block is more complicated. And this is a good thing if you think about it.</para>
            <para>It works like this. The OpenGL context (effectively a giant struct containing each
                piece of data used to render) has an array of <glossterm>uniform buffer binding
                    points</glossterm>. A buffer object can be bound to each of these binding
                points. For each uniform block in a program, there is a reference, not to a buffer
                object, but to one of these uniform buffer binding points. This reference is just a
                numerical index: 0, 1, 2, etc.</para>
            <para>A diagram should make it clearer:</para>
            <figure>
                <title>Uniform Buffer and Block Binding Points</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="UBOContextBinding.svg" />
                    </imageobject>
                </mediaobject>
            </figure>
            <para>The program object is given an index that represents one of the slots in the
                context. The uniform buffer is bound to one of those slots. Therefore, when you
                render with that program, the uniform buffer that is in the slot specified by the
                program will be where the program gets its uniform data from.</para>
            <para>Therefore, to use a uniform buffer, one first must tell the program object which
                binding point in the context to find the buffer. This association is made with the
                    <function>glUniformBlockBinding</function> function.</para>
            <programlisting language="cpp">glUniformBlockBinding(data.theProgram, data.globalUniformBlockIndex,
    g_iGlobalMatricesBindingIndex);</programlisting>
            <para>The first parameter is the program, the second is the uniform block index queried
                before. The third is the uniform buffer binding point that this block should
                use.</para>
            <para>After doing this for each program, the uniform buffer must be bound to that
                binding point. This is done with a new function,
                    <function>glBindBufferRange</function>.</para>
            <programlisting language="cpp">glBindBufferRange(GL_UNIFORM_BUFFER, g_iGlobalMatricesBindingIndex,
    g_GlobalMatricesUBO, 0, sizeof(glm::mat4) * 2);</programlisting>
            <para>This functions similarly to <function>glBindBuffer</function>; in addition to
                binding the buffer to the <literal>GL_UNIFORM_BUFFER</literal> target, it also binds
                the buffer to the given uniform buffer binding point. Lastly, it provides an offset
                and range, the last two parameters. This allows you to put uniform data in arbitrary
                places in a buffer object. You could have the uniform data for
                    <emphasis>several</emphasis> uniform blocks in several programs all in one
                buffer object. The range parameters would be how to say where that block's data
                begins and how big it is.</para>
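            <para>One caveat when packing several blocks into one buffer object: the offset given
                to <function>glBindBufferRange</function> must be a multiple of the
                implementation-defined <literal>GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT</literal>. A
                sketch of such packing, using hypothetical names:</para>
            <programlisting language="cpp">GLint uboAlignment = 0;
glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &amp;uboAlignment);

//Round the first block's size up to a legal starting offset for the second.
GLsizeiptr blockSize = sizeof(glm::mat4) * 2;
GLsizeiptr alignedOffset = ((blockSize + uboAlignment - 1) / uboAlignment) * uboAlignment;

GLuint sharedUBO = 0;
glGenBuffers(1, &amp;sharedUBO);
glBindBuffer(GL_UNIFORM_BUFFER, sharedUBO);
glBufferData(GL_UNIFORM_BUFFER, alignedOffset + blockSize, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);

//Bind two different ranges of the same buffer to two binding points.
glBindBufferRange(GL_UNIFORM_BUFFER, 0, sharedUBO, 0, blockSize);
glBindBufferRange(GL_UNIFORM_BUFFER, 1, sharedUBO, alignedOffset, blockSize);</programlisting>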
            <para>The reason this is better than directly binding a buffer object to the program
                object can be seen in exactly where all of this happens. Both of these functions are
                called as part of initialization code. <function>glUniformBlockBinding</function> is
                called right after creating the program, and similarly
                    <function>glBindBufferRange</function> is called immediately after creating the
                buffer object.</para>
            <para>The global constant <varname>g_iGlobalMatricesBindingIndex</varname> is, as the
                name suggests, global. By convention, all programs get their buffer data from this
                index. If you want to bind a different buffer to the global matrix binding, you only
                need to bind that buffer object to that binding index. You do not need to change any
                state in any program object.</para>
            <para>This means that you can establish conventions about where certain kinds of uniform
                blocks are stored among the list of context binding points. You do not need to
                change each program to affect them all.</para>
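            <para>For instance, pointing every program at a different set of global matrices is a
                single call, assuming a second, hypothetical buffer object:</para>
            <programlisting language="cpp">//Every program still reads from g_iGlobalMatricesBindingIndex; only the
//buffer occupying that binding point changes.
glBindBufferRange(GL_UNIFORM_BUFFER, g_iGlobalMatricesBindingIndex,
    someOtherMatricesUBO, 0, sizeof(glm::mat4) * 2);</programlisting>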
        </section>
        <section>
            <title>The Viewpoint</title>
            <para>In the <phrase role="propername">World Space</phrase> example, we drew the
                camera's look-at target directly in camera space, bypassing the world-to-camera
                matrix. Doing that with uniform buffers would be harder, since we would have to
                change the contents of the uniform buffer in the middle of the frame, between draw
                calls. This is not particularly
                difficult, but it could be a drain on performance.</para>
            <para>Instead, we just use the camera's target position to compute a model-to-world
                matrix that always positions the object at the target point.</para>
            <example>
                <title>Viewing Point with UBO</title>
                <programlisting language="cpp">glDisable(GL_DEPTH_TEST);

Framework::MatrixStackPusher push(modelMatrix);

modelMatrix.Translate(g_camTarget);
modelMatrix.Scale(1.0f, 1.0f, 1.0f);

glUseProgram(ObjectColor.theProgram);
glUniformMatrix4fv(ObjectColor.modelToWorldMatrixUnif, 1,
    GL_FALSE, glm::value_ptr(modelMatrix.Top()));
g_pCubeColorMesh->Render();
glUseProgram(0);
glEnable(GL_DEPTH_TEST);</programlisting>
            </example>
            <para>We do not get the neat effect of having the object always face the camera though.
                We still shut off the depth test, so that we can always see the object.</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut07 The Perils of World Space.html" ?>
        <title>The Perils of World Space</title>
        <para>World space is a very useful intermediary between camera space and model space. It
            makes it easy to position cameras and so forth. But there is a lingering issue when
            dealing with world space directly. Namely, the problem of large worlds and numerical
            precision.</para>
        <para>Let us say that you're trying to model a very large area down to fairly small
            accuracy. Your units are inches, and you want precision to within 0.25 inches. You want
            to cover an area with a radius of 1,000 miles, or 63,360,000 inches.</para>
        <para>Let us also say that the various pieces of this world all have their own model spaces
            and are transformed into their appropriate positions via a model-to-world transformation
            matrix. So the world is assembled out of various parts. This is almost always true to
            some degree.</para>
        <para>Let us also say that, while you do have a large world, you are not concerned about
            rendering <emphasis>all</emphasis> of it at any one time. The part of the world you're
            interested in is the part within view from the camera. And you're not interested in
            viewing incredibly distant objects; the far depth plane is going to cull out the world
            beyond a certain point from the camera.</para>
        <para>The problem is that a 32-bit floating-point number can only hold about 7 digits of
            precision. So towards the edges of the world, at around 63,000,000 inches, you only have
            precision to within about ±10 inches at best. This means that vertex positions closer
            than this will not be distinct from one another. Since your world is modeled down to
            0.25 inches of precision, this is a substantial problem. Indeed, even if you go out to
            6,000,000 inches, ten-times closer to the middle, you still have only ±1 inch, which is
            greater than the tolerance you need.</para>
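        <para>You can see this precision loss directly with a few lines of C++; this is a quick
            demonstration, not part of the tutorial code:</para>
        <programlisting language="cpp">#include &lt;cstdio&gt;

int main()
{
    float bigPos = 63000000.0f;     //near the edge of the world, in inches
    float nudged = bigPos + 0.25f;  //the quarter-inch detail we care about

    //Near 63,000,000 the gap between adjacent 32-bit floats is 4.0,
    //so adding 0.25 rounds right back to the original value.
    printf("%s\n", (bigPos == nudged) ? "identical" : "distinct");
    return 0;
}</programlisting>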
        <para>One solution, if you have access to powerful hardware capable of OpenGL 4.0 or better,
            is to use double-precision floating point values for your matrices and shader values.
            Double-precision floats, 64-bits in size, give you about 16 digits of precision, which
            is enough to measure the size of atoms in inches at more than 60 miles away from the
            origin.</para>
        <para>However, you would be sacrificing a lot of performance to do this. Even though the
            hardware <emphasis>can</emphasis> do double-precision math, it loses about 50% of its
            speed in doing so. And why bother, when the real solution is much easier?</para>
        <para>Let's look at our shader again.</para>
        <programlisting language="glsl">#version 330

layout(location = 0) in vec4 position;

uniform mat4 cameraToClipMatrix;
uniform mat4 worldToCameraMatrix;
uniform mat4 modelToWorldMatrix;

void main()
{
    vec4 worldPos = modelToWorldMatrix * position;
    vec4 cameraPos = worldToCameraMatrix * worldPos;
    gl_Position = cameraToClipMatrix * cameraPos;
}</programlisting>
        <para>The <varname>position</varname> is relatively close to the origin, since model
            coordinates tend to be close to the model space origin. So you have plenty of
            floating-point precision there. The <varname>cameraPos</varname> value is also close to
            the origin. Remember, the camera in camera space is <emphasis>at</emphasis> the origin.
            The world-to-camera matrix simply transforms the world to the camera's position. And as
            stated before, the only parts of the world that we are interested in seeing are the
            parts close to the camera. So there's quite a bit of precision available in
                <varname>cameraPos</varname>.</para>
        <para>And in <varname>gl_Position</varname>, everything is in clip-space, which is again
            relative to the camera. While you can have depth buffer precision problems, that only
            happens at far distances from the near plane. Again, since everything is relative to the
            camera, there is no precision problem.</para>
        <para>The only precision problem is with <varname>worldPos</varname>. Or rather, in the
                <varname>modelToWorldMatrix</varname>.</para>
        <para>Think about what <varname>modelToWorldMatrix</varname> and
                <varname>worldToCameraMatrix</varname> must look like regardless of the precision of
            the values. The model to world transform would have a massive translational component.
            We're moving from model space, which is close to the origin, to world-space which is far
            away. However, almost all of that will be immediately <emphasis>negated</emphasis>,
            because everything you're drawing is close to the camera. The camera matrix will have
            another massive translational component, since the camera is also far from the
            origin.</para>
        <para>This means that, if you <emphasis>combined</emphasis> the two matrices into one, you
            would have one matrix with a relatively small translation component. Therefore, you
            would not have a precision problem.</para>
        <para>Now, 32-bit floats on the CPU are no more precise than on the GPU. However, on the CPU
            you are guaranteed to be able to do double-precision math. And while it is slower than
            single-precision math, the CPU is not doing as many computations. You are not doing
            vector/matrix multiplies per vertex; you're doing them per <emphasis>object</emphasis>.
            And since the final result would actually fit within 32-bit precision limitations, the
            solution is obvious.</para>
        <para>The take-home point is this: avoid presenting OpenGL with an explicit model-to-world
            matrix. Instead, generate a matrix that goes straight from model space to
                <emphasis>camera</emphasis> space. You can use double-precision computations to do
            this if you need to; simply transform them back to single-precision when uploading the
            matrices to OpenGL.</para>
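        <para>In code, this approach might look like the following sketch, using GLM's
            double-precision types; the uniform location variable is hypothetical:</para>
        <programlisting language="cpp">//Build both transforms in double precision. Each contains a huge translation,
//but the product's translation is small, because the camera is near the object.
glm::dmat4 modelToWorld = glm::translate(glm::dmat4(1.0),
    glm::dvec3(63000000.0, 0.0, 0.0));
glm::dmat4 worldToCamera = glm::translate(glm::dmat4(1.0),
    glm::dvec3(-62999990.0, 0.0, -5.0));
glm::dmat4 modelToCamera = worldToCamera * modelToWorld;

//Only now truncate to single precision; the combined matrix fits comfortably.
glm::mat4 uploadMatrix = glm::mat4(modelToCamera);
glUniformMatrix4fv(modelToCameraUnif, 1, GL_FALSE, glm::value_ptr(uploadMatrix));</programlisting>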
    </section>
    <section>
        <?dbhtml filename="Tut07 In Review.html" ?>
        <title>In Review</title>
        <para>In this tutorial, you have learned the following:</para>
        <itemizedlist>
            <listitem>
                <para>World space is an intermediate space between model space and camera space. All
                    objects are transformed into it, and the position/orientation of the camera is
                    specified relative to it.</para>
            </listitem>
            <listitem>
                <para>OpenGL can processes a sequence of vertex data as triangles in different ways.
                    It can process the vertices as a list of triangles, a triangle fan, or a
                    triangle strip. Each of these has its own way of building triangles from a
                    sequence of vertices.</para>
            </listitem>
            <listitem>
                <para>Uniform data can be stored in buffer objects, so that multiple programs can
                    share the same uniform. Changing the buffer object data will automatically
                    change the data the programs get.</para>
            </listitem>
            <listitem>
                <para>It is usually not a good idea to have vertex positions in an explicit world
                    space. Doing so can lead to numerical precision problems if the vertex positions
                    are sufficiently far from 0.</para>
            </listitem>
        </itemizedlist>
        <section>
            <title>Further Study</title>
            <para>Play around with the world space tutorials in the following ways:</para>
            <itemizedlist>
                <listitem>
                    <para>In the World Space tutorial, we use 3 matrices. This requires an extra
                        matrix multiply, which is a wasteful per-vertex cost. Fold the camera matrix
                        into the perspective transformation matrix, so that only two matrices are
                        used. Any time the parameters of either matrix change, make sure to recompute both
                        and combine them together before uploading the combined world-to-clip-space
                        matrix.</para>
                </listitem>
                <listitem>
                    <para>Instead of folding the world-to-camera transform into the perspective
                        matrix, fold it into the model-to-world matrix instead. Simply push it onto
                        the same stack as everything else. The function <function>MatrixStack::ApplyMatrix</function> can
                        right-multiply an arbitrary matrix with the current matrix.</para>
                </listitem>
            </itemizedlist>
        </section>
        <section>
            <title>OpenGL Functions of Note</title>
            <glosslist>
                <glossentry>
                    <glossterm>glGetUniformBlockIndex</glossterm>
                    <glossdef>
                        <para>Retrieves the uniform block index for a particular uniform block name
                            from a program.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glUniformBlockBinding</glossterm>
                    <glossdef>
                        <para>Sets the uniform buffer binding index used by a particular uniform
                            block in a given program.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glBindBufferRange</glossterm>
                    <glossdef>
                        <para>Binds a buffer object to a particular indexed location, as well as
                            binding it to the given. When used with GL_UNIFORM_BUFFER, it binds the
                            buffer object to a particular uniform buffer binding point. It has range
                            parameters that can be used to effectively bind part of the buffer
                            object to an indexed location.</para>
                    </glossdef>
                </glossentry>
            </glosslist>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut07 Glossary.html" ?>
        <title>Glossary</title>
        <glosslist>
            <glossentry>
                <glossterm>world space</glossterm>
                <glossdef>
                    <para>An intermediate space between model space and camera space. Conceptually,
                        all objects are transformed into this space along the transformation
                        chain.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>spherical coordinate system, polar coordinates</glossterm>
                <glossdef>
                    <para>A three dimensional coordinate system where the three coordinates are not
                        defined by 3 values multiplied by vectors, but by two angles and a radius.
                        One angle specifies rotation around a point in a known plane. The other
                        angle specifies rotation above and below this plane. And the radius
                        specifies the distance from the origin. Spherical coordinates are not a
                        Euclidean geometry.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>Euclidean geometry</glossterm>
                <glossdef>
                    <para>A specific kind of coordinate system that follows certain axioms. For the
                        sake of brevity, consider it a <quote>regular</quote> coordinate system, one
                        that follows the simple, obvious rules one might expect of a 2D sheet of
                        paper.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>vertex stream</glossterm>
                <glossdef>
                    <para>An ordered sequence of vertices given to OpenGL by one of the
                            <function>glDraw*</function> series of functions.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>primitive, rendering mode</glossterm>
                <glossdef>
                    <para>The mechanism used by OpenGL for interpreting and rendering a vertex
                        stream. Every <function>glDraw*</function> function takes a rendering mode
                        as its first parameter. The primitive mode defines two things: the primitive
                        type and the primitive representation.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>primitive type</glossterm>
                <glossdef>
                    <para>The kind of object that OpenGL will draw with a vertex stream. OpenGL
                        draws triangles, but it can also draw points or other things.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>primitive representation</glossterm>
                <glossdef>
                    <para>The way the vertex stream is converted into one or more of the primitive
                        type. Each primitive type consumes a number of vertices; the primitive
                        representation specifies the manner in which the stream of length N is
                        converted into a number M of primitives.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>uniform buffer object</glossterm>
                <glossdef>
                    <para>A buffer object used to store data for uniform blocks.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>uniform block</glossterm>
                <glossdef>
                    <para>A named set of uniform definitions. This set of uniforms is not stored in
                        the program object, but instead is taken from a buffer object bound to a
                        uniform buffer binding point in the OpenGL rendering context.</para>
                    <para>There is a limit on the number of uniform blocks a single program object
                        can use. There is also a per-shader stage limit as well.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>uniform block layout</glossterm>
                <glossdef>
                    <para>The way a uniform block is layed out by the GLSL compiler. This determines
                        whether uniform blocks can be shared with other programs, and whether the
                        user needs to query the location of each uniform within the block.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>uniform block index</glossterm>
                <glossdef>
                    <para>A number, queried from a program object, that represents a particular
                        uniform block. This number is used to refer to the uniform block in other
                        functions.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>uniform buffer binding points</glossterm>
                <glossdef>
                    <para>An array of locations in the OpenGL context where uniform buffers can be
                        bound to. Programs can have their uniform blocks associated with one of the
                        entries in this array. When using such a program, it will use the buffer
                        object bound to that location to find the data for that uniform
                        block.</para>
                </glossdef>
            </glossentry>
        </glosslist>
    </section>
</chapter>