<?xml version="1.0" encoding="UTF-8"?>
<?oxygen RNGSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng" type="xml"?>
<?oxygen SCHSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng"?>
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
    <?dbhtml filename="Tutorial 05.html" ?>
    <title>Objects in Depth</title>
    <para>In this tutorial, we will look at how to deal with rendering multiple objects, as well as
        what happens when multiple objects overlap.</para>
    <section>
        <title>Multiple Objects in OpenGL</title>
        <para>The first step in looking at what happens when objects overlap is to draw more than
            one object. This is an opportunity to talk about a concept that will be useful in the
            future.</para>
        <para>An object, in terms of what you draw, can be considered the results of a single
            drawing call. Thus, an object is the smallest series of triangles that you draw with a
            single set of program object state.</para>
        <section>
            <title>Vertex Array Objects</title>
            <para>Up until now, every time we have attempted to draw anything, we needed to do
                certain setup work before the draw call. In particular, we have to do the following,
                for <emphasis>each</emphasis> vertex attribute used by the vertex shader:</para>
            <orderedlist>
                <listitem>
                    <para>Use <function>glEnableVertexAttribArray</function> to enable this
                        attribute.</para>
                </listitem>
                <listitem>
                    <para>Use <function>glBindBuffer</function>(<literal>GL_ARRAY_BUFFER</literal>)
                        to bind to the context the buffer object that contains the data for this
                        attribute.</para>
                </listitem>
                <listitem>
                    <para>Use <function>glVertexAttribPointer</function> to define the format of the
                        data for the attribute within the buffer object previously bound to
                            <literal>GL_ARRAY_BUFFER</literal>.</para>
                </listitem>
            </orderedlist>
            <para>The more attributes you have, the more work you need to do for each object. To
                alleviate this burden, OpenGL provides an object that stores all of the state needed
                for rendering: the <glossterm>Vertex Array Object</glossterm>
                    (<acronym>VAO</acronym>).</para>
            <para>VAOs are created with the <function>glGenVertexArrays</function> function. This
                works like <function>glGenBuffers</function> (and like most other OpenGL objects);
                you can create multiple objects with one call. As before, the objects are
                    <type>GLuint</type>s.</para>
            <para>VAOs are bound to the context with <function>glBindVertexArray</function>; this
                function doesn't take a target the way that <function>glBindBuffer</function> does.
                It only takes the VAO to bind to the context.</para>
            <para>Once the VAO is bound, calls to certain functions change the data in the bound
                VAO. Technically, they <emphasis>always</emphasis> have changed the VAO's state; all
                of the prior tutorials have these lines in the initialization function:</para>
            <programlisting language="cpp"><![CDATA[glGenVertexArrays(1, &vao);
glBindVertexArray(vao);]]></programlisting>
            <para>This creates a single VAO, which contains the vertex array state that we have been
                setting. This means that we have been changing the state of a VAO in all of the
                tutorials. We just didn't talk about it at the time.</para>
            <para>The following functions change VAO state. Therefore, if no VAO is bound to the
                context (if you call <function>glBindVertexArray(0)</function> or you do not bind a
                VAO at all), all of these functions, except as noted, will fail.</para>
            <itemizedlist>
                <listitem>
                    <para><function>glVertexAttribPointer</function>. Also
                            <function>glVertexAttribIPointer</function>, but we haven't talked about
                        that one yet.</para>
                </listitem>
                <listitem>
                    <para><function>glEnableVertexAttribArray</function>/<function>glDisableVertexAttribArray</function></para>
                </listitem>
                <listitem>
                    <para><function>glBindBuffer</function>(<literal>GL_ELEMENT_ARRAY_BUFFER</literal>):
                        Calling this without a VAO bound will not fail.</para>
                </listitem>
            </itemizedlist>
            <sidebar>
                <title>Buffer Binding and Attribute Association</title>
                <para>You may notice that
                        <function>glBindBuffer</function>(<literal>GL_ARRAY_BUFFER</literal>) is not
                    on that list, even though it is part of the attribute setup for rendering. The
                    binding to <literal>GL_ARRAY_BUFFER</literal> is not part of a VAO because the
                    association between a buffer object and a vertex attribute does
                        <emphasis>not</emphasis> happen when you call
                        <function>glBindBuffer</function>(<literal>GL_ARRAY_BUFFER</literal>). This
                    association happens when you call
                    <function>glVertexAttribPointer</function>.</para>
                <para>When you call <function>glVertexAttribPointer</function>, OpenGL takes
                    whatever buffer is <emphasis>at the moment of this call</emphasis> bound to
                        <literal>GL_ARRAY_BUFFER</literal> and associates it with the given vertex
                    attribute. Think of the <literal>GL_ARRAY_BUFFER</literal> binding as a global
                    pointer that <function>glVertexAttribPointer</function> reads. So you are free
                    to bind whatever you want or nothing at all to
                        <literal>GL_ARRAY_BUFFER</literal>
                    <emphasis>after</emphasis> making a <function>glVertexAttribPointer</function>
                    call; it will affect <emphasis>nothing</emphasis> in the final rendering. So
                    VAOs do store which buffer objects are associated with which attributes; but
                    they do not store the <literal>GL_ARRAY_BUFFER</literal> binding itself.</para>
                <para>If you want to know why <function>glVertexAttribPointer</function> doesn't
                    simply take a buffer object rather than requiring this bind+call mechanism, it
                    is again because of legacy API cruft. When buffer objects were first introduced,
                    they were designed to impact the API as little as possible. So the old
                        <function>glVertexAttribPointer</function> simply changed its behavior
                    depending on whether something was bound to <literal>GL_ARRAY_BUFFER</literal>
                    or not. Nowadays, since this function will fail if nothing is bound to
                        <literal>GL_ARRAY_BUFFER</literal>, it is simply an annoyance.</para>
            </sidebar>
            <para>This allows you to set up a VAO early on, during initialization, and then simply
                bind it and call a rendering function to draw your object. Be advised when using a
                VAO in this way: VAOs are <emphasis>not</emphasis> immutable. Calling any of the
                above functions will change the data stored in the VAO.</para>
        </section>
        <section>
            <title>Indexed Drawing</title>
            <para>In the last tutorial, we drew a rectangular prism. If you looked carefully at the
                vertex data, you may have noticed that a lot of vertex data was frequently repeated.
                To draw one face of the cube, we were required to have 6 vertices; the two shared
                vertices (along the shared line between the two triangles) had to be in the buffer
                object twice.</para>
            <para>For a simple case like ours, this is only a minor increase in the size of the
                vertex data. The compact form of the vertex data could be 4 vertices per face, or 24
                vertices total, while the expanded version we used took 36 total vertices. However,
                when looking at real meshes, like human-like characters and so forth that have
                thousands if not millions of vertices, sharing vertices becomes a major benefit in
                both performance and memory size. Removing duplicate data can shrink the size of the
                vertex data by 2x or greater in many cases.</para>
            <para>In order to remove this extraneous data, we must perform <glossterm>indexed
                    drawing</glossterm>, rather than the <glossterm>array drawing</glossterm> we
                have been doing up until now. In an earlier tutorial, we defined
                    <function>glDrawArrays</function> conceptually as the following
                pseudo-code:</para>
            <example>
                <title>Draw Arrays Implementation</title>
                <programlisting language="cpp"><![CDATA[void glDrawArrays(GLenum mode, GLint start, GLint count)
{
    for(GLint element = start; element < start + count; element++)
    {
        VertexShader(positionAttribArray[element], colorAttribArray[element]);
    }
}]]></programlisting>
            </example>
            <para>This defines how <glossterm>array drawing</glossterm> works. You start with a
                particular index into the buffers, defined by the <varname>start</varname>
                parameter, and proceed forward by <varname>count</varname> vertices.</para>
            <para>In order to share attribute data between multiple triangles, we need some way to
                random-access the attribute arrays, rather than sequentially accessing them. This is
                done with an <glossterm>element array</glossterm>, also known as an <glossterm>index
                    array</glossterm>.</para>
            <para>Let's assume you have the following attribute array data:</para>
            <programlisting>  Position Array:  Pos0, Pos1, Pos2, Pos3
  Color Array:     Clr0, Clr1, Clr2, Clr3</programlisting>
            <para>You can use <function>glDrawArrays</function> to render either the first 3
                vertices as a triangle, or the last 3 vertices as a triangle (using a
                    <varname>start</varname> of 1 and <varname>count</varname> of 3). However, with
                the right element array, you can render 4 triangles from just these 4
                vertices:</para>
            <programlisting>  Element Array: 0, 1, 2,  0, 2, 3,  0, 3, 1,  1, 2, 3</programlisting>
            <para>This will cause OpenGL to generate the following sequence of vertices:</para>
            <programlisting>  (Pos0, Clr0), (Pos1, Clr1), (Pos2, Clr2),
  (Pos0, Clr0), (Pos2, Clr2), (Pos3, Clr3),
  (Pos0, Clr0), (Pos3, Clr3), (Pos1, Clr1),
  (Pos1, Clr1), (Pos2, Clr2), (Pos3, Clr3),</programlisting>
            <para>12 vertices, which generate 4 triangles.</para>
            <sidebar>
                <title>Multiple Attributes and Index Arrays</title>
                <para>There is only <emphasis>one</emphasis> element array, and the indices fetched
                    from the array are used for <emphasis>all</emphasis> attributes of the vertex
                    arrays. So you cannot have an element array for positions and a separate one for
                    colors; they all have to use the same element array.</para>
                <para>This means that there can and often will be some duplication within a
                    particular attribute array. For example, in order to have solid face colors, we
                    will still have to replicate the color for every position of that triangle. And
                    corner positions that are shared between two triangles that have different
                    colors will still have to be duplicated in different vertices.</para>
                <para>It turns out that, for most meshes, duplication of this sort is fairly rare.
                    Most meshes are smooth across their surface, so different attributes don't
                    generally pop from location to location. Shared edges typically use the same
                    attributes for both triangles along the edges. The simple cubes and the like
                    that we use are one of the few cases where a per-attribute index would have a
                    significant benefit.</para>
            </sidebar>
            <para>Now that we understand how indexed drawing works, we need to know how to set it up
                in OpenGL. Indexed drawing requires two things: a properly-constructed element array
                and using a new drawing command to do the indexed drawing.</para>
            <para>Element arrays, as you might guess, are stored in buffer objects. They have a
                special buffer object binding point, <literal>GL_ELEMENT_ARRAY_BUFFER</literal>. You
                can use this buffer binding point for normal maintenance of a buffer object
                (allocating memory with <function>glBufferData</function>, etc.), just like
                    <literal>GL_ARRAY_BUFFER</literal>. But it also has a special meaning to OpenGL:
                indexed drawing is only possible when a buffer object is bound to this binding
                point, and the element array comes from this buffer object.</para>
            <note>
                <para>All buffer objects in OpenGL are the same, regardless of what target they are
                    bound to; buffer objects can be bound to multiple targets. So it is perfectly
                    legal to use the same buffer object to store vertex attributes and element
                    arrays (and, FYI, any data for any other use of buffer objects that exists in
                    OpenGL). Obviously, the different data would be in separate regions of the
                    buffer.</para>
            </note>
            <para>In order to do indexed drawing, we must bind the buffer to
                    <literal>GL_ELEMENT_ARRAY_BUFFER</literal> and then call
                    <function>glDrawElements</function>.</para>
            <funcsynopsis>
                <funcprototype>
                    <funcdef>void <function>glDrawElements</function></funcdef>
                    <paramdef>GLenum <parameter>mode</parameter></paramdef>
                    <paramdef>GLsizei <parameter>count</parameter></paramdef>
                    <paramdef>GLenum <parameter>type</parameter></paramdef>
                    <paramdef>GLsizeiptr <parameter>indices</parameter></paramdef>
                </funcprototype>
            </funcsynopsis>
            <para>The first parameter is the same as the first parameter of
                    <function>glDrawArrays</function>. The
                    <parameter>count</parameter> parameter defines how many indices will be pulled
                from the element array. The <parameter>type</parameter> field defines what the basic
                type of the indices in the element array are. For example, if the indices are stored
                as 16-bit unsigned shorts (GLushort), then this field should be
                    <literal>GL_UNSIGNED_SHORT</literal>. This allows the user the freedom to use
                whatever size of index they want. <literal>GL_UNSIGNED_BYTE</literal> and
                    <literal>GL_UNSIGNED_INT</literal> (32-bit) are also allowed; indices must be
                unsigned.</para>
            <para>The last parameter is the byte-offset into the element array at which the index
                data begins. Index data (and vertex data, for that matter) should always be aligned
                to its size. So if we are using 16-bit unsigned shorts for indices, then
                    <parameter>indices</parameter> should be an even number.</para>
            <para>This function can be defined by the following pseudo-code:</para>
            <example>
                <title>Draw Elements Implementation</title>
                <programlisting language="cpp"><![CDATA[GLvoid *elementArray;

void glDrawElements(GLenum mode, GLint count, GLenum type, GLsizeiptr indices)
{
    GLtype *ourElementArray = (type*)((GLbyte *)elementArray + indices);

    for(GLint elementIndex = 0; elementIndex < count; elementIndex++)
    {
        GLint element = ourElementArray[elementIndex];
        VertexShader(positionAttribArray[element], colorAttribArray[element]);
    }
}]]></programlisting>
            </example>
            <para>The <varname>elementArray</varname> represents the buffer object bound to
                    <literal>GL_ELEMENT_ARRAY_BUFFER</literal>.</para>
        </section>
        <section>
            <title>Multiple Objects</title>
            <para>The tutorial project <phrase role="propername">Overlap No Depth</phrase> uses VAOs
                to draw two separate objects. These objects are rendered using indexed drawing. The
                setup for this shows one way to have the attribute data for multiple objects stored
                in a single buffer.</para>
            <para>For this tutorial, we will be drawing two objects. They are both wedges, with the
                sharp end facing the viewer. The difference between them is that one is horizontal
                and the other is vertical on the screen.</para>
            <para>The shaders are essentially unchanged from before. We are using the perspective
                matrix shader from the last tutorial, with modifications to preserve the aspect
                ratio of the scene. The only difference is the pre-camera offset value; in this
                tutorial, it is a full 3D vector, which allows us to position each wedge in the
                scene.</para>
            <para>The initialization has changed, allowing us to create our VAOs once at start-up
                time, then use them to do the rendering. The initialization code is as
                follows:</para>
            <example>
                <title>VAO Initialization</title>
                <programlisting language="cpp"><![CDATA[void InitializeVertexArrayObjects()
{
    glGenVertexArrays(1, &vaoObject1);
    glBindVertexArray(vaoObject1);
    
    size_t colorDataOffset = sizeof(float) * 3 * numberOfVertices;
    
    glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
    glEnableVertexAttribArray(0);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)colorDataOffset);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);
    
    glBindVertexArray(0);
    
    glGenVertexArrays(1, &vaoObject2);
    glBindVertexArray(vaoObject2);
    
    size_t posDataOffset = sizeof(float) * 3 * (numberOfVertices/2);
    colorDataOffset += sizeof(float) * 4 * (numberOfVertices/2);

    //Use the same buffer object previously bound to GL_ARRAY_BUFFER.
    glEnableVertexAttribArray(0);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)posDataOffset);
    glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)colorDataOffset);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);
    
    glBindVertexArray(0);
}]]></programlisting>
            </example>
            <para>This code looks complicated, but it is really just the rendering code we have seen
                before. The offset computations for the <function>glVertexAttribPointer</function>
                calls are more complex, due to having the data for 2 objects stored in a single
                buffer. But overall it is the same code.</para>
            <para>The code generates 2 VAOs, binds them, then sets their state. Recall that, while
                the <literal>GL_ARRAY_BUFFER</literal> binding is not part of the VAOs state, the
                    <literal>GL_ELEMENT_ARRAY_BUFFER</literal> binding <emphasis>is</emphasis> part
                of that state. So these VAOs store the attribute array data and the element buffer
                data; everything necessary to render each object except for the actual drawing
                call.</para>
            <para>In this case, both objects use the same element buffer. However, since the element
                buffer binding is part of the VAO state, it <emphasis>must</emphasis> be set into
                each VAO individually. Notice that we only set the
                    <literal>GL_ARRAY_BUFFER</literal> binding once, but the
                    <literal>GL_ELEMENT_ARRAY_BUFFER</literal> is set for each VAO.</para>
            <note>
                <para>If you look at the vertex position attribute and the shader, you will notice
                    that we now use a 3-component position vector rather than a 4-component one.
                    This saves on data, yet our matrix math shouldn't work, since you cannot
                    multiply a 4x4 matrix with a 3-component vector.</para>
                <para>This is a subtle feature of OpenGL. If you attempt to multiply a matrix by a
                    vector that is one size smaller than the matrix, it will assume that the last
                    coordinate missing from the vector will be 1.0. This means that we do not have
                    to spend precious buffer object memory on a value we know to be 1.0.</para>
            </note>
            <para>Though the initialization code has been expanded, the rendering code is quite
                simple:</para>
            <example>
                <title>VAO and Indexed Rendering Code</title>
                <programlisting language="cpp"><![CDATA[    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    
    glUseProgram(theProgram);
    
    glBindVertexArray(vaoObject1);
    glUniform3f(offsetUniform, 0.0f, 0.0f, 0.0f);
    glDrawElements(GL_TRIANGLES, ARRAY_COUNT(indexData), GL_UNSIGNED_SHORT, 0);
    
    glBindVertexArray(vaoObject2);
    glUniform3f(offsetUniform, 0.0f, 0.0f, -1.0f);
    glDrawElements(GL_TRIANGLES, ARRAY_COUNT(indexData), GL_UNSIGNED_SHORT, 0);
    
    glBindVertexArray(0);
    glUseProgram(0);
    
    glutSwapBuffers();
    glutPostRedisplay();]]></programlisting>
            </example>
            <para>We bind a VAO, set its uniform data (in this case, to position the object
                properly), and then we draw it with a call to <function>glDrawElements</function>.
                This step is repeated for the second object.</para>
            <para>Running this tutorial will show the following image:</para>
            <figure>
                <title>Overlapping Objects</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Overlap%20No%20Depth.png" contentwidth="3in"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>The two objects are essentially flipped versions of the same one, a wedge. One
                object appears smaller than the other because it is farther away, in terms of its Z
                distance to the camera. We are using a perspective transform, so it makes sense that
                more distant objects appear smaller. However, if the smaller object is behind the
                larger one, why is it rendered on top of the one in front?</para>
            <para>Before we solve this mystery, there is one minor issue we should cover
                first.</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut05 Optimization Base Vertex.html" ?>
        <title>Optimization: Base Vertex</title>
        <para>Using VAOs can dramatically simplify code. However, VAOs are not always the best case
            for performance, particularly if you use a lot of separate buffer objects.</para>
        <para>Binding a VAO for rendering can be an expensive proposition. Therefore, if there is a
            way to avoid binding one, then it can provide a performance improvement, if the program
            is currently bottlenecked on the CPU.</para>
        <para>Our two objects have much in common. They use the same vertex attribute indices, since
            they are being rendered with the same program object. They use the same format for each
            attribute (3 floats for positions, 4 floats for colors). The vertex data even comes from
            the same buffer object.</para>
        <para>Indeed, the <emphasis>only</emphasis> difference between the two objects is what
            offset each attribute uses. And even this is quite minimal, since the difference between
            the offsets is a constant factor of the size of each attribute.</para>
        <para>Look at the vertex data in the buffer object:</para>
        <example>
            <title>Vertex Attribute Data Abridged</title>
            <programlisting language="cpp">//Object 1 positions
LEFT_EXTENT,    TOP_EXTENT,       REAR_EXTENT,
LEFT_EXTENT,    MIDDLE_EXTENT,    FRONT_EXTENT,
RIGHT_EXTENT,   MIDDLE_EXTENT,    FRONT_EXTENT,

...

RIGHT_EXTENT,   TOP_EXTENT,       REAR_EXTENT,
RIGHT_EXTENT,   BOTTOM_EXTENT,    REAR_EXTENT,

//Object 2 positions
TOP_EXTENT,     RIGHT_EXTENT,     REAR_EXTENT,
MIDDLE_EXTENT,  RIGHT_EXTENT,     FRONT_EXTENT,
MIDDLE_EXTENT,  LEFT_EXTENT,      FRONT_EXTENT,

...

TOP_EXTENT,     RIGHT_EXTENT,     REAR_EXTENT,
TOP_EXTENT,     LEFT_EXTENT,      REAR_EXTENT,
BOTTOM_EXTENT,  LEFT_EXTENT,      REAR_EXTENT,

//Object 1 colors
GREEN_COLOR,
GREEN_COLOR,
GREEN_COLOR,

...

BROWN_COLOR,
BROWN_COLOR,

//Object 2 colors
RED_COLOR,
RED_COLOR,
RED_COLOR,

...

GREY_COLOR,
GREY_COLOR,</programlisting>
        </example>
        <para>Notice how the attribute array for object 2 immediately follows its corresponding
            attribute array for object 1. So really, instead of four attribute arrays, we
            have just two attribute arrays.</para>
        <para>If we were doing array drawing, we could simply have one VAO, which sets up the
            beginning of both combined attribute arrays. We would still need 2 separate draw calls,
            because there is a uniform that is different for each object. But our rendering code
            could look like this:</para>
        <example>
            <title>Array Drawing of Two Objects with One VAO</title>
            <programlisting language="cpp">glUseProgram(theProgram);

glBindVertexArray(vaoObject);
glUniform3f(offsetUniform, 0.0f, 0.0f, 0.0f);
glDrawArrays(GL_TRIANGLES, 0, numTrianglesInObject1);

glUniform3f(offsetUniform, 0.0f, 0.0f, -1.0f);
glDrawArrays(GL_TRIANGLES, numTrianglesInObject1, numTrianglesInObject2);

glBindVertexArray(0);
glUseProgram(0);</programlisting>
        </example>
        <para>This is all well and good for array drawing, but we are doing indexed drawing. And
            while we can control the location we are reading from in the element buffer by using the
                <parameter>count</parameter> and <parameter>indices</parameter> parameters of
                <function>glDrawElements</function>, that only specifies which indices we are
            reading from the element buffer. What we would need is a way to modify the index data
            itself.</para>
        <para>This could be done by simply storing the index data for object 2 in the element
            buffer. This changes our element buffer into the following:</para>
        <example>
            <title>MultiObject Element Buffer</title>
            <programlisting language="cpp">const GLshort indexData[] =
{
//Object 1
0, 2, 1,        3, 2, 0,
4, 5, 6,        6, 7, 4,
8, 9, 10,       11, 13, 12,
14, 16, 15,     17, 16, 14,

//Object 2
18, 20, 19,     21, 20, 18,
22, 23, 24,     24, 25, 22,
26, 27, 28,     29, 31, 30,
32, 34, 33,     35, 34, 32,
};</programlisting>
        </example>
        <para>This would work for our simple example here, but it does needlessly take up room. What
            would be great is a way to simply add a bias value to the index after it is pulled from
            the element array, but <emphasis>before</emphasis> it is used to access the attribute
            data.</para>
        <para>I'm sure you'll be surprised to know that OpenGL offers such a mechanism, what with me
            bringing it up and all.</para>
        <para>The function <function>glDrawElementsBaseVertex</function> provides this
            functionality. It works like <function>glDrawElements</function>, except that it has one extra parameter
            at the end, which is the offset to be applied to each index. The tutorial project
                <phrase role="propername">Base Vertex With Overlap</phrase> demonstrates
            this.</para>
        <para>The initialization changes, building only one VAO.</para>
        <example>
            <title>Base Vertex Single VAO</title>
            <programlisting language="cpp">glGenVertexArrays(1, &amp;vao);
glBindVertexArray(vao);

size_t colorDataOffset = sizeof(float) * 3 * numberOfVertices;
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)colorDataOffset);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);

glBindVertexArray(0);</programlisting>
        </example>
        <para>This simply binds the beginning of each array. The rendering code is as
            follows:</para>
        <example>
            <title>Base Vertex Rendering</title>
            <programlisting language="cpp">glUseProgram(theProgram);

glBindVertexArray(vao);

glUniform3f(offsetUniform, 0.0f, 0.0f, 0.0f);
glDrawElements(GL_TRIANGLES, ARRAY_COUNT(indexData), GL_UNSIGNED_SHORT, 0);

glUniform3f(offsetUniform, 0.0f, 0.0f, -1.0f);
glDrawElementsBaseVertex(GL_TRIANGLES, ARRAY_COUNT(indexData),
	GL_UNSIGNED_SHORT, 0, numberOfVertices / 2);

glBindVertexArray(0);
glUseProgram(0);</programlisting>
        </example>
        <para>The first draw call uses the regular glDrawElements function, but the second uses the
            BaseVertex version.</para>
        <note>
            <para>This example of BaseVertex's use is somewhat artificial, because both objects use
                the same index data. The more compelling way to use it is with objects that have
                different index data. Of course, if objects have different index data, you may be
                wondering why you would bother with BaseVertex when you could just manually add the
                offset to the indices themselves when you create the element buffer.</para>
            <para>There are several reasons not to do this. One of these is that
                    <literal>GL_UNSIGNED_INT</literal> is twice as large as
                    <literal>GL_UNSIGNED_SHORT</literal>. If you have more than 65,536 entries in an
                array, whether for one object or for many, you would need to use ints instead of
                shorts for indices. Using ints can hurt performance, particularly on older hardware
                with less bandwidth. With BaseVertex, you can use shorts for everything, unless a
                particular object itself has more than 65,536 vertices.</para>
            <para>The other reason not to manually bias the index data is to more accurately match
                the files you are using. When loading indexed mesh data from files, the index data
                is not biased by a base vertex; it is all relative to the model's start. So it makes
                sense to keep things that way where possible; it just makes the loading code simpler
                and faster by storing a per-object BaseVertex with the object rather than biasing
                all of the index data.</para>
        </note>
    </section>
    <section>
        <?dbhtml filename="Tut05 Overlap and Depth Buffering.html" ?>
        <title>Overlap and Depth Buffering</title>
        <para>Regardless of how we render the objects, there is a strange visual problem with what
            we're rendering:</para>
        <informalfigure>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Overlap%20No%20Depth.png" contentwidth="3in"/>
                </imageobject>
            </mediaobject>
        </informalfigure>
        <para>If the smaller object is truly behind the larger one, why is it being rendered on top
            of the larger one? Well, to answer that question, we need to remember what OpenGL
            is.</para>
        <para>The OpenGL specification defines a rasterization-based renderer. Rasterizers offer
            great opportunities for optimizations and hardware implementation, and using them
            provides great power to the programmer. However, they're very stupid. A rasterizer is basically just a triangle drawer. Vertex shaders tell it what vertex positions are, and fragment shaders tell it what colors to put within that triangle. But no matter how fancy, a rasterization-based renderer is just drawing triangles.</para>
			<para>That's fine in general because rasterizers are very fast. They are very good at drawing triangles.</para>
        <para>But rasterizers do exactly and only what the user says. They draw each triangle
                <emphasis>in the order given</emphasis>. This means that, if there is overlap
            between multiple triangles in window space, the triangle that is rendered last will
            be the one that is seen.</para>
        <para>This problem is called <glossterm>hidden surface elimination.</glossterm></para>
        <para>The first thing you might think of when solving this problem is to simply render the
            most distant objects first. This is called <glossterm>depth sorting.</glossterm> As you might
            imagine, this <quote>solution</quote> scales incredibly poorly. Doing it for each
            triangle is prohibitive, particularly with scenes with millions of triangles.</para>
        <para>And the worst part is that even if you put in all the effort, it doesn't actually
            work. Not all the time. Many trivial cases can be solved via depth sorting, but
            non-trivial cases have real problems. You can have an arrangement of 3 triangles where
            each overlaps the other, such that there simply is no order you can render them in to
            achieve the right effect.</para>
			<!--TODO: Show the 3 triangle arrangement.-->
        <para>Even worse, it does nothing for interpenetrating triangles; that is, triangles that
            pass through each other in 3D space (as opposed to just from the perspective of the
            camera).</para>
        <para>Depth sorting isn't going to cut it; clearly, we need something better.</para>
        <para>One solution might be to tag fragments with the distance from the viewer. Then, if a
            fragment that is about to be written has a farther distance (ie: the fragment is behind
            what was already drawn), we simply do not write that fragment to the output image. That
            way, if you draw a triangle behind other triangles, the fragment distances that were
            already written will be closer to the camera than the fragment distances of the new
            triangle. And thus, the particular fragments of that triangle will not be drawn. And
            since this works at the fragment level, it will work just as well for intersecting
            triangles or the 3 triangle arrangement depicted above.</para>
        <para>The <quote>tag</quote> is the window-space Z value. You may recall from <link
                linkend="tut00_window_space">the introduction</link> that the window-space Z
            position of a fragment ranges from 0 to 1, where 0 is the closest and 1 is the
            farthest.</para>
        <para>Colors output from the fragment shader are output into the color image buffer.
            Therefore it naturally follows that depth values would be stored in a <glossterm>depth
                buffer</glossterm> (also called a <glossterm>z buffer</glossterm>, because it stores
            Z values). The depth buffer is an image that is the same size as the main color buffer,
            that stores depth values as pixels rather than colors. Where a color is a 4-component
            vector, a depth is just a single floating-point value.</para>
        <para>Like the color buffer, the depth buffer for the main window is created automatically
            by OpenGL when OpenGL is initialized. OpenGL can even be created without a depth buffer.
            Since FreeGLUT takes care of initializing OpenGL for us, we tell it in the standard
            initialization code to create OpenGL with a depth buffer.</para>
        <para>Writing the depth is not enough. The suggested idea requires stopping the fragment
            from writing anything if the current depth at that location is in front of this
            fragment's depth. This is called the <glossterm>depth test.</glossterm> In OpenGL, the
            test does not have to be in any particular direction; any of the typical numerical
            relation operators (greater than, less than, etc) will work fine. If the test passes,
            then the fragment's outputs (both color and depth) will be written to their appropriate
            buffer. If it fails, then they will not.</para>
        <para>To activate depth testing, we must call
                <function>glEnable</function>(<literal>GL_DEPTH_TEST</literal>); the corresponding
                <function>glDisable</function> call will cause depth testing to cease. After
            activating testing, we need to call <function>glDepthFunc</function> to set the relation
            of the depth test. When the test is true, the incoming fragment is written.</para>
        <para>The test functions can be <literal>GL_ALWAYS</literal> (always write the fragment),
                <literal>GL_NEVER</literal> (no fragments are written), <literal>GL_LESS</literal>,
                <literal>GL_GREATER</literal>, <literal>GL_LEQUAL</literal> (&lt;=),
                <literal>GL_GEQUAL</literal> (>=), <literal>GL_EQUAL</literal>, or
                <literal>GL_NOTEQUAL</literal>. The test function puts the incoming fragment's depth
            on the left of the equation and on the right is the depth from the depth buffer. So
            GL_LESS means that, when the incoming fragment's depth is less than the depth from the
            depth buffer, the incoming fragment is written.</para>
        <para>With the fragment depth being something that is part of a fragment's output, you might
            imagine that this is something you have to compute in a fragment shader. You certainly
            can, but the fragment's depth is normally just the window-space Z coordinate of the
            fragment. This is computed automatically when the X and Y are computed.</para>
        <para>Using the window-space Z value as the fragment's output depth is so common that, if
            you do not deliberately write a depth value from the fragment shader, this value will be
            used by default.</para>
        <section>
            <title>Depth and the Viewport</title>
            <para>Speaking of window coordinates, there is one more issue we need to deal with when
                dealing with depth. The <function>glViewport</function> function defines the
                transform between normalized device coordinates (the range [-1, 1]) to window
                coordinates. But <function>glViewport</function> only defines the transform for the
                X and Y coordinates of the NDC-space vertex positions.</para>
            <para>The window-space Z coordinate ranges from [0, 1]; the transformation from NDC's
                [-1, 1] range is defined with the <function>glDepthRange</function> function. This
                function takes 2 floating-point parameters: the <glossterm>range zNear</glossterm>
                and the <glossterm>range zFar</glossterm>. These values are in window-space; they
                define a simple linear mapping from NDC space to window space. So if zNear is 0.5
                and zFar is 1.0, NDC values of -1 will map to 0.5 and values of 1 will result in
                1.0.</para>
            <note>
                <para>Don't confuse the range zNear/zFar with the <emphasis>camera</emphasis>
                    zNear/zFar used in the perspective projection matrix computation.</para>
            </note>
            <para>The range zNear can be greater than the range zFar; if it is, then the
                window-space values will be reversed, in terms of what constitutes closest or
                farthest from the viewer.</para>
            <para>Earlier, it was said that the window-space Z value of 0 is closest and 1 is
                farthest. However, if our clip-space Z values were negated, the depth of 1 would be
                closest to the viewer and the depth of 0 would be farthest. Yet, if we flip the
                direction of the depth test (GL_LESS to GL_GREATER, etc), we get the exact same
                result. So it's really just a convention. Indeed, flipping the sign of Z and the
                depth test was once a vital performance optimization for many games.</para>
        </section>
        <section>
            <title>Rendering with Depth</title>
            <para>The <phrase role="propername">Depth Buffering</phrase> project shows off how to
                turn on and use the depth buffer. It is based on the BaseVertex rendering of the
                objects.</para>
            <para>The initialization routine has all of the basic depth testing code in it:</para>
            <example>
                <title>Depth Buffer Setup</title>
                <programlisting language="cpp">glEnable(GL_DEPTH_TEST);
glDepthMask(GL_TRUE);
glDepthFunc(GL_LEQUAL);
glDepthRange(0.0f, 1.0f);</programlisting>
            </example>
            <para>These are the most common depth testing parameters. It turns on depth testing,
                sets the test function to less than or equal to, and sets the range mapping to the
                full accepted range.</para>
            <para>It is common to use <literal>GL_LEQUAL</literal> instead of
                    <literal>GL_LESS</literal>. This allows for the use of multipass algorithms,
                where you render the same geometry with the same vertex shader, but linked with a
                different fragment shader. We'll look at those much, much later.</para>
            <para>The call to <function>glDepthMask</function> causes rendering to write the depth
                value from the fragment to the depth buffer. The activation of depth testing alone
                is not sufficient to actually write depth values. This allows us to have depth
                testing for objects where their <emphasis>own</emphasis> depth (the incoming
                fragment's depth) is not written to the depth buffer, even when their color outputs
                are written. We don't use this here, but a special algorithm might need this
                feature.</para>
            <note>
                <para>Due to an odd quirk of OpenGL, writing to the depth buffer is always inactive
                    if <literal>GL_DEPTH_TEST</literal> is disabled, regardless of the depth mask.
                    If you just want to write to the depth buffer, without actually doing a test,
                    you must enable <literal>GL_DEPTH_TEST</literal> and use the depth function of
                        <literal>GL_ALWAYS.</literal></para>
            </note>
            <para>There is one more issue. We know what the depth value is in the depth buffer after
                a fragment is written to it. But what is its value before any rendering is done at
                all? Depth buffers and color buffers are very similar; color buffers get their
                initial colors from calling <function>glClear</function>. So you might imagine a
                similar call for depth buffers.</para>
            <para>As it turns out, they share the same clearing call. If you recall,
                    <function>glClearColor</function> sets the color for clearing color buffers.
                Similarly, <function>glClearDepth</function> sets the depth value that the depth
                buffer will be cleared to.</para>
            <para>In order to clear the depth buffer with <function>glClear</function>, you must use
                the <literal>GL_DEPTH_BUFFER_BIT</literal>. So, the drawing function's clearing, at
                the top of the function, happens as follows:</para>
            <example>
                <title>Depth Buffer Clearing</title>
                <programlisting language="cpp">glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClearDepth(1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);</programlisting>
            </example>
            <para>This will set all of the depth values in the depth buffer to 1.0, which is our
                range zFar.</para>
            <note>
                <para>This is all that is necessary to do depth buffering, as far as OpenGL proper
                    is concerned. However, in order to use depth buffering, the framebuffer must
                    include a depth buffer in addition to an image buffer. This initialization code
                    is platform-specific, but FreeGLUT takes care of it for us. If you do graduate
                    from FreeGLUT, make sure that you use the appropriate initialization mechanism
                    for your platform to create a depth buffer if you need to do depth
                    buffering.</para>
            </note>
            <para>Our new image looks like this:</para>
            <figure>
                <title>Depth Buffering</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Depth%20Buffering.png" contentwidth="3in"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>Which makes a lot more sense. No matter what order we draw the objects in, we get
                a reasonable result.</para>
            <para>Let's test our depth buffering a bit more. Let's create a little overlap between
                the two objects. Change the first offset uniform statement in
                    <function>display</function> to be this:</para>
            <programlisting language="cpp">glUniform3f(offsetUniform, 0.0f, 0.0f, -0.75f);</programlisting>
            <para>We now get some overlap, but the result is still reasonable:</para>
            <figure>
                <title>Mild Overlap</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Depth%20Buffering%20Mild%20Overlap.png"
                            contentwidth="3in"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>We can even change the line to cause major overlap without incident:</para>
            <programlisting language="cpp">glUniform3f(offsetUniform, 0.0f, 0.0f, -1.0f);</programlisting>
            <para>Which gives us:</para>
            <figure>
                <title>Major Overlap</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Depth%20Buffering%20Major%20Overlap.png"
                            contentwidth="3in"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>No amount of depth sorting will help with <emphasis>that</emphasis>.</para>
            <sidebar>
                <title>Fragments and Depth</title>
                <para>Way back in the <link linkend="tut_00">introduction</link>, we said that part
                    of the fragment's data was the window-space position of the fragment. This is a
                    3D coordinate; the Z value is naturally what would be written to the depth
                    buffer. We saw <link linkend="FragPosition">later</link> that the built-in input
                    variable <varname>gl_FragCoord</varname> holds this position;
                        <literal>gl_FragCoord.z</literal> is the window-space depth of the fragment,
                    as generated by OpenGL.</para>
                <para>Part of the job of the fragment shader is to generate output colors for the
                    output color images. Another part of the job of the fragment shader is to
                    generate the output <emphasis>depth</emphasis> of the fragment.</para>
                <para>If that's true, then how can we use the same fragment shader as we did before
                    turning on depth buffering? The default behavior of OpenGL is, if a fragment
                    shader does <emphasis>not</emphasis> write to the output depth, then simply take
                    the generated window-space depth as the final depth of the fragment.</para>
                <para>Oh, you could do this manually. We could add the following statement to the
                        <function>main</function> function of our fragment shader:</para>
                <programlisting language="glsl">gl_FragDepth = gl_FragCoord.z;</programlisting>
                <para>This is, in terms of behavior, a no-op; it does nothing OpenGL wouldn't have
                    done itself. However, in terms of <emphasis>performance</emphasis>, this is a
                    drastic change.</para>
                <para>The reason fragment shaders aren't required to have this line in all of them
                    is to allow for certain optimizations. If the OpenGL driver can see that you do
                    not set <varname>gl_FragDepth</varname> anywhere in the fragment shader, then it
                    can dramatically improve performance in certain cases.</para>
                <para>If the driver knows that the output fragment depth is the same as the
                    generated one, it can do the whole depth test <emphasis>before</emphasis>
                    executing the fragment shader. This is called <glossterm>early depth
                        test</glossterm> or <glossterm>early-z</glossterm>. This means that it can
                    discard fragments <emphasis>before</emphasis> wasting precious time executing
                    potentially complex fragment shaders. Indeed, most hardware nowadays has
                    complicated early z culling hardware that can discard multiple fragments with
                    one test.</para>
                <para>The moment your fragment shader writes anything to
                        <varname>gl_FragDepth</varname>, all of those optimizations have to go away. So
                    generally, you should only write a depth value yourself if you
                        <emphasis>really</emphasis> need to do it.</para>
            </sidebar>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut05 Boundaries and Clipping.html" ?>
        <title>Boundaries and Clipping</title>
        <para>If you recall back to the <link linkend="ShaderPerspective">Perspective projection
                tutorial,</link> we choose to use some special hardware in the graphics chip to do
            the final division of the W coordinate, rather than doing the entire perspective
            projection ourselves in the vertex shader. At the time, it was promised that we would see why this is
            hardware functionality rather than something the shader does.</para>
        <para>Let us review the full math operation we are computing here:</para>
        <equation>
            <title>Perspective Computation</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="PerspectiveFunc.svg" format="SVG" contentwidth="3in"/>
                </imageobject>
            </mediaobject>
        </equation>
        <para><literal>R</literal> is the perspective projected position, P is the camera-space
            position, E<subscript>z</subscript> is the Z-position of the eye relative to the plane
            (assumed to be -1), and P<subscript>z</subscript> is the camera-space Z position.</para>
        <para>One question you should always ask when dealing with equations is this: can it divide
            by zero? And this equation certainly can; if the camera-space Z position of any vertex is
            ever exactly 0, then we have a problem.</para>
        <para>This is where clip-space comes in to save the day. See, until we actually
                <emphasis>do</emphasis> the divide, everything is fine. A 4-dimensional vector that
            will be divided by the fourth component but hasn't <emphasis>yet</emphasis> is still
            valid, even if the fourth component is zero. This kind of coordinate system is called a
                <glossterm>homogeneous coordinate system</glossterm>. It is a way of talking about
            things that you could not talk about in a normal, 3D coordinate system. Like dividing by
            zero, which in visual terms refers to coordinates at infinity.</para>
        <para>This is all nice theory, but we still know that the clip-space positions need to be
            divided by their W coordinate. So how do we get around this problem?</para>
        <para>First, we know that a W of zero means that the camera-space Z position of the point
            was zero as well. We also know that this point <emphasis>must</emphasis> lie outside of
            the viable region for camera space. That is because of the camera Z range: camera zNear
                <emphasis>must</emphasis> be strictly greater than zero. Thus any point with a
            camera Z value of 0 must be in front of the zNear, and therefore outside of the visible
            world.</para>
        <para>Since the vertex coordinate is not going to be visible anyway, why bother drawing it
            and dividing by that pesky 0? Well, because that vertex happens to be part of a
            triangle, and if part of the triangle is visible, we have to draw it.</para>
        <para>But we don't have to draw <emphasis>all</emphasis> of it.</para>
        <para><glossterm>Clipping</glossterm> is the process of taking a triangle and breaking it up
            into smaller triangles, such that only the part of the original triangle that is within
            the viewable region remains. This may generate only one triangle, or it may generate
            multiple triangles.</para>
        <para>Any vertex attributes associated with that vertex are interpolated (based on the
            vertex shader's interpolation qualifiers) to determine the relative value of the
            post-clipping vertex.</para>
        <para>As you might have guessed, clipping happens in <emphasis>clip space</emphasis>, not
            NDC space. Hence the name. Since clip-space is a homogeneous coordinate system, we don't
            have to worry about those pesky zeros. Unfortunately, because homogeneous spaces are not
            easy to draw, we can't show you what it would look like. But we can show you what it
            would look like if you clipped in camera space, in 2D:</para>
        <figure>
            <title>Triangle Clipping</title>
            <mediaobject>
                <imageobject>
                    <imagedata format="SVG" fileref="TriangleClipping.svg"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>To see the results of clipping in action, run the <phrase role="propername">Vertex
                Clipping</phrase> tutorial. It is the same as the one for depth buffering, except
            one object has been moved very close to the zNear plane. Close enough that part of it is
            beyond the zNear and therefore is not part of the viewable area:</para>
        <figure>
            <title>Near Plane Clipping</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Vertex%20Clipping.png" contentwidth="4in"/>
                </imageobject>
            </mediaobject>
        </figure>
        <sidebar>
            <title>A Word on Clipping Performance</title>
            <para>We have phrased the discussion of clipping as a way to avoid dividing by zero for
                good reason. The OpenGL specification states that clipping must be done against all
                sides of the viewable region. And it certainly appears that way; if you move objects
                far enough away that they overlap with zFar, then you won't see the objects.</para>
            <para>You can also see apparent clipping with objects against the four sides of the view
                frustum. To see this, you would need to modify the viewport with
                    <function>glViewport</function>, so that only part of the window is being
                rendered to. If you move objects to the edge of the viewport, you will find that
                the parts that extend outside this region do not get rendered.</para>
            <para>So clipping is happening all the time?</para>
            <para>Of course not. Clipping takes triangles and breaks them into pieces using
                4-dimensional homogeneous mathematics. One triangle can be broken up into several;
                depending on the location of the triangle, you can get quite a few different pieces.
                The simple act of turning one triangle into several is hard and time
                consuming.</para>
            <para>So, if OpenGL states that this must happen, but supposedly OpenGL-compliant
                hardware doesn't do it, then what's going on?</para>
            <para>Consider this: if we hadn't told you just now that the hardware doesn't do
                clipping most of the time, could you tell? No. And that's the point: OpenGL
                specifies <emphasis>apparent</emphasis> behavior; the spec doesn't care if you
                actually do vertex clipping or not. All the spec cares about is that the user can't
                tell the difference in terms of the output.</para>
            <para>That's how hardware can get away with the early-z optimization mentioned before;
                the OpenGL spec says that the depth test must happen after the fragment program
                executes. But if the fragment shader doesn't modify the depth, then would you be
                able to tell the difference if it did the depth test before the fragment shader? No;
                if it passes, it would have passed either way, and the same goes for failing.</para>
            <para>Instead of clipping, the hardware usually just lets the triangles go through if
                part of the triangle is within the visible region. It generates fragments from those
                triangles, and if a fragment is outside of the visible window, it is discarded
                before any fragment processing takes place.</para>
            <para>Hardware usually can't do this, however, if any vertex of the triangle has a
                clip-space W &lt;= zero. In terms of a perspective projection, this means that part
                of the triangle is fully behind the eye, rather than just behind the camera zNear
                plane. In these cases, clipping is much more likely to happen.</para>
            <para>Even so, clipping only happens if the triangle is partially visible; a triangle
                that is entirely in front of the zNear plane is dropped entirely.</para>
            <para>In general, you should try to avoid rendering things that will clip against the
                eye plane (clip-space W &lt;= 0, or camera-space Z >= 0). You don't need to be
                pedantic about it; long walls and the like are fine. But, particularly for low-end
                hardware, a lot of clipping can really kill performance.</para>
        </sidebar>
    </section>
    <section>
        <?dbhtml filename="Tut05 Depth Clamping.html" ?>
        <title>Depth Clamping</title>
        <para>That's all well and good, but this:</para>
        <informalfigure>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Vertex%20Clipping.png" contentwidth="4in"/>
                </imageobject>
            </mediaobject>
        </informalfigure>
        <para>This is never a good thing. Sure, it keeps the hardware from dividing by zero, which I
            guess is important, but it looks really bad. It's showing the inside of an object that
            has no insides. Plus, you can also see that it has no backside (since we're doing face
            culling); you can see right through to the object behind it.</para>
        <para>If computer graphics is an elaborate illusion, then clipping utterly
                <emphasis>shatters</emphasis> this illusion. It's a big, giant hole that screams,
                    <quote><emphasis>this is fake!</emphasis></quote> as loud as possible to the
            user. What can we do about this?</para>
        <para>The most common technique is to simply not allow it. That is, know how close objects
            are getting to the near clipping plane (i.e., the camera) and don't let them get close
            enough to clip.</para>
        <para>And while this can <quote>function</quote> as a solution, it isn't exactly good. It
            limits what you can do with objects and so forth.</para>
        <para>A more reasonable mechanism is <glossterm>depth clamping</glossterm>. What this does
            is turn off camera near/far plane clipping altogether. Instead, the depths of these
            fragments are clamped to the [-1, 1] range in NDC space.</para>
        <para>We can see this in the <phrase role="propername">Depth Clamping</phrase> tutorial.
            This tutorial is identical to the vertex clipping one, except that the
                <function>keyboard</function> function has changed as follows:</para>
        <example>
            <title>Depth Clamping On/Off</title>
            <programlisting language="cpp">//GLUT keyboard callback: Escape exits; spacebar toggles depth clamping.
void keyboard(unsigned char key, int x, int y)
{
    //Tracks whether GL_DEPTH_CLAMP is currently enabled.
    //Starts false because depth clamping is off by default in OpenGL.
    static bool bDepthClampingActive = false;
    switch (key)
    {
    case 27: //Escape key: leave the main loop and end the program.
        glutLeaveMainLoop();
        break;
    case 32: //Spacebar: toggle the GL_DEPTH_CLAMP state.
        if(bDepthClampingActive)
            glDisable(GL_DEPTH_CLAMP);
        else
            glEnable(GL_DEPTH_CLAMP);
        
        bDepthClampingActive = !bDepthClampingActive;
        break;
    }
}</programlisting>
        </example>
        <para>When you press the space bar (ASCII code 32), the code will toggle depth clamping,
            with the
                <function>glEnable</function>/<function>glDisable</function>(<literal>GL_DEPTH_CLAMP</literal>)
            calls. It will start with depth clamping off, since that is the OpenGL default.</para>
        <para>When you run the tutorial, you will see what we saw in the last one; pressing the
            space bar shows this:</para>
        <figure>
            <title>Depth Clamping</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Depth%20Clamping.png" contentwidth="4in"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>This looks correct; it appears as if all of our problems are solved.</para>
        <para>Appearances can be deceiving. Let's see what happens if you move the other object
            forward, so that the two intersect like in the earlier part of the tutorial:</para>
        <figure>
            <title>Depth Clamp With Overlap</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Double%20Depth%20Clamping.png" contentwidth="4in"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>Oops. Part of it looks right, just not the part where the depth is being clamped.
            What's going on?</para>
        <para>Well, recall what depth clamping does; it makes fragment depth values outside of the
            range be clamped to within the range. So depth values smaller than range zNear become
            range zNear, and values larger than range zFar become range zFar.</para>
        <para>Therefore, when you go to render the second object, some of the clamped fragments from
            the first are there. So the incoming fragment from the new object has a depth of 0, and
            some of the values from the depth buffer also have a depth of 0. Since our depth test is
                <literal>GL_LESS</literal>, the incoming 0 is not less than the depth buffer's 0, so
            the part of the second object does not get rendered. This is pretty much the opposite of
            where we started: previously rendered objects are in front of newer ones. We could change it to <literal>GL_LEQUAL</literal>, but that only gets
            us to <emphasis>exactly</emphasis> where we started.</para>
        <para>So a word of warning: be careful with depth clamping when you have overlapping objects
            near the planes. Similar problems happen with the far plane, though backface culling can
            be a help in some cases.</para>
        <note>
            <para>We defined depth clamping as, in part, turning off clipping against the camera
                near and far planes. If you're wondering what happens when you have depth clamping,
                which turns off clipping, and a clip-space W &lt;= 0, it's simple. In camera space,
                near and far clipping is represented as turning a pyramid into a frustum: cutting
                off the top and bottom. If near/far clipping isn't active, then the frustum becomes
                a pyramid. The other 4 clipping planes are still fully in effect. Clip-space
                vertices with a W of less than 0 are all outside of the boundary of any of the other
                four clipping planes.</para>
			<para>The only clip-space point with a W of 0 that is within
                this volume is the homogeneous origin point: (0, 0, 0, 0); everything else will be
                clipped. And a triangle whose three vertices all lie at that same position would
                have no area; it would therefore generate no fragments anyway, so it can be safely
                eliminated before the perspective divide.</para>
        </note>
    </section>
    <!--
        <section>
            <?dbhtml filename="Tut05 Depth Precision.html" ?>
            <title>Depth Precision</title>
            <para>There is one other thing that needs to be discussed with regard to depth buffers:
                precision.</para>
            <para>In the previous tutorial, we saw that the transform from camera space to
                normalized device coordinate (<acronym>NDC</acronym>), in 2D, looked like
                this:</para>
            <figure>
                <title>2D Camera to NDC Space</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="CameraToPerspective.svg" format="SVG" contentwidth="6in"
                        />
                    </imageobject>
                </mediaobject>
            </figure>
            <para>This transformation used a special function to calculate the depth, one designed
                to keep lines linear after performing the perspective divide. While it does do this,
                it has a number of other effects. In particular, it changes the Z spacing between
                points.</para>
            <para>We can see that there is a lot of spacing between the points in NDC space at the
                bottom (close to the view) and much less at the top (far from the view). The
                third-nearest point to the viewer in camera space (Z = -1.75) maps to a point well
                past halfway to the camera in NDC space.</para>
            <para>Let us take just the front half of NDC space as an example. In NDC space, this is
                the range [-1, 0]. In camera space, the exact range depends on the camera zNear and
                zFar values. In the above example where the camera range is [-1, -3], the range that
                maps to the front half of NDC space is [-1, -1.5], only a quarter of the
                range.</para>
            <para>The larger the difference between N and F, the <emphasis>smaller</emphasis> the
                half-space. If the camera range goes from [-500, -1000], then half of NDC space
                represents the range from [-500, -666.67]. This is 33.3% of the camera space range
                mapping to 50% of the NDC range. However, if the camera range goes from [-1, -1000],
                fully <emphasis>half</emphasis> of NDC space will represent only [-1, -1.998] in
                camera space; less than 0.1% of the range.</para>
            <para>This has real consequences for the precision of your depth buffer. Earlier, we
                said that the depth buffer stores floating-point values. While this is conceptually
                true, most depth buffers actually use fixed-point values and convert them into
                floating-point values automatically. If you have a 16-bit depth buffer, you have
                65536 possible depth values. Half of this is 32768 depth values, equivalent to a
                15-bit depth buffer.</para>
            <para>Even so, the difference between 16 bits and 15 bits is not that great. Instead of
                looking at half of NDC space, let's look at half of the
                <emphasis>precision.</emphasis> So, what is the camera-space range at which you
                lose half of your precision?</para>
            <para>For a 16-bit depth buffer, half-precision is 8 bits. In fixed-point, if the near
                value is 0 and the far is 65535 (representing 1.0), then half-precision happens when
                the first 8 bits are all ones. This value is 65280 (65535 - 255). As a
                floating-point value, this represents a value of ~0.996. In NDC space, this is a Z
                value of ~0.992.</para>
            <para>So what is the camera-space range at which you lose half precision? If the camera
                depth range is [-500, -1000], then you get the half precision range of [-500, -996],
                which is over 99% of the camera-space range. What about [-1, -1000]? This comes out
                to [-1, -200], which is 20% of the range.</para>
            <para>Before we can assess the consequences of this, we must first discuss what the
                consequences are for low depth precision. Remember that the depth buffer exists to
                allow each fragment to have a depth value, such that if an incoming fragment is
                behind the already existing value, it is not written to the image.</para>
            <para>If the available precision is too small, then what happens is that part of one
                triangle will start showing through triangles that are supposed to be farther away.
                If the camera or these objects are in motion, horrible flickering artifacts can be
                seen. This is called <glossterm>z-fighting,</glossterm> as multiple objects appear
                to be fighting each other when animated.</para>
            <para>Fortunately, the days of 16-bit depth buffers are long over; the modern standard
                is (and has been for years now) 24-bits of precision. Half-precision of 24-bits is
                12-bits, which is not too far from a 16-bit depth buffer in and of itself. If you
                use a 24-bit depth buffer, it turns out that you lose half precision on a [-1,
                -1000] camera range at [-1, -891], which is 89% of the range. At a 1:10,000 ratio,
                you have 45% of the camera range in most of the precision. At 1:100,000 this drops
                to ~7%, and at 1:1,000,000 it is down to 0.8%.</para>
            <para>The most important question to be asked is this: is this bad? Not really.</para>
            <para>Let's take the 1:100,000 example. 7% may not sound like a lot, but this is still a
                range of [-1, -7573]. If these units are conceptually in inches, then you've got
                most of the precision sitting in the first 600+ feet.</para>
            <para>And let's see what happens if we move the zNear plane forward just
                <emphasis>four</emphasis> inches, to 5:100,000. The percentage jumps to almost
                30%, with half-precision happening at over 29,000 inches; that's a good half-mile.
                Increase the zNear to a mere 10 inches, and you have the equivalent of 1:10,000
                again: 45%. 10 inches may seem like a lot, but that's still less than a foot away
                from the eye. Depending on what you are rendering, this may be a perfectly
                legitimate trade-off.</para>
            <para>What this teaches us is that the absolute numbers don't matter: it is the ratio of
                zNear to zFar that dictates where you lose precision. 0.1:1000 is just as bad as
                1:10,000. So push the zNear distance forward as far as you can. What happens if you
                push it too far? That's the next section.</para>
            <section>
                <title>Large Camera Depth Ranges</title>
                <para>You may ask what to do if you really need a wide camera depth range, like
                    1:4,000,000 or something, where each unit represents an inch or something
                    equally small.</para>
                <para>First, it needs to be pointed out that a 24-bit depth buffer only goes from 0
                    to 16,777,215. Even if the depth values were evenly distributed, you would only
                    get a resolution of 1/4th of an inch.</para>
                <para>Second, this range is starting to come perilously close to the issues with
                    <emphasis>floating-point</emphasis> precision. Yes, this still provides a
                    lot of precision, but remember: the depth range is for the current view. This
                    means that your world is probably much larger than this. If you're getting
                    numbers that large, you may need to start worrying about floating-point
                    precision error in computing these positions. There are certainly ways around it
                    (and we will discuss some later), but if you need a camera-space range that
                    large, you may run into other problems at the same time.</para>
                <para>Third, most applications render lower-quality models when objects are far
                    away. This is mainly for the purpose of focusing performance where the user
                    needs it: the things closest to him. If some of the z-fighting comes from
                    overlap within a model, a lower-detail model without those overlapping parts can
                    help reduce z-fighting as well.</para>
                <para>Fourth, you usually really, <emphasis>really</emphasis> need that precision
                    up-close. If you think z-fighting looks bad when it happens with a distant
                    object, imagine how bad it will look if it's up in your face. Even if you could
                    make the z-values linear, it could cause problems in near objects.</para>
                <para>Fifth, if you really need a camera range this large, you can play some tricks
                    with the depth range. But only do this if you actually do get z-fighting; don't
                    simply do it because you have a large camera range.</para>
                <para>The camera range defines how the perspective matrix transforms the Z to
                    clip-space and therefore NDC space. The <emphasis>depth</emphasis> range defines
                    what part of the [0, 1] range of window coordinates that the NDC depth maps to.
                    So you can draw the front half of your scene into the [0, 0.5] depth range with
                    a camera range like [-1, -2,000,000]. Then, you can draw the back half of the
                    scene in the [0.5, 1] depth range, with a camera range of [-2,000,000,
                    -4,000,000]. Dividing it in half like this isn't very fair to your front
                    objects, so it's more likely that you would want to use something like [-1,
                    -10,000] for the front half and [-10,000, -4,000,000] for the second. Each of
                    these would still map to half of the depth range.</para>
                <para>Objects that lie on the border between the split would have to be rendered
                    into both, just to make sure their depth values show up properly.</para>
            </section>
        </section>
        -->
    <section>
        <?dbhtml filename="Tut05 In Review.html" ?>
        <title>In Review</title>
        <para>In this tutorial, you have learned about the following:</para>
        <itemizedlist>
            <listitem>
                <para>Vertex array objects encapsulate all of the state necessary to render objects.
                    This includes vertex attribute arrays, buffer objects to feed those arrays, and
                    the element buffer, if present.</para>
            </listitem>
            <listitem>
                <para>Indexed rendering pulls data from the current vertex arrays by using a
                    separate sequence of indices. The sequence of indices defines the sequence that
                    OpenGL sees the vertices in. Indexed rendering can be performed by storing index
                    data in a buffer object, and using <function>glDrawElements</function>.</para>
            </listitem>
            <listitem>
                <para>Indices in indexed rendering calls can be offset by a value using the
                        <function>glDrawElementsBaseVertex</function> function.</para>
            </listitem>
            <listitem>
                <para>Hidden surface elimination can be performed by using a depth buffer and
                    properly setting up the depth test.</para>
            </listitem>
            <listitem>
                <para>Triangles that are outside of the camera zNear and zFar range are clipped
                    against this range. This can open up holes in models if they are too close to
                    the camera.</para>
            </listitem>
            <listitem>
                <para>Clipping holes can be repaired to a degree by activating depth clamping, so
                    long as there is no overlap and the triangles don't extend beyond 0 in camera
                    space.</para>
            </listitem>
			<!--TODO: Reinstate this.-->
			<!--
            <listitem>
                <para>Depth buffers have a finite precision, and this can cause z-fighting.
                    Z-fighting can be repaired by moving the Camera zNear forward, or moving objects
                    farther apart.</para>
            </listitem>
			-->
        </itemizedlist>
        <section>
            <title>OpenGL Functions of Note</title>
            <glosslist>
                <glossentry>
                    <glossterm>glGenVertexArrays/glDeleteVertexArrays</glossterm>
                    <glossdef>
                        <para>Creates/destroys one or more vertex array objects.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glBindVertexArray</glossterm>
                    <glossdef>
                        <para>Binds a vertex array object to the context for rendering. Unlike
                            buffer objects, vertex array objects are not bound to a target; only
                            one can be bound at a time.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glDrawElements</glossterm>
                    <glossdef>
                        <para>Performs indexed rendering with the currently bound
                                <literal>GL_ELEMENT_ARRAY_BUFFER</literal> (provided via the VAO)
                            and the current attribute arrays.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glDrawElementsBaseVertex</glossterm>
                    <glossdef>
                        <para>Performs indexed rendering as <function>glDrawElements</function>,
                            except that each element index is offset by a constant value before
                            performing the array lookup. This is useful for minimizing the number of
                            buffer object binds performed in a program.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glEnable/glDisable(GL_DEPTH_TEST)</glossterm>
                    <glossdef>
                        <para>Enables/disables the per-fragment depth test. If the depth test is
                            enabled, then the result of applying the depth function, set by
                                <function>glDepthFunc</function>, to the incoming fragment's depth
                            and the destination pixel's depth will determine if the incoming
                            fragment is written or not.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glDepthMask</glossterm>
                    <glossdef>
                        <para>Sets or unsets the writing of values to the depth buffer.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glDepthFunc</glossterm>
                    <glossdef>
                        <para>Sets the depth comparison function for depth testing. Has no effect if
                                <literal>GL_DEPTH_TEST</literal> is not enabled.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glDepthRange</glossterm>
                    <glossdef>
                        <para>Sets the mapping between NDC space and window space for the Z
                            coordinate of the position. The XY counterpart to this function is
                                <function>glViewport</function>. The range for the window-space
                            depth must be [0, 1], though the near does not have to be less than the
                            far. The range zNear value, the first value, is the window-space value
                            that -1 in NDC space maps to. The range zFar is the window-space value
                            that +1 in NDC space maps to.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glClearDepth</glossterm>
                    <glossdef>
                        <para>Sets the clear depth value. This is the value that the depth buffer
                            will be cleared to when calling <function>glClear</function> with the
                                <literal>GL_DEPTH_BUFFER_BIT</literal> bit set.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>glEnable/glDisable(GL_DEPTH_CLAMP)</glossterm>
                    <glossdef>
                        <para>Enables/disables depth clamping behavior. When enabled, clipping is
                            deactivated, and any fragments that an object would render that are
                            outside of the [-1, 1] range in NDC space are clamped to this
                            range.</para>
                    </glossdef>
                </glossentry>
            </glosslist>
        </section>
    </section>
    <section xml:id="Tut05_Glossary">
        <?dbhtml filename="Tut05 Glossary.html" ?>
        <title>Glossary</title>
        <glosslist>
            <glossentry>
                <glossterm>vertex array object (VAO)</glossterm>
                <glossdef>
                    <para>Vertex array objects are OpenGL Objects that store all of the state needed
                        to make one or more draw calls. This includes attribute array setup
                        information (from <function>glVertexAttribPointer</function>), buffer objects
                        used for attribute arrays, and the
                            <literal>GL_ELEMENT_ARRAY_BUFFER</literal> binding, which is a buffer
                        object that stores the index arrays, if needed.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>array drawing</glossterm>
                <glossdef>
                    <para>Rendering a contiguous range of vertices pulled from the currently bound
                        attribute arrays (within the vertex array object). The vertices are sent in
                        order from first to last in the range.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>indexed drawing</glossterm>
                <glossdef>
                    <para>Rendering an arbitrary set of vertices pulled from the currently bound
                        attribute arrays. The set of vertices is defined by the element array. The
                        vertices are rendered in the order specified by the element array.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>element array, index array</glossterm>
                <glossdef>
                    <para>A list of indices, stored within a buffer object, that refer to elements
                        in the currently bound attribute arrays.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>hidden surface elimination</glossterm>
                <glossdef>
                    <para>The ability to render a scene such that objects that are behind other
                        objects do not show through them. There are several methods available for
                        achieving this.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>depth sorting</glossterm>
                <glossdef>
                    <para>Rendering objects or triangles in an order based on their Z-depth from the
                        camera. An attempt at hidden surface elimination.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>depth buffer, z-buffer</glossterm>
                <glossdef>
                    <para>An image in the framebuffer that conceptually stores the distance of the
                        pixel from the camera zNear plane. The depth buffer stores only
                        one-dimensional values, instead of the 4-dimensional colors of the regular
                        image buffer. Depth values are usually restricted to the range [0,
                        1].</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>depth test</glossterm>
                <glossdef>
                    <para>The process of testing the incoming fragment's depth value against the
                        depth value from the depth buffer for the pixel that the fragment would
                        overwrite. If the test passes, then the fragment is written. If the test
                        fails, the fragment is not written. This, combined with a depth buffer, can
                        be used as a good method of hidden surface elimination.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>range zNear, range zFar</glossterm>
                <glossdef>
                    <para>The mapping from the NDC-space Z coordinate range [-1, 1] to the
                        window-space Z coordinate range [0, 1]. This mapping is set with the
                            <function>glDepthRange</function> function. These values are specified in
                        window-space coordinates. The -1 Z coordinate in NDC space maps to range
                        zNear, and the +1 Z coordinate maps to range zFar. The range zNear does not
                        have to be less than the range zFar.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>early depth test, early-z</glossterm>
                <glossdef>
                    <para>An optimization in the depth test, where the incoming fragment's depth
                        value is tested <emphasis>before</emphasis> the fragment shader executes. If
                        the fragment shader is long, this can save a great deal of time. If the
                        fragment shader exercises the option to modify or replace the fragment's
                        depth, then the early depth test optimization will not be active.</para>
                </glossdef>
            </glossentry>
			<!--TODO: Reinstate this.-->
			<!--
            <glossentry>
                <glossterm>z-fighting</glossterm>
                <glossdef>
                    <para>Happens when the window-space Z values for two surfaces are sufficiently
                        close together that part of one shows through a surface that it shouldn't.
                        This is usually due to a lack of depth buffer precision. The common remedy
                        is to try to move the camera zNear further from 0.</para>
                </glossdef>
            </glossentry>
			-->
            <glossentry>
                <glossterm>homogeneous coordinate system</glossterm>
                <glossdef>
                    <para>A 4-dimensional coordinate system used to represent a 3-dimensional
                        position. To compute the 3D position, the fourth coordinate is divided into
                        the other 3. This kind of coordinate system allows mathematics to function
                        in the presence of what would otherwise be undefined values, namely division
                        by zero.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>clipping</glossterm>
                <glossdef>
                    <para>The act of breaking a single triangle into one or more smaller ones so
                        that they all fit within the visible region. Actual clipping, generating new
                        vertices and such, is not often done by hardware; instead, the hardware
                        usually tries to cull fragments that are outside of the viewing area.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>depth clamping</glossterm>
                <glossdef>
                    <para>A rendering mode where clipping is turned off and the Z value of fragments
                        is clamped to the depth range. This is used to prevent clipping from
                        punching holes in objects, though it is not a foolproof solution.</para>
                </glossdef>
            </glossentry>
        </glosslist>
    </section>
</chapter>