<?xml version="1.0" encoding="UTF-8"?>
<?oxygen RNGSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng" type="xml"?>
<?oxygen SCHSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng"?>
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
    <?dbhtml filename="Tutorial 13.html" ?>
    <title>Lies and Impostors</title>
    <para>Lighting in these tutorials has ultimately been a form of deception. An increasingly
        accurate one, but it is deception all the same. We are not rendering round objects; we
        simply use lighting and interpolation of surface characteristics to make an object appear
        round. Sometimes we have artifacts or optical illusions that show the lie for what it is.
        Even when the lie is near-perfect, the geometry of a model still does not correspond to what
        the lighting makes the geometry appear to be.</para>
    <para>In this tutorial, we will be looking at the ultimate expression of this lie. We will use
        lighting computations to make an object appear to be something entirely different from its
        geometry.</para>
    <section>
        <?dbhtml filename="Tut13 Simple Sham.html" ?>
        <title>Simple Sham</title>
        <para>We want to render a sphere. We could do this as we have done in previous tutorials.
            That is, generate a mesh of a sphere and render it. But this will never be a
            mathematically perfect sphere. It is easy to generate a sphere with an arbitrary number
            of triangles, and thus improve the approximation. But it will always be an
            approximation.</para>
        <para>Spheres are very simple, mathematically speaking. They are simply the set of points in
            a space that are a certain distance from a specific point. This sounds like something we
            might be able to compute in a shader.</para>
        <para>Our first attempt to render a sphere will be quite simple. We will use the vertex
            shader to compute the vertex positions of a <emphasis>square</emphasis> in clip-space.
            This square will have the same position and width/height as the actual sphere would,
            and it will always face the camera. In the fragment shader, we will compute the
            position and normal of each point along the sphere's surface. By doing this, we can
            map each point on the square to a point on the sphere we are trying to render. This
            square is commonly called a <glossterm>flat card</glossterm> or
            <glossterm>billboard</glossterm>.</para>
        <para>For those points on the square that do not map to a sphere point (i.e., the corners), we
            have to do something special. Fragment shaders are required to write a value to the
            output image. But they also have the ability to abort processing and write neither color
            information nor depth to the color and depth buffers. We will employ this to draw our
            square-spheres.</para>
        <para>This technique is commonly called <glossterm>impostors</glossterm>. The idea is that
            we're actually drawing a square, but we use the fragment shader to make it look like
            something else. The geometric shape is just a placeholder, a way to invoke the fragment
            shader over a certain region of the screen. The fragment shader is where the real magic
            happens.</para>
        <para>The tutorial project <phrase role="propername">Basic Impostor</phrase> demonstrates
            this technique. It shows a scene with several spheres, a directional light, and a moving
            point light source.</para>
        <figure>
            <title>Basic Impostor</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Basic%20Impostor.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>The camera movement is controlled in the same way as previous tutorials. The
                <keycap>T</keycap> key will toggle a display showing the look-at point. The
                <keycap>-</keycap> and <keycap>=</keycap> keys will rewind and fast-forward the
            time, and the <keycap>P</keycap> key will toggle pausing of the time advancement.</para>
        <para>The tutorial starts showing mesh spheres, to allow you to switch back and forth
            between actual meshes and impostor spheres. Each sphere is independently
            controlled:</para>
        <table frame="all">
            <title>Sphere Impostor Control Key Map</title>
            <tgroup cols="2">
                <colspec colname="c1" colnum="1" colwidth="1.0*"/>
                <colspec colname="c2" colnum="2" colwidth="1.0*"/>
                <thead>
                    <row>
                        <entry>Key</entry>
                        <entry>Sphere</entry>
                    </row>
                </thead>
                <tbody>
                    <row>
                        <entry><keycap>1</keycap></entry>
                        <entry>The central blue sphere.</entry>
                    </row>
                    <row>
                        <entry><keycap>2</keycap></entry>
                        <entry>The orbiting grey sphere.</entry>
                    </row>
                    <row>
                        <entry><keycap>3</keycap></entry>
                        <entry>The black marble on the left.</entry>
                    </row>
                    <row>
                        <entry><keycap>4</keycap></entry>
                        <entry>The gold sphere on the right.</entry>
                    </row>
                </tbody>
            </tgroup>
        </table>
        <para>This tutorial uses a rendering setup similar to the last one. The shaders use uniform
            blocks to control most of the uniforms. There is a shared global lighting uniform block,
            as well as one for the projection matrix.</para>
        <section>
            <title>Grifting Geometry</title>
            <para>The way this program actually renders the geometry for the impostors is
                interesting. The vertex shader looks like this:</para>
            <example>
                <title>Basic Impostor Vertex Shader</title>
                <programlisting language="glsl">#version 330

layout(std140) uniform;

out vec2 mapping;

uniform Projection
{
	mat4 cameraToClipMatrix;
};

uniform float sphereRadius;
uniform vec3 cameraSpherePos;

void main()
{
    vec2 offset;
    switch(gl_VertexID)
    {
    case 0:
        //Bottom-left
        mapping = vec2(-1.0, -1.0);
        offset = vec2(-sphereRadius, -sphereRadius);
        break;
    case 1:
        //Top-left
        mapping = vec2(-1.0, 1.0);
        offset = vec2(-sphereRadius, sphereRadius);
        break;
    case 2:
        //Bottom-right
        mapping = vec2(1.0, -1.0);
        offset = vec2(sphereRadius, -sphereRadius);
        break;
    case 3:
        //Top-right
        mapping = vec2(1.0, 1.0);
        offset = vec2(sphereRadius, sphereRadius);
        break;
    }
    
    vec4 cameraCornerPos = vec4(cameraSpherePos, 1.0);
    cameraCornerPos.xy += offset;
    
    gl_Position = cameraToClipMatrix * cameraCornerPos;
}</programlisting>
            </example>
            <para>Notice anything missing? There are no input variables declared anywhere in this
                vertex shader.</para>
            <para>It does still use an input variable: <varname>gl_VertexID</varname>. This is a
                built-in input variable; it contains the index of the particular vertex being
                processed. When using array rendering, it simply counts vertices from the start of
                the draw call. When using indexed rendering, it is the index fetched for this
                vertex from the element array.</para>
            <para>When we render this mesh, we render 4 vertices as a
                    <literal>GL_TRIANGLE_STRIP</literal>. This is rendered in array rendering mode,
                so the <varname>gl_VertexID</varname> will vary from 0 to 3. Our switch/case
                statement determines which vertex we are rendering. Since we're trying to render a
                square with a triangle strip, the order of the vertices needs to be appropriate for
                this.</para>
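            <para>The corresponding draw call is quite small. As a sketch, assuming the program and
                its uniforms are already bound:</para>
            <programlisting language="cpp">//Renders 4 vertices; gl_VertexID takes the values 0 through 3.
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);</programlisting>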
            <para>After computing which vertex we are trying to render, we use the radius-based
                offset as a bias to the camera-space sphere position. The Z value of the sphere
                position is left alone, since it will always be correct for our square. After that,
                we transform the camera-space position to clip-space as normal.</para>
            <para>The output <varname>mapping</varname> is a value that is used by the fragment
                shader, as we will see below.</para>
            <para>Since this vertex shader takes no inputs, you might think that you could get away
                with binding a vertex array object that had no enabled attributes. Alas, this does
                not work; we must have a dummy attribute enabled and a dummy buffer object to pull
                data from. We do this in the initialization function of this tutorial.</para>
            <note>
                <para>The OpenGL 3.3 core specification is quite clear that it should be possible to
                    render with no enabled attributes. Sadly, certain implementations of OpenGL
                    (*cough*AMD*cough*) incorrectly forbid it, so our tutorial has to work around
                    them.</para>
            </note>
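            <para>As a sketch of what that setup might look like (the names here are illustrative,
                not the tutorial's actual code), we create a small buffer object and enable a
                single attribute whose data the shader never reads:</para>
            <programlisting language="cpp">//Dummy buffer and VAO: one enabled attribute whose data is never used.
GLuint dummyBuffer = 0;
GLuint dummyVAO = 0;

glGenBuffers(1, &amp;dummyBuffer);
glBindBuffer(GL_ARRAY_BUFFER, dummyBuffer);
glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(float), NULL, GL_STATIC_DRAW);

glGenVertexArrays(1, &amp;dummyVAO);
glBindVertexArray(dummyVAO);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, (void*)(0));

glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);</programlisting>
            <para>Binding this VAO before the draw call shown above satisfies such
                implementations.</para>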
        </section>
        <section>
            <title>Racketeering Rasterization</title>
            <para>Our lighting equations in the past needed only a position and normal in
                camera-space (as well as other material and lighting parameters) in order to work.
                So the job of the fragment shader is to provide them, even though they do not
                correspond to the actual triangles in any way.</para>
            <para>Here are the salient new parts of the fragment shader for impostors:</para>
            <example>
                <title>Basic Impostor Fragment Shader</title>
                <programlisting language="glsl">in vec2 mapping;

void Impostor(out vec3 cameraPos, out vec3 cameraNormal)
{
    float lensqr = dot(mapping, mapping);
    if(lensqr > 1.0)
        discard;
    	
    cameraNormal = vec3(mapping, sqrt(1.0 - lensqr));
    cameraPos = (cameraNormal * sphereRadius) + cameraSpherePos;
}

void main()
{
    vec3 cameraPos;
    vec3 cameraNormal;
    
    Impostor(cameraPos, cameraNormal);
    
    vec4 accumLighting = Mtl.diffuseColor * Lgt.ambientIntensity;
    for(int light = 0; light &lt; numberOfLights; light++)
    {
        accumLighting += ComputeLighting(Lgt.lights[light],
            cameraPos, cameraNormal);
    }
    
    outputColor = sqrt(accumLighting); //2.0 gamma correction
}</programlisting>
            </example>
            <para>In order to compute the position and normal, we first need to find the point on
                the sphere that corresponds with the point on the square that we are currently on.
                And to do that, we need a way to tell where on the square we are.</para>
            <para>Using <varname>gl_FragCoord</varname> will not help, as it is relative to the
                entire screen. We need a value that is relative only to the impostor square. That is
                the purpose of the <varname>mapping</varname> variable. When this variable is at (0,
                0), we are in the center of the square, which is the center of the sphere. When it
                is at (-1, -1), we are at the bottom left corner of the square.</para>
            <para>Given this, we can now compute the sphere point directly <quote>above</quote> the
                point on the square, which is the job of the <function>Impostor</function>
                function.</para>
            <para>Before we can compute the sphere point however, we must make sure that we are
                actually on a point that has the sphere above it. This requires only a simple
                distance check. Since the square extends one sphere radius from its center in each
                direction, if the distance of the <varname>mapping</varname> variable from its (0,
                0) point is greater than 1, then we know that this point is off of the
                sphere.</para>
            <para>Here, we use a clever way of computing the length: we do not. Instead, we compute
                the square of the length. We know that if <inlineequation>
                    <mathphrase>X<superscript>2</superscript> >
                        Y<superscript>2</superscript></mathphrase>
                </inlineequation> is true, then <inlineequation>
                    <mathphrase>X > Y</mathphrase>
                </inlineequation> must also be true for all positive real numbers X and Y. So we
                just do the comparison as squares, rather than taking a square-root to find the true
                length.</para>
            <para>If the point is not under the sphere, we execute something new:
                    <literal>discard</literal>. The <literal>discard</literal> keyword is unique to
                fragment shaders. It tells OpenGL that the fragment is invalid and its data should
                not be written to the image or depth buffers. This allows us to carve out a shape in
                our flat square, turning it into a circle.</para>
            <sidebar>
                <title>A Word on Discard</title>
                <para>Using <literal>discard</literal> sounds a lot like throwing an exception.
                    Since the fragment's outputs will be ignored and discarded, you might expect
                    that executing this instruction will cause the fragment shader to stop
                    executing. This is not necessarily the case.</para>
                <para>Due to the way that shaders tend to work, multiple executions of the same
                    shader are often operating at the same time. All of them run in lock-step with
                    one another; they all execute instructions at the same time, just on different
                    datasets. If one of them does a discard, it still has to keep doing what it was
                    doing, because the others in the group may not have discarded, since the
                    discard was based on data that may differ between invocations. This is also why
                    branches in shaders will often execute both sides rather than actually
                    branching; it keeps the shader logic simpler.</para>
                <para>However, that does not mean <literal>discard</literal> is without use for
                    stopping unwanted processing. If all of the shaders that are running together
                    hit a <literal>discard</literal>, then they can all be aborted with no problems.
                    And hardware often does this where possible. So if there is a great deal of
                    spatial coherency with discard, this is useful.</para>
            </sidebar>
            <para>The computation of the normal is based on simple trigonometry. The normal of a
                sphere does not change based on the sphere's radius. Therefore, we can compute the
                normal in the space of the mapping, which uses a normalized sphere radius of 1. The
                normal of a sphere at a point is in the same direction as the direction from the
                sphere's center to that point on the surface.</para>
            <para>Let's look at the 2D case. To have a 2D vector direction, we need an X and Y
                coordinate. If we only have the X, but we know that the vector has a certain length,
                then we can compute the Y component of the vector based on the Pythagorean
                theorem:</para>
            <figure>
                <title>Circle Point Computation</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="CirclePythagorean.svg" />
                    </imageobject>
                </mediaobject>
            </figure>
            <informalfigure>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="PythagoreanTheorem.svg" />
                    </imageobject>
                </mediaobject>
            </informalfigure>
            <para>We simply use the 3D version of this. We have X and Y from
                    <varname>mapping</varname>, and we know the length is 1.0. So we compute the Z
                value easily enough. And since we are only interested in the front-side of the
                sphere, we know that the Z value must be positive.</para>
            <para>Computing the position is also easy. The position of a point on the surface of a
                sphere is the normal at that position scaled by the radius and offset by the center
                point of the sphere.</para>
            <para>One final thing. Notice the square-root at the end, being applied to our
                accumulated lighting. This effectively simulates a gamma of 2.0, but without the
                expensive <function>pow</function> function call. A <function>sqrt</function> call
                is much less expensive and far more likely to be directly built into the shader
                hardware. Yes, this is not entirely accurate, since most displays simulate the 2.2
                gamma of CRT displays. But it's a lot less inaccurate than applying no correction at
                all. We'll discuss a much cheaper way to apply proper gamma correction in future
                tutorials.</para>
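            <para>For comparison only, here is a sketch of the cheap form next to what a more
                accurate gamma 2.2 correction would look like (the tutorial's shader uses the
                    <function>sqrt</function> form):</para>
            <programlisting language="glsl">//Cheap approximation: gamma 2.0 via a single sqrt.
outputColor = sqrt(accumLighting);

//More accurate gamma 2.2; pow is typically costlier:
//outputColor = pow(accumLighting, vec4(1.0 / 2.2));</programlisting>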
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut13 Correct Chicanery.html" ?>
        <title>Correct Chicanery</title>
        <para>Our perfect sphere looks pretty nice. It has no polygonal outlines and you can zoom in
            on it forever. However, it is unfortunately very wrong.</para>
        <para>To see how, toggle back to rendering the mesh on sphere <keycap>1</keycap> (the
            central blue one). Then move the camera so that the sphere is at the left edge of the
            screen. Then toggle back to impostor rendering.</para>
        <figure>
            <title>Bad Impostor</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Impostor%20No%20Perspective.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>What's going on here? The mesh sphere seems to be wider than the impostor sphere. This
            must mean that the mesh sphere is doing something our impostor is not. Does this have to
            do with the inaccuracy of the mesh sphere?</para>
        <para>Quite the opposite, in fact. The mesh sphere is correct. The problem is that our
            impostor is too simple.</para>
        <para>Look back at how we did our computations. We map a sphere down to a flat surface. The
            problem is that <quote>down</quote> in this case is in the camera-space Z direction. The
            mapping between the surface and the sphere is static; it does not change based on the
            viewing angle.</para>
        <para>Consider this 2D case:</para>
        <figure>
            <title>Circle Projection</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="CircleInPerspective.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>The dark line through the circle represents the square we drew. When viewing the
            sphere off to the side like this, we should not be able to see the left-edge of the
            sphere facing perpendicular to the camera. And we should see some of the sphere on the
            right that is behind the plane.</para>
        <para>So how do we solve this?</para>
        <para>Use better math. Our last algorithm is a decent approximation if the spheres are
            somewhat small. But if the spheres are reasonably large (which can also mean close to
            the camera), then our approximation becomes obviously fake. Our new algorithm needs to
            take this into account.</para>
        <para>This algorithm is based on a term you may have heard before: <glossterm>ray
                tracing</glossterm>. We will not be implementing a full ray tracing algorithm here;
            instead, we will use it solely to get the position and normal of a sphere at a certain
            point.</para>
        <para>A ray is a direction and a position; it represents a line extending from the position
            along that direction. The points on the ray can be expressed as the following
            equation:</para>
        <equation>
            <title>Ray Equation</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="RayEquation.svg" />
                </imageobject>
            </mediaobject>
        </equation>
        <para>The <varname>t</varname> value can be positive or negative, but for our needs, we'll
            stick with positive values.</para>
        <para>For each fragment, we want to create a ray from the camera position in the direction
            towards that point on the impostor square. Then we want to detect the point on the
            sphere that it hits, if any. If the ray intersects the sphere, then we use that point
            and normal for our lighting equation.</para>
        <para>The math for this is fairly simple. The equation for the points on a sphere is
            this:</para>
        <equation>
            <title>Sphere Equation</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="SphereEquation.svg" />
                </imageobject>
            </mediaobject>
        </equation>
        <para>For any point P, if this equation is true (that is, if the distance between that
            point and the sphere's center equals the radius), then P is on the sphere. So we can
            substitute our ray equation for P:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="RayTraceDeriv_1.svg" />
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>Our ray goes from the camera into the scene. Since we're in camera space, the camera
            is at the origin. So O can be eliminated from the equation. To solve for
                <varname>t</varname>, we need to get rid of that length. One way to do it is to
            re-express the sphere equation as the length squared. So then we get:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="RayTraceDeriv_2.svg" />
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>The square of the length of a vector is the same as the dot product of that vector
            with itself. So let's do that:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="RayTraceDeriv_3.svg" />
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>The dot product is distributive. Indeed, it follows most of the rules of scalar
            multiplication. This gives us:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="RayTraceDeriv_4.svg" />
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>While this equation has a lot of vector elements in it, as far as t is concerned, it
            is a scalar equation. Indeed, it is a quadratic equation with respect to t. Ah, good
            old algebra.</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="RayTraceDeriv_5.svg" />
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>In case you've forgotten, the part under the square root in the quadratic formula is
            called the discriminant. If this value is negative, then the equation has no solution.
            In terms of our ray test, this means the ray misses the sphere.</para>
        <para>As you may recall, the ± in the quadratic formula gives us two t values. Which makes
            sense; the ray hits the sphere in two places: once going in, and once coming out. The t
            value we are interested in is the smaller of the two. Once we have that, we can use the
            ray equation to compute the point. With the point and the center of the sphere, we can
            compute the normal. And we're back in business.</para>
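        <para>Putting the math together, a fragment shader function along these lines would do the
            job. This is only a sketch: it follows the naming of the earlier listings, and it
            assumes the ray direction is normalized, which makes the quadratic's A coefficient
            equal to 1.</para>
        <programlisting language="glsl">void Impostor(out vec3 cameraPos, out vec3 cameraNormal)
{
    //Point on the impostor plane, in camera space; the ray goes from the origin through it.
    vec3 cameraPlanePos = vec3(mapping * sphereRadius, 0.0) + cameraSpherePos;
    vec3 rayDirection = normalize(cameraPlanePos);
    
    //Quadratic coefficients; A == 1 because rayDirection is unit length.
    float B = 2.0 * dot(rayDirection, -cameraSpherePos);
    float C = dot(cameraSpherePos, cameraSpherePos) - (sphereRadius * sphereRadius);
    
    float det = (B * B) - (4.0 * C);
    if(det &lt; 0.0)
        discard;    //Negative discriminant: the ray misses the sphere.
    
    float sqrtDet = sqrt(det);
    float posT = (-B + sqrtDet) / 2.0;
    float negT = (-B - sqrtDet) / 2.0;
    
    //The smaller t is the intersection nearer the camera.
    float intersectT = min(posT, negT);
    cameraPos = rayDirection * intersectT;
    cameraNormal = normalize(cameraPos - cameraSpherePos);
}</programlisting>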
        <section>
            <title>Extorting and Expanding</title>
            <para>To see this done, open up the last tutorial project. Since they use the exact same
                source, and since they use the same uniforms and other interfaces for their shaders,
                there was no need to make another code project for it. To see the ray-traced
                version, press the <keycap>J</keycap> key; all impostors will use the perspective
                version. To go back to the flat version, press <keycap>L</keycap>.</para>
            <figure>
                <title>Bad vs. Good</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Bad%20vs%20Good%20Impostor2.png"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>The top is the original impostor, the middle is the actual mesh, and the bottom is
                our new ray traced impostor.</para>
            <para>The <function>Impostor</function> function in the new fragment shader implements
                our ray tracing algorithm. More important than this are the changes to the vertex
                shader's computation of the impostor square:</para>
            <example>
                <title>Ray Traced Impostor Square</title>
                <programlisting language="glsl">const float g_boxCorrection = 1.5;

void main()
{
    vec2 offset;
    switch(gl_VertexID)
    {
    case 0:
        //Bottom-left
        mapping = vec2(-1.0, -1.0) * g_boxCorrection;
        offset = vec2(-sphereRadius, -sphereRadius);
        break;
    case 1:
        //Top-left
        mapping = vec2(-1.0, 1.0) * g_boxCorrection;
        offset = vec2(-sphereRadius, sphereRadius);
        break;
    case 2:
        //Bottom-right
        mapping = vec2(1.0, -1.0) * g_boxCorrection;
        offset = vec2(sphereRadius, -sphereRadius);
        break;
    case 3:
        //Top-right
        mapping = vec2(1.0, 1.0) * g_boxCorrection;
        offset = vec2(sphereRadius, sphereRadius);
        break;
    }

    vec4 cameraCornerPos = vec4(cameraSpherePos, 1.0);
    cameraCornerPos.xy += offset * g_boxCorrection;
    
    gl_Position = cameraToClipMatrix * cameraCornerPos;
}</programlisting>
            </example>
            <para>We have expanded the size of the square by 50%. What is the purpose of this? Well,
                let's look at our 2D image again.</para>
            <informalfigure>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="CircleInPerspective.svg" />
                    </imageobject>
                </mediaobject>
            </informalfigure>
            <para>The black line represents the square we used originally. There is a portion of
                the sphere to the left of the square that we should be able to see; with proper ray
                tracing, however, it would not fit onto the area of the radius-sized square.</para>
            <para>This means that we need to expand the size of the square. Rather than finding a
                clever way to compute the exact extent of the sphere's area projected onto a square,
                it's much easier to just make the square bigger. This is even more so considering
                that such math would have to take into account things like the viewport and the
                perspective matrix. Sure, we will end up running the rasterizer rather more than
                strictly necessary. But it's overall much simpler.</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut13 Deceit in Depth.html" ?>
        <title>Deceit in Depth</title>
        <para>While the perspective version looks great, there remains one problem. Move the time
            around until the rotating grey sphere ducks underneath the ground.</para>
        <figure>
            <title>Bad Intersection</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Bad%20Impostor%20Intersection.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>Hmm. Even though we've made it look like a mathematically perfect sphere, it does not
            act like one to the depth buffer. As far as it is concerned, it's just a circle
            (remember: <literal>discard</literal> prevents depth writes and tests as well).</para>
        <para>Is that the end for our impostors? Hardly.</para>
        <para>Part of the fragment shader's output is a depth value. If you do not write one, then
            OpenGL will happily use <varname>gl_FragCoord.z</varname> as the depth output from the
            fragment shader. This value will be depth tested against the current depth value and, if
            the test passes, written to the depth buffer.</para>
        <para>But we do have the ability to write a depth value ourselves. To see how this is done,
            load up the tutorial (using the same code again) and press the <keycap>H</keycap> key.
            This will cause all impostors to use depth-correct shaders.</para>
        <figure>
            <title>Depth Correct Impostor</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Depth%20Correct%20Impostor.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>This shader is identical to the ray traced version, except for these lines in the
            fragment shader:</para>
        <example>
            <title>Depth Correct Fragment Shader</title>
            <programlisting language="glsl">Impostor(cameraPos, cameraNormal);
	
//Set the depth based on the new cameraPos.
vec4 clipPos = cameraToClipMatrix * vec4(cameraPos, 1.0);
float ndcDepth = clipPos.z / clipPos.w;
gl_FragDepth = ((gl_DepthRange.diff * ndcDepth) +
    gl_DepthRange.near + gl_DepthRange.far) / 2.0;</programlisting>
        </example>
        <para>Basically, we go through the process OpenGL normally goes through to compute the
            depth. We just do it on the camera-space position we computed with the ray tracing
            function. The position is transformed to clip space. The perspective division happens,
            transforming to normalized device coordinate (<acronym>NDC</acronym>) space. The depth
            range function is applied, mapping the [-1, 1] NDC depth to the range that the user
            provided with <function>glDepthRange</function>.</para>
        <para>We write the final depth to the built-in output variable
                <varname>gl_FragDepth</varname>.</para>
        <sidebar>
            <title>Fragments and Depth</title>
                    <para>The default behavior of OpenGL is that, if a fragment shader does not
                        write to the output depth, then it simply takes the
                            <varname>gl_FragCoord.z</varname> depth as the depth of the fragment.
                        You could do this manually; one could add the following statement to any
                        fragment shader that uses the default depth value:</para>
                    <programlisting language="glsl">gl_FragDepth = gl_FragCoord.z</programlisting>
                    <para>This is, in terms of behavior, a no-op; it does nothing OpenGL would not
                        have done itself. However, in terms of <emphasis>performance</emphasis>,
                        this is a drastic change.</para>
                    <para>The reason fragment shaders are not required to have this line in all of
                        them is to allow for certain optimizations. If the OpenGL driver can see
                        that you do not set <varname>gl_FragDepth</varname> anywhere in the fragment
                        shader, then it can dramatically improve performance in certain
                        cases.</para>
                    <para>If the driver knows that the output fragment depth is the same as the
                        generated one, it can do the whole depth test <emphasis>before</emphasis>
                        executing the fragment shader. This is called <glossterm>early depth
                            test</glossterm> or <glossterm>early-z</glossterm>. This means that it
                        can discard fragments <emphasis>before</emphasis> wasting precious time
                        executing potentially complex fragment shaders. Indeed, most hardware
                        nowadays has complicated early z culling hardware that can discard multiple
                        fragments with a single test.</para>
                    <para>The moment your fragment shader writes anything to
                            <varname>gl_FragDepth</varname>, all of those optimizations have to go
                        away. So generally, you should only write a depth value yourself if you
                            <emphasis>really</emphasis> need to do it.</para>
                    <para>Also, if your shader writes <varname>gl_FragDepth</varname> anywhere, it
                        must ensure that it is <emphasis>always</emphasis> written to, no matter
                        what conditional branches your shader uses. The value is not initialized to
                        a default; you either always write to it or never mention
                                <quote><varname>gl_FragDepth</varname></quote> in your fragment
                        shader at all. Obviously, you do not always have to write the same value; you
                        can conditionally write different values. But you cannot write something in
                        one path and not write something in another. Initialize it explicitly with
                            <varname>gl_FragCoord.z</varname> if you want to do something like
                        that.</para>
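                    <para>For instance, a pattern like the following (the condition here is
                        hypothetical) keeps <varname>gl_FragDepth</varname> written on every
                        path:</para>
                    <programlisting language="glsl">//Ensure gl_FragDepth is written no matter which branch is taken.
gl_FragDepth = gl_FragCoord.z;
if(useComputedDepth)    //hypothetical condition
    gl_FragDepth = computedDepth;</programlisting>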
        </sidebar>
    </section>
    <section>
        <?dbhtml filename="Tut13 Purloined Primitives.html" ?>
        <title>Purloined Primitives</title>
        <para>Our method of rendering impostor spheres is very similar to our method of rendering
            mesh spheres. In both cases, we set uniforms that define the sphere's position and
            radius. We bind a material uniform buffer, then bind a VAO and execute a draw command.
            We do this for each sphere.</para>
        <para>However, this seems rather wasteful for impostors. Our per-sphere data is really just
            the position and the radius. If we could somehow deliver this data to all four corners
            of each square, then we could simply put all of our position and radius values in a
            buffer object and render every sphere in one draw call. Of course, we would also need
            to find a way to tell it which material to use.</para>
        <para>We accomplish this task in the <phrase role="propername">Geometry Impostor</phrase>
            tutorial project. It looks exactly the same as before; it always draws impostors, using
            the depth-accurate shader.</para>
        <section>
            <title>Impostor Interleaving</title>
            <para>To see how this works, we will start from the front of the rendering pipeline and
                follow the data. This begins with the buffer object and vertex array object we use
                to render.</para>
            <example>
                <title>Impostor Geometry Creation</title>
                <programlisting language="cpp">glBindBuffer(GL_ARRAY_BUFFER, g_imposterVBO);
glBufferData(GL_ARRAY_BUFFER, NUMBER_OF_SPHERES * 4 * sizeof(float), NULL, GL_STREAM_DRAW);

glGenVertexArrays(1, &amp;g_imposterVAO);
glBindVertexArray(g_imposterVAO);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void*)(0));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 1, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (void*)(12));

glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);</programlisting>
            </example>
            <para>This code introduces us to a new feature of
                    <function>glVertexAttribPointer</function>. In all prior cases the fifth
                parameter was 0. Now it is <literal>4 * sizeof(float)</literal>. What does this
                parameter mean?</para>
            <para>This parameter is the array's <varname>stride</varname>. It is the number of bytes
                from one value for this attribute to the next in the buffer. When this parameter is
                0, that means that the actual stride is the size of the base type
                    (<literal>GL_FLOAT</literal> in our case) times the number of components. When
                the stride is non-zero, it must be larger than that value.</para>
            <para>What this means for our vertex data is that the first 3 floats represent attribute
                0, and the next float represents attribute 1. The next 3 floats are attribute 0 of
                the next vertex, and the float after that is attribute 1 of that vertex. And so
                on.</para>
            <para>Arranging attributes of the same vertex alongside one another is called
                    <glossterm>interleaving</glossterm>. It is a very useful technique; indeed, for
                performance reasons, data should generally be interleaved where possible. One thing
                that it allows us to do is build our vertex data based on a struct:</para>
            <programlisting language="cpp">struct VertexData
{
    glm::vec3 cameraPosition;
    float sphereRadius;
};</programlisting>
            <para>Our vertex array object perfectly describes the arrangement of data in an array of
                    <classname>VertexData</classname> objects. So when we upload our positions and
                radii to the buffer object, we simply create an array of these structs, fill in the
                values, and upload them with <function>glBufferData</function>.</para>
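            <para>A sketch of that upload (the per-sphere getters here are hypothetical helpers,
                and <literal>&lt;vector&gt;</literal> is assumed to be included):</para>
            <programlisting language="cpp">//Fill one VertexData entry per sphere, then stream the array to the buffer.
std::vector&lt;VertexData> sphereData(NUMBER_OF_SPHERES);
for(int i = 0; i &lt; NUMBER_OF_SPHERES; i++)
{
    sphereData[i].cameraPosition = GetSphereCameraPos(i); //hypothetical helper
    sphereData[i].sphereRadius = GetSphereRadius(i);      //hypothetical helper
}

glBindBuffer(GL_ARRAY_BUFFER, g_imposterVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(VertexData) * sphereData.size(),
    &amp;sphereData[0], GL_STREAM_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);</programlisting>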
        </section>
        <section>
            <title>Misnamed and Maligned</title>
            <para>So, our vertex data now consists of a position and a radius. But we need to draw
                four vertices, not one. How do we do that?</para>
            <para>We could replicate each vertex's data 4 times and use some simple
                    <varname>gl_VertexID</varname> math in the vertex shader to figure out which
                corner we're using. Or we could get complicated and learn something new. That new
                thing is an entirely new programmable shader stage: <glossterm>geometry
                    shaders</glossterm>.</para>
            <para>Our initial pipeline discussion ignored this shader stage, because it is an
                entirely optional part of the pipeline. If a program object does not contain a
                geometry shader, then OpenGL just does its normal stuff.</para>
            <para>The most confusing thing about geometry shaders is that they do not shade
                geometry. Vertex shaders take a vertex as input and write a vertex as output.
                Fragment shaders take a fragment as input and potentially write a fragment as
                output. Geometry shaders take a <emphasis>primitive</emphasis> as input and write
                zero or more primitives as output. By all rights, they should be called
                    <quote>primitive shaders.</quote></para>
            <para>In any case, geometry shaders are invoked just after the hardware collects the
                vertex shader outputs into a primitive, but before any clipping, transforming or
                rasterization happens. Geometry shaders get the values output from multiple vertex
                shaders, perform arbitrary computations on them, and output one or more sets of
                values to new primitives.</para>
            <para>In our case, the logic begins with our drawing call:</para>
            <programlisting language="cpp">glBindVertexArray(g_imposterVAO);
glDrawArrays(GL_POINTS, 0, NUMBER_OF_SPHERES);
glBindVertexArray(0);</programlisting>
            <para>This introduces a completely new primitive and primitive type:
                    <literal>GL_POINTS</literal>. Recall that multiple primitives can have the same
                base type. <literal>GL_TRIANGLE_STRIP</literal> and <literal>GL_TRIANGLES</literal>
                are both separate primitives, but both generate triangles.
                    <literal>GL_POINTS</literal> does not generate triangle primitives; it generates
                point primitives.</para>
            <para><literal>GL_POINTS</literal> interprets each individual vertex as a separate point
                primitive. There are no other forms of point primitives, because points only contain
                a single vertex worth of information.</para>
            <para>The vertex shader is quite simple, but it does have some new things to show
                us:</para>
            <example>
                <title>Vertex Shader for Points</title>
                <programlisting language="glsl">#version 330

layout(location = 0) in vec3 cameraSpherePos;
layout(location = 1) in float sphereRadius;

out VertexData
{
    vec3 cameraSpherePos;
    float sphereRadius;
} outData;

void main()
{
    outData.cameraSpherePos = cameraSpherePos;
    outData.sphereRadius = sphereRadius;
}</programlisting>
            </example>
            <para><classname>VertexData</classname> is not a struct definition, though it does look
                like one. It is an <glossterm>interface block</glossterm> definition. Uniform blocks
                are a kind of interface block, but inputs and outputs can also have interface
                blocks.</para>
            <para>An interface block used for inputs and outputs is a way of collecting them into
                groups. One of the main uses for these is to separate namespaces of inputs and
                outputs using the interface name (<varname>outData</varname>, in this case). This
                allows us to use the same names for inputs as we do for their corresponding outputs.
                They do have other virtues, as we will soon see.</para>
            <para>Do note that this vertex shader does not write to
                    <varname>gl_Position</varname>. That is not necessary when a vertex shader is
                paired with a geometry shader.</para>
            <para>Speaking of which, let's look at the global definitions of our geometry
                shader.</para>
            <example>
                <title>Geometry Shader Definitions</title>
                <programlisting language="glsl">#version 330
#extension GL_EXT_gpu_shader4 : enable

layout(std140) uniform;
layout(points) in;
layout(triangle_strip, max_vertices=4) out;

uniform Projection
{
    mat4 cameraToClipMatrix;
};

in VertexData
{
    vec3 cameraSpherePos;
    float sphereRadius;
} vert[];

out FragData
{
    flat vec3 cameraSpherePos;
    flat float sphereRadius;
    smooth vec2 mapping;
};</programlisting>
            </example>
            <note>
                <para>The <literal>#extension</literal> line exists to fix a compiler bug for
                    NVIDIA's OpenGL. It should not be necessary.</para>
            </note>
            <para>We see some new uses of the <literal>layout</literal> directive. The
                    <literal>layout(points) in</literal> command is geometry shader-specific. It
                tells OpenGL that this geometry shader is intended to take point primitives. This is
                required; OpenGL will also fail to render if you try to draw something other than
                    <literal>GL_POINTS</literal> through this geometry shader.</para>
            <para>Similarly, the output layout definition states that this geometry shader outputs
                triangle strips. The <literal>max_vertices</literal> directive states that we will
                write at most 4 vertices. There are implementation defined limits on how large
                    <literal>max_vertices</literal> can be. Both of these declarations are required
                for geometry shaders.</para>
            <para>Below the <classname>Projection</classname> uniform block, we have two interface
                blocks. The first one matches the definition from the vertex shader, with two
                exceptions: it has a different interface name, and that interface name has an array
                qualifier on it.</para>
            <para>Geometry shaders take a primitive. And a primitive is defined as some number of
                vertices in a particular order. The input interface blocks define what the input
                vertex data is, but there is more than one set of vertex data. Therefore, the
                interface blocks must be defined as arrays. Granted, in our case, it is an array of
                length 1, since point primitives have only one vertex. But this is still necessary
                even in that case.</para>
            <para>We also have another output interface block. This one matches the input expected
                by the fragment shader, as we will see a bit later. It does not have an instance
                name. Also, note that several of the values use the <literal>flat</literal>
                qualifier. We could have just used <literal>smooth</literal>, since we're passing
                the same values for all of the triangle's vertices. However, it's more descriptive
                to use the <literal>flat</literal> qualifier for values that are not supposed to be
                interpolated. It might even save performance.</para>
            <para>Here is the geometry shader code for computing one of the vertices of the output
                triangle strip:</para>
            <example>
                <title>Geometry Shader Vertex Computation</title>
                <programlisting language="glsl">//Bottom-left
mapping = vec2(-1.0, -1.0) * g_boxCorrection;
cameraSpherePos = vec3(vert[0].cameraSpherePos);
sphereRadius = vert[0].sphereRadius;
cameraCornerPos = vec4(vert[0].cameraSpherePos, 1.0);
cameraCornerPos.xy += vec2(-vert[0].sphereRadius, -vert[0].sphereRadius) * g_boxCorrection;
gl_Position = cameraToClipMatrix * cameraCornerPos;
gl_PrimitiveID = gl_PrimitiveIDIn;
EmitVertex();</programlisting>
            </example>
            <para>This code is followed by three more blocks like it, using different mapping and
                offset values for the different corners of the square. The
                    <varname>cameraCornerPos</varname> is a local variable that is re-used as
                temporary storage.</para>
            <para>To output a vertex, write to each of the output variables. In this case, we have
                the three from the output interface block, as well as the built-in variables
                    <varname>gl_Position</varname> and <varname>gl_PrimitiveID</varname> (which we
                will discuss more in a bit). Then, call <function>EmitVertex()</function>; this
                causes all of the values in the output variables to be transformed into a vertex
                that is sent to the output primitive type. After calling this function, the contents
                of those outputs are undefined. So if you want to use the same value for multiple
                vertices, you have to store the value in a different variable or recompute
                it.</para>
            <para>Note that clipping, face-culling, and all of that stuff happens after the geometry
                shader. This means that we must ensure that the order of our output positions will
                be correct given the current winding order.</para>
            <para><varname>gl_PrimitiveIDIn</varname> is a special input value. Much like
                    <varname>gl_VertexID</varname> from the vertex shader,
                    <varname>gl_PrimitiveIDIn</varname> represents the current primitive being
                processed by the geometry shader (one more reason for calling it a primitive
                shader). We write this to the built-in output <varname>gl_PrimitiveID</varname>, so
                that the fragment shader can use it to select which material to use.</para>
            <para>And speaking of the fragment shader, it's time to have a look at that.</para>
            <example>
                <title>Fragment Shader Changes</title>
                <programlisting language="glsl">in FragData
{
    flat vec3 cameraSpherePos;
    flat float sphereRadius;
    smooth vec2 mapping;
};

out vec4 outputColor;

layout(std140) uniform;

struct MaterialEntry
{
    vec4 diffuseColor;
    vec4 specularColor;
    vec4 specularShininess;        //ATI Array Bug fix. Not really a vec4.
};

const int NUMBER_OF_SPHERES = 4;

uniform Material
{
    MaterialEntry material[NUMBER_OF_SPHERES];
} Mtl;</programlisting>
            </example>
            <para>The input interface is just the mirror of the output from the geometry shader.
                What's more interesting is what happened to our material blocks.</para>
            <para>In our original code, we had an array of uniform blocks stored in a single uniform
                buffer in C++. We bound specific portions of this material block when we wanted to
                render with a particular material. That will not work now that we are trying to
                render multiple spheres in a single draw call.</para>
            <para>So, instead of having an array of uniform blocks, we have a uniform block that
                    <emphasis>contains</emphasis> an array. We bind all of the materials to the
                shader, and let the shader pick which one it wants as needed. The source code to do
                this is pretty straightforward.</para>
            <note>
                <para>Notice that the material <varname>specularShininess</varname> became a
                        <type>vec4</type> instead of a simple <type>float</type>. This is due to an
                    unfortunate bug in ATI's OpenGL implementation.</para>
            </note>
            <para>As for how the material selection happens, that's simple. In our case, we use the
                primitive identifier. The <varname>gl_PrimitiveID</varname> value written by the
                geometry shader is used to index into the <varname>Mtl.material[]</varname>
                array.</para>
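            <para>In GLSL, that lookup might look like the following sketch (the lighting
                computation itself is elided; <varname>curMtl</varname> is just a hypothetical
                local name):</para>
            <informalexample>
                <programlisting language="glsl">//A minimal sketch of the material lookup; the lighting code is elided.
MaterialEntry curMtl = Mtl.material[gl_PrimitiveID];
vec4 diffuse = curMtl.diffuseColor;
vec4 specular = curMtl.specularColor;
float shininess = curMtl.specularShininess.x;    //Only the first component is meaningful.</programlisting>
            </informalexample>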
            <para>Do note that uniform blocks have a maximum size that is hardware-dependent. If we
                wanted to have a large palette of materials, on the order of several thousand, then
                we may exceed this limit. At that point, we would need an entirely new way to handle
                this data. One that we have not learned about yet.</para>
            <para>Or we could just split it up into multiple draw calls instead of one.</para>
        </section>
    </section>
    
    <section>
        <?dbhtml filename="Tut13 In Review.html" ?>
        <title>In Review</title>
        <para>In this tutorial, you have learned the following:</para>
        <itemizedlist>
            <listitem>
                <para>Impostors are objects whose geometric representation has little or no
                    resemblance to what the viewer sees. These typically generate an object
                    procedurally, cutting fragments out to form a shape and then using normals to
                    do lighting computations on the cut-out.</para>
            </listitem>
            <listitem>
                <para>Fragments can be discarded from within a fragment shader. This prevents the
                    outputs from the shader from being written to the final image.</para>
            </listitem>
            <listitem>
                <para>Ray tracing can be employed by a fragment shader to determine the position and
                    normal for a point. Those values can be fed into the lighting equation to
                    produce a color value.</para>
            </listitem>
            <listitem>
                <para>Fragment shaders can change the depth value that is used for the depth test
                    and is written to the framebuffer.</para>
            </listitem>
            <listitem>
                <para>Geometry shaders are a shader stage between the vertex shader and the
                    rasterizer. They take a primitive as input and return zero or more primitives
                    as output.</para>
            </listitem>
        </itemizedlist>
        <section>
            <title>Further Study</title>
            <para>Try doing these things with the given programs.</para>
            <itemizedlist>
                <listitem>
                    <para>Change the geometry impostor tutorial to take another vertex input: the
                        material to use. The vertex shader should pass it along to the geometry
                        shader, and the geometry shader should hand it to the fragment shader. You
                        can still use <varname>gl_PrimitiveID</varname> as the way to tell the
                        fragment shader. Regardless of how you send it, you will need to convert the
                        value to an integer at some point. That can be done with this
                        constructor-like syntax: <literal>int(value_to_convert)</literal>.</para>
                </listitem>
            </itemizedlist>
        </section>
        <section>
            <title>Further Research</title>
            <para>This is an introduction to the concept of impostors. Indeed, the kind of ray
                tracing that we did has often been used to render more complex shapes like cylinders
                or quadric surfaces. But impostors are capable of much, much more.</para>
            <para>In effect, impostors allow you to use the fragment shader to just draw stuff to an
                area of the screen. They can be used to rasterize perfect circles, rather than
                drawing line-based approximations. Some have even used them to rasterize Bézier
                curves perfectly.</para>
            <para>There are other impostor-based solutions. Most particle systems (a large and
                vibrant topic that you should investigate) use flat-cards to draw pictures that move
                through space. These images can animate, changing from one image to another based on
                time, and large groups of these particles can be used to simulate various phenomena
                like smoke, fire, and the like.</para>
            <para>All of these subjects are worthy of your time.</para>
        </section>
        <section>
            <title>GLSL Features of Note</title>
            <glosslist>
                <glossentry>
                    <glossterm>discard</glossterm>
                    <glossdef>
                        <para>This fragment shader-only directive will cause the outputs of the
                            fragment to be ignored. The fragment outputs, including the implicit
                            depth, will not be written to the framebuffer.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>gl_VertexID</glossterm>
                    <glossdef>
                        <para>An input to the vertex shader of type <type>int</type>. This is the
                            index of the vertex being processed.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>gl_FragDepth</glossterm>
                    <glossdef>
                        <para>An output from the fragment shader of type <type>float</type>. This
                            value represents the depth of the fragment. If the fragment shader does
                            not use this value in any way, then the depth will be written
                            automatically, using <varname>gl_FragCoord.z</varname>. If the fragment
                            shader writes to it anywhere, then it must ensure that
                                <emphasis>all</emphasis> codepaths write to it (a short sketch
                            follows this list).</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>gl_PrimitiveID</glossterm>
                    <glossdef>
                        <para>A geometry shader output and the corresponding fragment shader input
                            of type <type>int</type>. If there is no geometry shader, then this
                            value will be the number of primitives previously rendered in this
                            draw call. If there is a geometry shader, but it does not write to
                            this value, then the value will be undefined.</para>
                    </glossdef>
                </glossentry>
                <glossentry>
                    <glossterm>gl_PrimitiveIDIn</glossterm>
                    <glossdef>
                        <para>A geometry shader input. It is the current count of primitives
                            previously processed in this draw call.</para>
                    </glossdef>
                </glossentry>
            </glosslist>
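            <para>As a brief illustration of the <varname>gl_FragDepth</varname> rule above, here
                is a minimal sketch; the <varname>useBias</varname> and <varname>depthBias</varname>
                uniforms are hypothetical. Both codepaths write to the output:</para>
            <informalexample>
                <programlisting language="glsl">//A minimal sketch; useBias and depthBias are hypothetical uniforms.
//Once gl_FragDepth is written anywhere in the shader, every codepath
//must write it, or the resulting depth is undefined.
if(useBias)
    gl_FragDepth = gl_FragCoord.z + depthBias;
else
    gl_FragDepth = gl_FragCoord.z;</programlisting>
            </informalexample>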
            <funcsynopsis>
                <funcprototype>
                    <funcdef>void <function>EmitVertex</function></funcdef>
                </funcprototype>
            </funcsynopsis>
            <para>Available only in the geometry shader. When this function is called, all output
                variables previously set by the geometry shader are consumed and transformed into a
                vertex. The values of those variables become undefined after calling this
                function.</para>
        </section>
        
    </section>
    <section>
        <?dbhtml filename="Tut13 Glossary.html" ?>
        <title>Glossary</title>
        <glosslist>
            <glossentry>
                <glossterm>billboard, flat card</glossterm>
                <glossdef>
                    <para>Terms used to describe the actual geometry used for impostors that are
                        based on rendering camera-aligned shapes.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>impostor</glossterm>
                <glossdef>
                    <para>Any object whose geometry does not even superficially resemble the final
                        rendered product. In these cases, the mesh geometry is usually just a way to
                        designate an area of the screen to draw to, while the fragment shader does
                        the real work.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>ray tracing</glossterm>
                <glossdef>
                    <para>For the purposes of this book, ray tracing is a technique whereby a
                        mathematical object is tested against a ray (direction + position), to see
                        if the ray intersects the object. At the point of intersection, one can
                        generate a normal. With a position and normal in hand, one can use lighting
                        equations to produce an image.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>early depth test, early-z</glossterm>
                <glossdef>
                    <para>An optimization in the depth test, where the incoming fragment's depth
                        value is tested <emphasis>before</emphasis> the fragment shader executes. If
                        the fragment shader is long, this can save a great deal of time. If the
                        fragment shader exercises the option to modify or replace the fragment's
                        depth, then the early depth test optimization will not be active.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>interleaving</glossterm>
                <glossdef>
                    <para>A way of storing vertex attributes in a buffer object. This involves
                        entwining the attribute data, so that most or all of each vertex's
                        attributes are spatially adjacent in the buffer object. This is as opposed
                        to giving each vertex attribute its own array. Interleaving can lead to
                        higher rendering performance in vertex transfer limited cases.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>geometry shaders</glossterm>
                <glossdef>
                    <para>A programmable stage between the vertex shader and the
                        clipping/rasterization stage. Geometry shaders take a primitive of a certain
                        type as input, and return zero or more primitives of a possibly different
                        type as output. The vertex data taken as input does not have to match the
                        vertex data sent as output, but the geometry shader's output interface must
                        match that of the fragment shader's input interface.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>interface block</glossterm>
                <glossdef>
                    <para>An ordered grouping of uniforms, shader inputs, or shader outputs. When
                        used with uniforms, these are called uniform blocks. These are useful for
                        name scoping, so that inputs and outputs can use the name that is most
                        convenient and descriptive.</para>
                </glossdef>
            </glossentry>
        </glosslist>
        
    </section>
</chapter>