<?xml version="1.0" encoding="UTF-8"?>
<?oxygen RNGSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng" type="xml"?>
<?oxygen SCHSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng"?>
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
    <?dbhtml filename="Tutorial 10.html" ?>
    <title>Plane Lights</title>
    <para>Directional lights are useful for representing light sources like the sun and so forth.
        But most light sources are more likely to be represented as point lights.</para>
    <para>A <glossterm>point light source</glossterm> is a light source that has a position in the
        world and shines with equal intensity in all directions. Our simple diffuse lighting
        equation is a function of these properties:</para>
    <itemizedlist>
        <listitem>
            <para>The surface normal at that point.</para>
        </listitem>
        <listitem>
            <para>The direction from the point on the surface to the light.</para>
        </listitem>
    </itemizedlist>
    <para>The direction to the light source from the point is a constant when dealing with
        directional light. It is a parameter for lighting, but it is a constant value for all points
        in the scene. The difference between directional lighting and point lights is only that this
        direction must be computed for each position in the scene.</para>
    <para>Computing this is quite simple. At the point of interest, we take the difference between
        the light's position and the point on the surface. We normalize the result to produce a
        unit vector pointing toward the light. Then we use that light direction as we did before.
        The surface point, light position, and surface normal must all be in the same space for
        this equation to make sense.</para>
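    <para>In GLSL terms, the computation might look like this minimal sketch (the variable names
        are illustrative, not taken from the tutorial's shaders):</para>
    <programlisting language="glsl">//Hypothetical names; all three values must be in the same space.
vec3 dirToLight = normalize(lightPos - surfacePos);
float cosAngIncidence = clamp(dot(surfaceNormal, dirToLight), 0, 1);</programlisting>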
    <section>
        <?dbhtml filename="Tut10 Vertex Point Lighting.html" ?>
        <title>Vertex Point Lighting</title>
        <para>Thus far, we have computed the lighting equation at each vertex and interpolated the
            results across the surface of the triangle. We will continue to do so for point lights.
            For the moment, at least.</para>
        <para>We implement point lights per-vertex in the <phrase role="propername">Vertex Point
                Lighting</phrase> tutorial. This tutorial has a moving point light that circles
            around the cylinder.</para>
        <figure>
            <title>Vertex Point Lighting</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Vertex%20Point%20Lighting.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>To toggle an indicator of the light's position, press the <keycap>Y</keycap> key. The
                <keycap>B</keycap> key will toggle rotation of the light. The <keycap>I</keycap> and
                <keycap>K</keycap> keys move the light up and down respectively, while the
                <keycap>J</keycap> and <keycap>L</keycap> keys will decrease and increase the
            light's radius. Holding shift with these keys will move in smaller increments.</para>
        <para>Most of the code is nothing we have not seen elsewhere. The main changes are at the top
            of the rendering function.</para>
        <example>
            <title>Per-Vertex Point Light Rendering</title>
            <programlisting language="cpp">glutil::MatrixStack modelMatrix;
modelMatrix.SetMatrix(g_viewPole.CalcMatrix());

const glm::vec4 &amp;worldLightPos = CalcLightPosition();

glm::vec4 lightPosCameraSpace = modelMatrix.Top() * worldLightPos;

glUseProgram(g_WhiteDiffuseColor.theProgram);
glUniform3fv(g_WhiteDiffuseColor.lightPosUnif, 1, glm::value_ptr(lightPosCameraSpace));
glUseProgram(g_VertexDiffuseColor.theProgram);
glUniform3fv(g_VertexDiffuseColor.lightPosUnif, 1, glm::value_ptr(lightPosCameraSpace));</programlisting>
        </example>
        <para>The light is computed initially in world space, then transformed into camera space.
            The camera-space light position is given to both of the shaders. Rendering proceeds
            normally from there.</para>
        <para>Our vertex shader, <filename>PosVertexLighting_PCN.vert</filename> has had a few
            changes:</para>
        <example>
            <title>Per-Vertex Point Light Vertex Shader</title>
            <programlisting language="glsl">#version 330

layout(location = 0) in vec3 position;
layout(location = 1) in vec4 diffuseColor;
layout(location = 2) in vec3 normal;

smooth out vec4 interpColor;

uniform vec3 lightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;

uniform mat4 modelToCameraMatrix;
uniform mat3 normalModelToCameraMatrix;

uniform Projection
{
    mat4 cameraToClipMatrix;
};

void main()
{
    vec4 cameraPosition = (modelToCameraMatrix * vec4(position, 1.0));
    gl_Position = cameraToClipMatrix * cameraPosition;
    
    vec3 normCamSpace = normalize(normalModelToCameraMatrix * normal);
    
    vec3 dirToLight = normalize(lightPos - vec3(cameraPosition));
    
    float cosAngIncidence = dot(normCamSpace, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    
    interpColor = (diffuseColor * lightIntensity * cosAngIncidence) +
        (diffuseColor * ambientIntensity);
}</programlisting>
        </example>
        <para>The vertex shader takes a camera-space light position instead of a camera-space light
            direction. It also stores the camera-space vertex position in a temporary in the first
            line of <function>main</function>. This is used to compute the direction to the light.
            From there, the computation proceeds normally.</para>
        <para>Note the order of operations in computing <varname>dirToLight</varname>. The
                <varname>lightPos</varname> is on the left and the <varname>cameraPosition</varname>
            is on the right. Geometrically, this is correct. If you have two points, and you want to
            find the direction from point A to point B, you compute B - A. The
                <function>normalize</function> call is just to convert it into a unit vector.</para>
    </section>
    <section>
        <?dbhtml filename="Tut10 Interpolation.html" ?>
        <title>Interpolation</title>
        <para>As you can see, doing point lighting is quite simple. Unfortunately, the visual
            results are not.</para>
        <para>For example, use the controls to display the position of the point light source, then
            position it near the ground plane. See anything wrong?</para>
        <para>If everything were working correctly, one would expect to see a bright area directly
            under the light. After all, geometrically, this situation looks like this:</para>
        <figure>
            <title>Light Near Surface</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="LightNearSurface.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>The surface normals for the areas directly under the light are almost the same as the
            direction towards the light. This means that the angle of incidence is small, so the
            cosine of this angle is close to 1. That should translate to having a bright area under
            the light, but darker areas farther away. What we see is nothing of the sort. Why is
            that?</para>
        <para>Well, consider what we are doing. We are computing the lighting at every triangle's
                <emphasis>vertex</emphasis>, and then interpolating the results across the surface
            of the triangle. The ground plane is made up of precisely four vertices: the four
            corners. And those are all very far from the light position and have a very large angle
            of incidence. Since none of them have a small angle of incidence, none of the colors
            that are interpolated across the surface are bright.</para>
        <para>You can see further evidence of this by putting the light position next to the
            cylinder. If the light is at the top or bottom of the cylinder, then the area near the
            light will be bright. But if you move the light to the middle of the cylinder, far from
            the top or bottom vertices, then the illumination will be much dimmer.</para>
        <para>This is not the only problem with doing per-vertex lighting. For example, run the
            tutorial again and do not move the light. Just watch how the light behaves on the
            cylinder's surface as it animates around. Unlike with directional lighting, you can very
            easily see the triangles on the cylinder's surface. Though the per-vertex computations
            are not helping matters, the main problem here has to do with interpolating the
            values.</para>
        <para>If you move the light source farther away, you can see that the triangles smooth out
            and become indistinct from one another. But this is simply because, if the light source
            is far enough away, the results are indistinguishable from a directional light. Each
            vertex's direction to the light is almost the same as each other vertex's direction to
            the light.</para>
        <para>Per-vertex lighting was reasonable when dealing with directional lights. But it simply
            is not a good idea for point lighting. The question arises: why was per-vertex lighting
            good with directional lights to begin with?</para>
        <para>Remember that our diffuse lighting equation has two parameters: the direction to the
            light and the surface normal. In directional lighting, the direction to the light is
            always the same. Therefore, the only value that changes over a triangle's surface is the
            surface normal.</para>
        <para>Linear interpolation of vectors looks like this:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="StandardLerp_1.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>The α in the equation is the factor of interpolation between the two values. When α is
            one, we get V<subscript>a</subscript>, and when it is zero, we get
                V<subscript>b</subscript>. The two values, V<subscript>a</subscript> and
                V<subscript>b</subscript> can be scalars or vectors.</para>
        <para>Our diffuse lighting equation is this:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="DiffuseLightEq_2.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>If the surface normal N is being interpolated, then at any particular point on the
            surface, we get this equation for a directional light (the light direction L does not
            change):</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="LerpDiffuse_3.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>The dot product is distributive, like scalar multiplication. So we can distribute the
            dot product with L over both terms of the interpolation:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="LerpDiffuse_4.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>We can extract the scalar interpolation terms from the dot products. Remember that
            the dot product is the cosine of the angle between two vectors, times the lengths of
            those vectors. The two scaling terms directly modify the lengths of the vectors, so
            they can be pulled out to give us:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="LerpDiffuse_5.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>Recall that vector/scalar multiplication is distributive. We can distribute the
            multiplication by the diffuse color and light intensity to both terms. This gives
            us:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="LerpDiffuse_6.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>This means that if L is constant, linearly interpolating N is exactly equivalent to
            linearly interpolating the results of the lighting equation. And the addition of the
            ambient term does not change this, since it is a constant and would not be affected by
            linear interpolation.</para>
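        <para>Putting the whole chain together in one line, writing D for the diffuse color and I
            for the light intensity, the equations above state that:</para>
        <informalequation>
            <mathphrase>D I ((αN<subscript>a</subscript> + (1 − α)N<subscript>b</subscript>) · L) =
                α(D I (N<subscript>a</subscript> · L)) + (1 − α)(D I (N<subscript>b</subscript> ·
                L))</mathphrase>
        </informalequation>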
        <para>When doing point lighting, you would have to interpolate both N and L. And that does
            not yield the same results as linearly interpolating the two colors you get from the
            lighting equation. This is a big part of the reason why the cylinder does not look
            correct.</para>
        <para>The more physically correct method of lighting is to perform lighting at every rendered
            pixel. To do that, we would have to interpolate the lighting parameters across the
            triangle, and perform the lighting computation in the fragment shader.</para>
    </section>
    <section>
        <?dbhtml filename="Tut10 Fragment Lighting.html" ?>
        <title>Fragment Lighting</title>
        <para>So, in order to deal with interpolation artifacts, we need to interpolate the actual
            light direction and normal, instead of just the results of the lighting equation. This
            is called per-fragment lighting or just <glossterm>fragment lighting.</glossterm></para>
        <para>This is pretty simple, conceptually. We simply need to do the lighting computations in
            the fragment shader. So the fragment shader needs the position of the fragment, the
            light's position (or the direction to the light from that position), and the surface
            normal of the fragment at that position. And all of these values need to be in the same
            coordinate space.</para>
        <para>There is a problem that needs to be dealt with first. Normals do not interpolate well.
            Or rather, wildly different normals do not interpolate well. And light directions can be
            very different if the light source is close to the triangle relative to that triangle's
            size.</para>
        <para>Consider the large plane we have. The direction toward the light will be very
            different at each vertex, so long as our light remains in relatively close proximity to
            the plane.</para>
        <para>Part of the problem is with interpolating values along the diagonal of our triangle.
            Interpolation within a triangle works like this. For any position within the area of the
            triangle, that position can be expressed as a weighted sum of the positions of the three
            vertices.</para>
        <figure>
            <title>Triangle Interpolation</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="BarycentricTriangle.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <informalequation>
            <mathphrase>P = αA + βB + γC, where α + β + γ = 1.0</mathphrase>
        </informalequation>
        <para>The α, β, and γ values are weights, not the distances from their respective vertices
            to the point of interest. In the above case, the point P is in the exact center of the
            triangle. Thus, the three values are each ⅓.</para>
        <para>If the point of interest is along an edge of the triangle, then the contribution of
            the vertex not sharing that edge is zero.</para>
        <figure>
            <title>Triangle Edge Interpolation</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="BarycentricTriangleEdge.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>Here, point P is exactly halfway between points C and B. Therefore, β and γ are both
            0.5, but α is 0.0. If point P is anywhere along the edge of a triangle, it gets none of
            its final interpolated value from the third vertex. So along a triangle's edge, it acts
            like the kind of linear interpolation we have seen before.</para>
        <para>This is how OpenGL interpolates the vertex shader outputs. It takes the α, β, and γ
            coordinates for the fragment's position and combines them with the vertex output value
            for the three vertices in the same way it does for the fragment's position. There is
            slightly more to it than that, but we will discuss that later.</para>
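        <para>As a concrete sketch of the basic idea (illustrative code, not part of the tutorial,
            and ignoring that <quote>slightly more</quote>):</para>
        <programlisting language="cpp">#include &lt;glm/glm.hpp&gt;

//Hypothetical helper: combine a per-vertex output across a triangle,
//given the fragment's barycentric coordinates (alpha + beta + gamma == 1).
glm::vec4 InterpolateOutput(const glm::vec4 &amp;outA, const glm::vec4 &amp;outB,
    const glm::vec4 &amp;outC, float alpha, float beta, float gamma)
{
    return alpha * outA + beta * outB + gamma * outC;
}</programlisting>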
        <para>The ground plane in our example is made of two large triangles. They look like
            this:</para>
        <figure>
            <title>Two Triangle Quadrilateral</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="TwoTriangleQuad.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>What happens if we put the color black on the top-right and bottom-left points, and
            put the color green on the top-left and bottom-right points? If you interpolate these
            across the surface, you would get this:</para>
        <figure>
            <title>Two Triangle Interpolation</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="TwoTriangleInterpolation.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>The color is pure green along the diagonal. That is because along a triangle's edge,
            the value interpolated will only be the color of the two vertices along that edge. The
            value is interpolated based on each triangle individually, not on data from any
            neighboring triangle.</para>
        <para>In our case, this means that for points along the main diagonal, the light direction
            will only be composed of the direction values from the two vertices on that diagonal.
            This is not good. This would not be much of a problem if the light direction did not
            change much along the surface, but with large triangles (relative to how close the light
            is to them), that is simply not the case.</para>
        <para>Since we cannot interpolate the light direction very well, we need to interpolate
            something else. Something that does exhibit the characteristics we need when
            interpolated.</para>
        <para>Positions interpolate quite well. Interpolating the top-left and bottom-right
            positions yields an accurate position value along the diagonal. So instead of
            interpolating the light direction, we interpolate the components of the light direction.
            Namely, the two positions. The light position is a constant, so we only need to
            interpolate the vertex position.</para>
        <para>Now, we could do this in any space. But for illustrative purposes, we will be doing
            this in model space. That is, both the light position and vertex position will be in
            model space.</para>
        <para>One of the advantages of doing things in model space is that it gets rid of that pesky
            matrix inverse/transpose we had to do to transform normals correctly. Indeed, normals
            are not transformed at all. One of the disadvantages is that it requires computing an
            inverse matrix for our light position, so that we can go from world space to model
            space.</para>
        <para>The <phrase role="propername">Fragment Point Lighting</phrase> tutorial shows off how
            fragment lighting works.</para>
        <figure>
            <title>Fragment Point Lighting</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Fragment%20Point%20Lighting.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>Much better.</para>
        <para>This tutorial is controlled as before, with a few exceptions. Pressing the
                <keycap>t</keycap> key will toggle a scale factor applied to the cylinder, and
            pressing the <keycap>h</keycap> key will toggle between per-fragment lighting and
            per-vertex lighting.</para>
        <para>The rendering code has changed somewhat, considering the use of model space for
            lighting instead of camera space. The start of the rendering looks as follows:</para>
        <example>
            <title>Initial Per-Fragment Rendering</title>
            <programlisting language="cpp">glutil::MatrixStack modelMatrix;
modelMatrix.SetMatrix(g_viewPole.CalcMatrix());

const glm::vec4 &amp;worldLightPos = CalcLightPosition();

glm::vec4 lightPosCameraSpace = modelMatrix.Top() * worldLightPos;</programlisting>
        </example>
        <para>The new code is the last line, where we transform the world-space light into camera
            space. This is done to make the math much easier. Since our matrix stack is building up
            the transform from model to camera space, the inverse of this matrix would be a
            transform from camera space to model space. So we need to put our light position into
            camera space before we transform it by the inverse.</para>
        <para>After doing that, the rendering code uses a variable to switch between per-vertex and
            per-fragment lighting. This just selects which shaders to use; both sets of shaders take
            the same uniform values, even though they use them in different program stages.</para>
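        <para>That selection might look something like the following sketch (the exact variable
            names in the tutorial's source may differ):</para>
        <programlisting language="cpp">//Hypothetical: pick one of the two shader sets; both take the same uniforms.
ProgramData *pWhiteProgram = g_bUseFragmentLighting ?
    &amp;g_FragWhiteDiffuseColor : &amp;g_WhiteDiffuseColor;</programlisting>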
        <para>The ground plane is rendered with this code:</para>
        <example>
            <title>Ground Plane Per-Fragment Rendering</title>
            <programlisting language="cpp">glutil::PushStack push(modelMatrix);

glUseProgram(pWhiteProgram->theProgram);
glUniformMatrix4fv(pWhiteProgram->modelToCameraMatrixUnif, 1, GL_FALSE,
    glm::value_ptr(modelMatrix.Top()));

glm::mat4 invTransform = glm::inverse(modelMatrix.Top());
glm::vec4 lightPosModelSpace = invTransform * lightPosCameraSpace;
glUniform3fv(pWhiteProgram->modelSpaceLightPosUnif, 1, glm::value_ptr(lightPosModelSpace));

g_pPlaneMesh->Render();
glUseProgram(0);</programlisting>
        </example>
        <para>We compute the inverse matrix using <function>glm::inverse</function> and store it.
            We then use it to compute the model-space light position and pass that to the shader.
            Finally, the plane is rendered.</para>
        <para>The cylinder is rendered using similar code. It simply does a few transformations to
            the model matrix before computing the inverse and rendering.</para>
        <para>The shaders are where the real action is. As with previous lighting tutorials, there
            are two sets of shaders: one that takes a per-vertex color, and one that uses a constant
            white color. The vertex shaders that do per-vertex lighting computations should be
            familiar:</para>
        <example>
            <title>Model Space Per-Vertex Lighting Vertex Shader</title>
            <programlisting language="glsl">#version 330

layout(location = 0) in vec3 position;
layout(location = 1) in vec4 inDiffuseColor;
layout(location = 2) in vec3 normal;

out vec4 interpColor;

uniform vec3 modelSpaceLightPos;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;

uniform mat4 modelToCameraMatrix;
uniform mat3 normalModelToCameraMatrix;

uniform Projection
{
    mat4 cameraToClipMatrix;
};

void main()
{
    gl_Position = cameraToClipMatrix * (modelToCameraMatrix * vec4(position, 1.0));
    
    vec3 dirToLight = normalize(modelSpaceLightPos - position);
    
    float cosAngIncidence = dot(normal, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    
    interpColor = (lightIntensity * cosAngIncidence * inDiffuseColor) +
        (ambientIntensity * inDiffuseColor);
}</programlisting>
        </example>
        <para>The main differences between this version and the previous version are simply what one
            would expect from the change from camera-space lighting to model-space lighting. The
            per-vertex inputs are used directly, rather than being transformed into camera space.
            There is a second version that omits the <varname>inDiffuseColor</varname> input.</para>
        <para>With per-vertex lighting, we have two vertex shaders:
                <filename>ModelPosVertexLighting_PCN.vert</filename> and
                <filename>ModelPosVertexLighting_PN.vert</filename>. With per-fragment lighting, we
            also have two shaders: <filename>FragmentLighting_PCN.vert</filename> and
                <filename>FragmentLighting_PN.vert</filename>. They are disappointingly
            simple:</para>
        <example>
            <title>Model Space Per-Fragment Lighting Vertex Shader</title>
            <programlisting language="glsl">#version 330

layout(location = 0) in vec3 position;
layout(location = 1) in vec4 inDiffuseColor;
layout(location = 2) in vec3 normal;

out vec4 diffuseColor;
out vec3 vertexNormal;
out vec3 modelSpacePosition;

uniform mat4 modelToCameraMatrix;

uniform Projection
{
    mat4 cameraToClipMatrix;
};

void main()
{
    gl_Position = cameraToClipMatrix * (modelToCameraMatrix * vec4(position, 1.0));
    
    modelSpacePosition = position;
    vertexNormal = normal;
    diffuseColor = inDiffuseColor;
}</programlisting>
        </example>
        <para>Since our lighting is done in the fragment shader, there is not much to do except pass
            variables through and set the output clip-space position. The version that takes no
            diffuse color simply passes along a <type>vec4</type> containing only 1.0 values.</para>
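        <para>That is, the last line of the <filename>FragmentLighting_PN.vert</filename> variant
            presumably amounts to something like:</para>
        <programlisting language="glsl">diffuseColor = vec4(1.0); //all four components set to 1.0</programlisting>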
        <para>The fragment shader is much more interesting:</para>
        <example>
            <title>Per-Fragment Lighting Fragment Shader</title>
            <programlisting language="glsl">#version 330

in vec4 diffuseColor;
in vec3 vertexNormal;
in vec3 modelSpacePosition;

out vec4 outputColor;

uniform vec3 modelSpaceLightPos;

uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;

void main()
{
    vec3 lightDir = normalize(modelSpaceLightPos - modelSpacePosition);
    
    float cosAngIncidence = dot(normalize(vertexNormal), lightDir);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    
    outputColor = (diffuseColor * lightIntensity * cosAngIncidence) +
        (diffuseColor * ambientIntensity);
}</programlisting>
        </example>
        <para>The math is essentially identical between the per-vertex and per-fragment case. The
            main difference is the normalization of <varname>vertexNormal</varname>. This is
            necessary because interpolating between two unit vectors does not produce a unit
            vector. Indeed, linearly interpolating the 3 components of two distinct unit vectors
            guarantees that the result will not be a unit vector.</para>
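        <para>For example, halfway between the unit vectors (1, 0, 0) and (0, 1, 0):</para>
        <informalequation>
            <mathphrase>0.5(1, 0, 0) + 0.5(0, 1, 0) = (0.5, 0.5, 0), which has a length of √0.5 ≈
                0.707</mathphrase>
        </informalequation>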
        <section>
            <title>Gradient Matters</title>
            <para>While this may look perfect, there is still one problem. Use the <keycombo>
                    <keycap>Shift</keycap>
                    <keycap>J</keycap>
                </keycombo> key to move the light really close to the cylinder, but without putting
                the light inside the cylinder. You should see something like this:</para>
            <figure>
                <title>Close Lit Cylinder</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Cylinder%20Close%20Light.png"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>Notice the vertical bands on the cylinder. These are reminiscent of the same
                interpolation problem we had before. Was lighting at the fragment level not
                supposed to fix this?</para>
            <para>It is similar to the original problem, but technically different. Per-vertex
                lighting caused lines because of color interpolation artifacts. This is caused by an
                optical illusion created by adjacent linear gradients.</para>
            <para>The normal is being interpolated linearly across the surface. This also means that
                the lighting is changing somewhat linearly across the surface. While the lighting
                isn't a linear change, it can be approximated as one over a small area of the
                surface.</para>
            <para>The edge between two triangles changes how the light interacts. On one side, the
                nearly-linear gradient has one slope; on the other side, it has a different one.
                That is, the slope of the gradient changes abruptly at the triangle edge.</para>
            <para>Here is a simple demonstration of this:</para>
            <figure>
                <title>Adjacent Gradient</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="GradientIssue.svg"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>These are two adjacent linear gradients, from the bottom left corner to the top
                right. The color value increases in intensity as it goes from the bottom left to the
                top right. They meet along the diagonal in the middle. Both gradients have the same
                color value in the middle, yet it appears that there is a line down the center that
                is brighter than the colors on both sides. But it is not; the color on the right
                side of the diagonal is actually brighter than the diagonal itself.</para>
            <para>That is the optical illusion. Here is a diagram that shows the color intensity as
                it moves across the above gradient:</para>
            <figure>
                <title>Gradient Intensity Plot</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="GradientDiagram.svg"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>The color curve is continuous; there are no breaks or sudden jumps. But it is not
                a smooth curve; there is a sharp edge.</para>
            <para>It turns out that human vision really wants to find sharp edges in smooth
                gradients. Anytime we see a sharp edge, our brains try to turn that into some kind
                of shape. And if there is a shape to the gradient intersection, such as a line, we
                tend to see that intersection <quote>pop</quote> out at us.</para>
            <para>The solution to this problem is not yet available to us. One of the reasons we can
                see this so clearly is that the surface has a very regular diffuse reflectance (ie:
                color). If the surface color were irregular, changing from fragment to fragment,
                then the effect would be virtually impossible to notice.</para>
            <para>But the real source of the problem is that the normal is being linearly
                interpolated. While this is certainly much better than interpolating the per-vertex
                lighting output, it does not produce a normal that matches with the normal of a
                perfect cylinder. The correct solution, which we will get to eventually, is to
                provide a way to encode the normal for a surface at many points, rather than simply
                interpolating vertex normals.</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut10 Distant Points of Light.html" ?>
        <title>Distant Points of Light</title>
        <para>There is another issue with our current example. Use the <keycap>i</keycap> key to
            raise the light up really high. Notice how bright all of the upwardly-facing surfaces
            get:</para>
        <figure>
            <title>High Light</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="High%20Light.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>You probably have no experience with this in real life. Holding a light farther from
            the surface in reality does not make the light brighter. So obviously something is
            happening in reality that our simple lighting model is not accounting for.</para>
        <para>In reality, lights emit a certain quantity of light per unit time. For a point-like
            light such as a light bulb, it emits this light radially, in all directions. The farther
            one gets from the light source, the larger the area over which this fixed quantity of
            light must be spread.</para>
        <para>The intensity of the light at any one point therefore decreases with the distance
            from the source. For light, this is called <glossterm>light
            attenuation.</glossterm></para>
        <para>Our model does not include light attenuation, so let's fix that.</para>
        <para>Attenuation is a well-understood physical phenomenon. In the absence of other factors
            (atmospheric light scattering, etc), the light intensity varies with the inverse of the
            square of the distance. An object 2 units away from the light feels the light with
            one-fourth the intensity. So our equation for light attenuation is as follows:</para>
        <equation>
            <title>Physical Light Attenuation</title>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="LightAttenEq.svg"/>
                </imageobject>
            </mediaobject>
        </equation>
        <para>There is a constant in the equation, which is used for unit correction. Of course, we
            can (and will) use it as a fudge factor to make things look right.</para>
        <para>The constant can take on a physical meaning: the distance at which half of the light
            intensity is lost. To compute the constant for a half-light distance of
            r<subscript>λ</subscript>, set the attenuated intensity at that distance to one-half
            and solve for the constant. This gives:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="HalfLightAtten.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>This equation computes physically realistic light attenuation for point-lights. But it
            often does not look very good. The equation tends to create a sharper intensity falloff than
            one would expect.</para>
        <para>There is a reason for this, but it is not one we are ready to get into quite yet. What
            is often done is to simply use the inverse rather than the inverse-square of the
            distance:</para>
        <equation>
            <title>Light Attenuation Inverse</title>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="LightAttenInvEq.svg"/>
                </imageobject>
            </mediaobject>
        </equation>
        <para>It looks brighter at greater distances than the physically correct model; at two
            units, an inverse falloff retains half the intensity where an inverse-square falloff
            retains only a quarter. This is fine for simple examples, but as we get more advanced,
            it will not be acceptable. This solution is really just a stop-gap; the real solution is
            one that we will discuss in a few tutorials.</para>
        <section>
            <title>Reverse of the Transform</title>
            <para>However, there is a problem. We previously did per-fragment lighting in model
                space. And while this is a perfectly useful space to do lighting in, model space is
                not world space.</para>
            <para>We want to specify the attenuation constant factor in terms of world space
                distances. But we are not dealing in world space; we are in model space. And model
                space distances are, naturally, in model space, which may well be scaled relative to
                world space. Here, any kind of scale in the model-to-world transform is a problem,
                not just non-uniform scales. Although if there were a uniform scale, we could
                theoretically apply the scale to the attenuation constant.</para>
            <para>So now we cannot use model space. Fortunately, camera space is a space that has
                the same scale as world space, just with a rotation/translation applied to it. So we
                can do our lighting in that space.</para>
            <para>Doing it in camera space requires computing a camera space position and passing it
                to the fragment shader to be interpolated. And while we could do this, that is not
                clever enough. Is there not some way to get around that?</para>
            <para>Yes, there is. Recall <varname>gl_FragCoord</varname>, an intrinsic value given to
                every fragment shader. It represents the location of the fragment in window space.
                So instead of transforming from model space to camera space, we will transform from
                window space to camera space.</para>
            <note>
                <para>The use of this reverse-transformation technique here should not be taken as a
                    suggestion to use it in all, or even most cases like this. In all likelihood, it
                    will be much slower than just passing the camera space position to the fragment
                    shader. It is here primarily for demonstration purposes, and because it will be
                    useful in the future.</para>
            </note>
            <para>The sequence of transformations that take a position from camera space to window
                space is as follows:</para>
            <table frame="all">
                <title>Transform Legend</title>
                <tgroup cols="2">
                    <colspec colname="c1" colnum="1" colwidth="1.0*"/>
                    <colspec colname="c2" colnum="2" colwidth="1.0*"/>
                    <thead>
                        <row>
                            <entry>
                                <para>Field Name</para>
                            </entry>
                            <entry>
                                <para>Meaning</para>
                            </entry>
                        </row>
                    </thead>
                    <tbody>
                        <row>
                            <entry>
                                <para>M</para>
                            </entry>
                            <entry>The camera-to-clip transformation matrix.</entry>
                        </row>
                        <row>
                            <entry>
                                <para>P<subscript>camera</subscript></para>
                            </entry>
                            <entry>The camera-space vertex position.</entry>
                        </row>
                        <row>
                            <entry>
                                <para>C</para>
                            </entry>
                            <entry>
                                <para>The clip-space vertex position.</para>
                            </entry>
                        </row>
                        <row>
                            <entry>
                                <para>N</para>
                            </entry>
                            <entry>
                                <para>The normalized device coordinate position.</para>
                            </entry>
                        </row>
                        <row>
                            <entry>
                                <para>V<subscript>xy</subscript></para>
                            </entry>
                            <entry>
                                <para>The X and Y values passed to
                                    <function>glViewport</function>.</para>
                            </entry>
                        </row>
                        <row>
                            <entry>
                                <para>V<subscript>wh</subscript></para>
                            </entry>
                            <entry>
                                <para>The width and height passed to
                                    <function>glViewport</function>.</para>
                            </entry>
                        </row>
                        <row>
                            <entry>
                                <para>D<subscript>nf</subscript></para>
                            </entry>
                            <entry>
                                <para>The depth near and far values passed to
                                        <function>glDepthRange</function>.</para>
                            </entry>
                        </row>
                    </tbody>
                </tgroup>
            </table>
            <equation>
                <title>Camera to Window Transforms</title>
                <mediaobject>
                    <imageobject>
                        <imagedata  fileref="CameraToWindowTrans.svg"/>
                    </imageobject>
                </mediaobject>
            </equation>
            <para>Therefore, given <varname>gl_FragCoord</varname>, we will need to perform the
                reverse of these:</para>
            <equation>
                <title>Window to Camera Transforms</title>
                <mediaobject>
                    <imageobject>
                        <imagedata  fileref="WindowToCameraTrans.svg"/>
                    </imageobject>
                </mediaobject>
            </equation>
            <para>In order for our fragment shader to perform this transformation, it must be given
                the following values:</para>
            <itemizedlist>
                <listitem>
                    <para>The inverse projection matrix.</para>
                </listitem>
                <listitem>
                    <para>The viewport width/height.</para>
                </listitem>
                <listitem>
                    <para>The depth range.</para>
                </listitem>
            </itemizedlist>
        </section>
        <section>
            <title>Applied Attenuation</title>
            <para>The <phrase role="propername">Fragment Attenuation</phrase> tutorial performs
                per-fragment attenuation, both with linear and quadratic attenuation.</para>
            <figure>
                <title>Fragment Attenuation</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Fragment%20Attenuation.png"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>This tutorial controls as before, with the following exceptions. The
                    <keycap>O</keycap> and <keycap>U</keycap> keys increase and decrease the
                attenuation constant. However, remember that decreasing the constant makes the
                attenuation less, which makes the light appear <emphasis>brighter</emphasis> at a
                particular distance. Using the shift key in combination with them will
                increase/decrease the attenuation by smaller increments. The <keycap>H</keycap> key
                swaps between the linear and quadratic attenuation functions.</para>
            <para>The drawing code is mostly the same as we saw in the per-vertex point light
                tutorial, since both this and that one perform lighting in camera space. The vertex
                shader is also nothing new; it passes the vertex normal and color to the fragment
                shader. The vertex normal is multiplied by the normal matrix, which allows us to use
                non-uniform scaling.</para>
        </section>
        <section>
            <title>New Uniform Types</title>
            <para>The more interesting part is the fragment shader. The definitions are not much
                changed from the last one, but there have been some additions:</para>
            <example>
                <title>Light Attenuation Fragment Shader Definitions</title>
                <programlisting language="glsl">uniform float lightAttenuation;
uniform bool bUseRSquare;

uniform UnProjection
{
    mat4 clipToCameraMatrix;
    ivec2 windowSize;
};</programlisting>
            </example>
            <para>The <varname>lightAttenuation</varname> uniform is just a float, but
                    <varname>bUseRSquare</varname> uses a new type: <type>bool</type>.</para>
            <para>GLSL has the <type>bool</type> type just like C++ does. The
                    <literal>true</literal> and <literal>false</literal> values work just like C++'s
                equivalents. Where they differ is that GLSL also has vectors of bools, called
                    <type>bvec#</type>, where the # can be 2, 3, or 4. We do not use that here, but
                it is important to note.</para>
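            <para>For example, a component-wise comparison produces a vector of bools (illustrative
                only; this tutorial does not use one):</para>
            <programlisting language="glsl">bvec3 isSmall = lessThan(someColor.rgb, vec3(0.5)); //true for each component below 0.5</programlisting>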
            <para>OpenGL's API, however, is still a C API. And C (at least, pre-C99) has no
                    <type>bool</type> type. Uploading a boolean value to a shader looks like
                this:</para>
            <programlisting language="cpp">glUniform1i(g_FragWhiteDiffuseColor.bUseRSquareUnif, g_bUseRSquare ? 1 : 0);</programlisting>
            <para>The integer form of uniform uploading is used, but the floating-point form could
                be used as well. The number 0 represents false, and any other number is
                true.</para>
            <para>The <classname>UnProjection</classname> uniform block contains data that only
                changes when the window changes. This uniform block is updated along with the vertex
                shader's <classname>Projection</classname> block. This data is used to perform the
                previously-discussed reverse-transformation operation, so that we can turn
                    <varname>gl_FragCoord</varname> into a camera-space position.</para>
            <para>Notice that the <varname>windowSize</varname> uses a new type: <type>ivec2</type>.
                This is a 2-dimensional vector of integers.</para>
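            <para>As an illustration, that update might look something like this sketch (the struct,
                buffer object, and function names here are assumptions, not necessarily the
                tutorial's exact code):</para>
            <programlisting language="cpp">//Hypothetical C++ mirror of the UnProjection uniform block.
struct UnProjectionBlock
{
    glm::mat4 clipToCameraMatrix;
    glm::ivec2 windowSize;  //std140: 8-byte aligned, packs directly after the mat4
};

//Called on window resize, alongside the Projection block update.
void UpdateUnProjection(int width, int height, const glm::mat4 &amp;cameraToClip)
{
    UnProjectionBlock unprojData;
    unprojData.clipToCameraMatrix = glm::inverse(cameraToClip);
    unprojData.windowSize = glm::ivec2(width, height);
    
    glBindBuffer(GL_UNIFORM_BUFFER, g_unprojectionUniformBuffer);  //assumed buffer object
    glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(unprojData), &amp;unprojData);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);
}</programlisting>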
        </section>
        <section>
            <title>Functions in GLSL</title>
            <para>For the first time, we have a shader complex enough that splitting it into
                different functions makes sense. So we do that. The first function is one that
                computes the camera-space position:</para>
            <example>
                <title>Window to Camera Space Function</title>
                <programlisting language="glsl">vec3 CalcCameraSpacePosition()
{
    vec4 ndcPos;
    ndcPos.xy = ((gl_FragCoord.xy / windowSize.xy) * 2.0) - 1.0;
    ndcPos.z = (2.0 * gl_FragCoord.z - gl_DepthRange.near - gl_DepthRange.far) /
        (gl_DepthRange.far - gl_DepthRange.near);
    ndcPos.w = 1.0;
    
    vec4 clipPos = ndcPos / gl_FragCoord.w;
    
    return vec3(clipToCameraMatrix * clipPos);
}</programlisting>
            </example>
            <para>Not surprisingly, GLSL functions are defined much like C and C++
                functions.</para>
            <para>The first three lines compute the position in normalized device coordinates.
                Notice that the computation of the X and Y coordinates is simplified from the
                original function. This is because we always set the lower-left corner of the
                viewport to (0, 0). This is what you get when you plug zeros into that
                equation.</para>
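            <para>For reference, with a viewport whose lower-left corner is not at (0, 0), the X and
                Y computation would also need the viewport's offset, along the lines of this sketch
                (<varname>viewportOffset</varname> is an assumed uniform; the tutorial does not
                provide one):</para>
            <programlisting language="glsl">//Hypothetical general form of the X/Y computation.
ndcPos.xy = ((gl_FragCoord.xy - viewportOffset.xy) / windowSize.xy) * 2.0 - 1.0;</programlisting>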
            <para>The <varname>gl_DepthRange</varname> variable is a special uniform defined by GLSL
                for fragment shaders. As the name suggests, it properly mirrors the values passed to
                    <function>glDepthRange</function>; this way, we do not have to put it in our
                uniform block.</para>
            <para>After the transformation to NDC space, we compute the clip-space position as
                previously shown. Then the result is multiplied by the clip-to-camera matrix, and
                that vector is returned to the caller.</para>
            <para>This is a simple function that uses only uniforms to compute a value. It takes no
                arguments. The second function is not quite as simple.</para>
            <example>
                <title>Light Intensity Application Function</title>
                <programlisting language="glsl">vec4 ApplyLightIntensity(in vec3 cameraSpacePosition, out vec3 lightDirection)
{
    vec3 lightDifference = cameraSpaceLightPos - cameraSpacePosition;
    float lightDistanceSqr = dot(lightDifference, lightDifference);
    lightDirection = lightDifference * inversesqrt(lightDistanceSqr);
    
    float distFactor = bUseRSquare ? lightDistanceSqr : sqrt(lightDistanceSqr);
    
    return lightIntensity * (1.0 / (1.0 + lightAttenuation * distFactor));
}</programlisting>
            </example>
            <para>The function header looks rather different from the standard C/C++ function
                definition syntax. Parameters to GLSL functions are designated as being inputs,
                outputs, or inputs and outputs.</para>
            <para>Parameters designated with <literal>in</literal> are input parameters. Functions
                can change these values, but they will have no effect on the variable or expression
                used in the function call. This is much like the default in C/C++, where parameter
                changes are local. Naturally, this is the default with GLSL parameters if you do not
                specify a qualifier.</para>
            <para>Parameters designated with <literal>out</literal> can be written to, and their
                values will be returned to the calling function. These are similar to non-const
                reference parameter types in C++. And just as with reference parameters, the caller
                of a function must call it with a real variable (called an <quote>l-value</quote>).
                And this variable must be a variable that can be <emphasis>changed</emphasis>, so
                you cannot pass a uniform or shader stage input value as this parameter.</para>
            <para>However, parameters declared as outputs are <emphasis>not</emphasis>
                initialized from the calling function. Their initial value inside the function
                is therefore undefined (ie: it could be anything). Because of this, you can pass
                shader stage outputs as <literal>out</literal> parameters. Recall that shader
                stage output variables can be written to, but <emphasis>never</emphasis> read
                from.</para>
            <para>Parameters designated as <literal>inout</literal> will have their values
                initialized by the caller and their final values returned to the caller. These
                are exactly like non-const reference parameters in C++. The main difference is
                that the value is initialized with the one that the user passed in, which
                forbids the passing of shader stage outputs as <literal>inout</literal>
                parameters.</para>
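            <para>To make these qualifiers concrete, here is a small example function. It is not
                part of this tutorial's shaders; the names are purely illustrative:</para>
            <programlisting language="glsl">void Accumulate(in float value, inout float total, out bool overLimit)
{
    total += value;             //inout: initialized by the caller, written back at the end.
    overLimit = (total > 1.0);  //out: starts undefined, so the function must write it.
    value = 0.0;                //in: this change is local; the caller never sees it.
}</programlisting>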
            <para>This particular function is somewhat complex, due to an optimization.
                Previously, our functions simply normalized the difference between the vertex
                position and the light position. In computing the attenuation, we need the
                distance between the two. And the process of normalization computes the
                distance. So instead of calling the GLSL function to normalize the direction, we
                do it ourselves, so that the distance is not computed twice (once in the GLSL
                function and once by us).</para>
            <para>The second line performs a dot product of the difference vector with itself.
                Remember that the dot product between two vectors is the cosine of the angle
                between them, multiplied by the lengths of the two vectors. The angle between a
                vector and itself is zero, and the cosine of zero is one. So what remains is the
                length of the vector times itself. Thus, the dot product of a vector with itself
                is the square of its length.</para>
            <para>To normalize a vector, we must divide the vector by its length. And the length
                of <varname>lightDifference</varname> is the square root of
                    <varname>lightDistanceSqr</varname>. The <function>inversesqrt</function>
                function computes 1 / the square root of the given value, so all we need to do
                is multiply this with <varname>lightDifference</varname> to get the light
                direction as a normalized vector. This value is written to our output
                variable.</para>
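            <para>For comparison, a more obvious way to write this computation is sketched
                below. It is correct, but both <function>normalize</function> and
                    <function>length</function> would take a square root of the same dot
                product:</para>
            <programlisting language="glsl">//Unoptimized equivalent of the first three lines of ApplyLightIntensity.
vec3 lightDifference = cameraSpaceLightPos - cameraSpacePosition;
lightDirection = normalize(lightDifference);       //One square root.
float lightDistance = length(lightDifference);     //A second square root.
float distFactor = bUseRSquare ? lightDistance * lightDistance : lightDistance;</programlisting>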
            <para>The next line of the function computes our lighting term. Notice the use of
                the <literal>?:</literal> operator. This works just like in C/C++. If we are
                using the square of the distance, that's what we store. Otherwise, we take the
                square root and store that.</para>
            <note>
                <para>The assumption in using ?: here is that only one or the other of the two
                    expressions will be evaluated. That's why the expensive call to
                        <function>sqrt</function> is done here. However, this may not be the case.
                    It is entirely possible (and quite likely) that the shader will always evaluate
                        <emphasis>both</emphasis> expressions and simply store one value or the
                    other as needed. So do not rely on such conditional logic to save
                    performance.</para>
            </note>
            <para>After that, things proceed as expected.</para>
            <para>Making these separate functions makes the main function look almost identical to
                prior versions:</para>
            <example>
                <title>Main Light Attenuation</title>
                <programlisting language="glsl">void main()
{
    vec3 cameraSpacePosition = CalcCameraSpacePosition();
    
    vec3 lightDir = vec3(0.0);
    vec4 attenIntensity = ApplyLightIntensity(cameraSpacePosition, lightDir);
    
    float cosAngIncidence = dot(normalize(vertexNormal), lightDir);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    
    outputColor = (diffuseColor * attenIntensity * cosAngIncidence) +
        (diffuseColor * ambientIntensity);
}</programlisting>
            </example>
            <para>Function calls appear very similar to C/C++, with the exceptions about parameters
                noted before. The camera-space position is determined. Then the light intensity,
                modified by attenuation, is computed. From there, things proceed as before.</para>
            <sidebar>
                <title>Alternative Attenuation</title>
                <para>As nice as these somewhat-realistic attenuation schemes are, it is often
                    useful to model light attenuation in a very different way. This is in no way
                    physically accurate, but it can look reasonably good.</para>
                <para>We simply do linear interpolation based on the distance. When the distance
                    is 0, the light has full intensity. When the distance reaches the light's
                    maximum range (which varies per-light), the intensity is 0.</para>
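                <para>A minimal sketch of this scheme, assuming a hypothetical per-light uniform
                        <varname>lightRange</varname> that is not part of this tutorial's
                    shaders, might look like this:</para>
                <programlisting language="glsl">//Linear falloff: full intensity at distance 0, zero intensity at lightRange.
float lightDistance = length(cameraSpaceLightPos - cameraSpacePosition);
float atten = clamp(1.0 - (lightDistance / lightRange), 0.0, 1.0);
vec4 attenIntensity = lightIntensity * atten;</programlisting>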
                <para>Note that <quote>reasonably good</quote> depends on your needs. The closer
                    the rest of your lighting gets to physical accuracy, and thus to
                    photorealism, the less you can rely on physically inaccurate tricks like
                    this one. It does no good to implement a complicated subsurface scattering
                    lighting model that includes Fresnel factors and so forth, while
                    simultaneously using a simple interpolation lighting attenuation
                    model.</para>
            </sidebar>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut10 In Review.html" ?>
        <title>In Review</title>
        <para>In this tutorial, you have learned the following:</para>
        <itemizedlist>
            <listitem>
                <para>Point lights are lights that have a position within the world, radiating light
                    equally in all directions. The light direction at a particular point on the
                    surface must be computed using the position at that point and the position of
                    the light.</para>
            </listitem>
            <listitem>
                <para>Attempting to perform per-vertex lighting computations with point lights leads
                    to artifacts.</para>
            </listitem>
            <listitem>
                <para>Lighting can be computed per-fragment by passing the fragment's position in an
                    appropriate space.</para>
            </listitem>
            <listitem>
                <para>Lighting can be computed in model space.</para>
            </listitem>
            <listitem>
                <para>Point lights have a falloff with distance, called attenuation. Not performing
                    this can cause odd effects, where a light appears to be brighter when it moves
                    farther from a surface. Light attenuation varies with the inverse of the square
                    of the distance, but other attenuation models can be used.</para>
            </listitem>
            <listitem>
                <para>Fragment shaders can compute the camera space position of the fragment in
                    question by using <varname>gl_FragCoord</varname> and a few uniform variables
                    holding information about the camera to window space transform.</para>
            </listitem>
            <listitem>
                <para>GLSL can have integer vectors, boolean values, and functions.</para>
            </listitem>
        </itemizedlist>
        <section>
            <title>Further Study</title>
            <para>Try doing these things with the given programs.</para>
            <itemizedlist>
                <listitem>
                    <para>When we used model space-based lighting computations, we had to perform an
                        inverse on our matrix from the matrix stack to transform the light position
                        from camera space to model space. However, it would be entirely possible to
                        simply build an inverse matrix at the same time we build a regular matrix on
                        our matrix stack. The inverse of a rotation matrix is just the rotation
                        matrix with a negated angle; the inverse of a scale is just the
                        multiplicative inverse of the scales; and the inverse of a translation is
                        the negation of the translation vector.</para>
                    <para>To do this, you will need to modify the <classname>MatrixStack</classname>
                        class in a number of ways. It must store a second matrix representing the
                        accumulated inverse matrix. When a transformation command is given to the
                        stack, it must also generate the inverse matrix for this transform and
                            <emphasis>left multiply</emphasis> this into the accumulated inverse.
                        The push/pop will have to push/pop the inverse matrix as well. It can use
                        the same stack, so long as the pop function puts the two matrices in the
                        proper places.</para>
                </listitem>
                <listitem>
                    <para>Implement the alternative attenuation described at the end of the section
                        on attenuation.</para>
                </listitem>
            </itemizedlist>
        </section>
        <section>
            <title>GLSL Features of Note</title>
            <glosslist>
                <glossentry>
                    <glossterm>gl_DepthRange</glossterm>
                    <glossdef>
                        <para>A built-in OpenGL uniform defined for fragment shaders only. This
                            uniform stores the parameters passed to
                                <function>glDepthRange</function>. When those parameters change,
                            all programs are automatically updated.</para>
                    </glossdef>
                </glossentry>
            </glosslist>
            <funcsynopsis>
                <funcprototype>
                    <funcdef>vec <function>inversesqrt</function></funcdef>
                    <paramdef>vec <parameter>x</parameter></paramdef>
                </funcprototype>
            </funcsynopsis>
            <para>This function computes 1 / the square root of <varname>x</varname>. This is a
                component-wise computation, so vectors may be used. The return value will have the
                same type as <varname>x</varname>.</para>
            <funcsynopsis>
                <funcprototype>
                    <funcdef>vec <function>sqrt</function></funcdef>
                    <paramdef>vec <parameter>x</parameter></paramdef>
                </funcprototype>
            </funcsynopsis>
            <para>This function computes the square root of <varname>x</varname>. This is a
                component-wise computation, so vectors may be used. The return value will have the
                same type as <varname>x</varname>.</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut10 Glossary.html" ?>
        <title>Glossary</title>
        <glosslist>
            <glossentry>
                <glossterm>point light source</glossterm>
                <glossdef>
                    <para>A light source that emits light from a particular location in the world.
                        The light is emitted in all directions evenly.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>fragment lighting</glossterm>
                <glossdef>
                    <para>Evaluating the lighting equation at every fragment.</para>
                    <para>This is also called Phong shading, in contrast with Gouraud shading,
                        but this name has fallen out of favor due to similarities with names for
                        other lighting models.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>light attenuation</glossterm>
                <glossdef>
                    <para>The decrease of the intensity of light with distance from the source of
                        that light.</para>
                </glossdef>
            </glossentry>
        </glosslist>
    </section>
    
</chapter>