<?xml version="1.0" encoding="UTF-8"?>
<?oxygen RNGSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng" type="xml"?>
<?oxygen SCHSchema="http://docbook.org/xml/5.0/rng/docbookxi.rng"?>
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xi="http://www.w3.org/2001/XInclude"
    xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0">
    <?dbhtml filename="Tutorial 09.html" ?>
    <title>Lights On</title>
    <para>It is always best to start simply. And since lighting is a big topic, we will begin with
        the simplest possible scenario.</para>
    <section>
        <?dbhtml filename="Tut09 Modelling Lights.html" ?>
        <title>Modelling Lights</title>
        <para>Lighting is complicated. Very complicated. The interaction between a surface and a
            light is mostly well understood in terms of the physics. But actually doing the
            computations for full light/surface interaction as it is currently understood is
            prohibitively expensive.</para>
        <para>As such, all lighting in any real-time application is some form of approximation of
            the real world. How accurate that approximation is generally determines how close to
                <glossterm>photorealism</glossterm> one gets. Photorealism is the ability to render
            a scene that is indistinguishable from a photograph of reality.</para>
        <note>
            <title>Non-Photorealistic Rendering</title>
            <para>There are lighting models that do not attempt to model reality. These are, as a
                group, called non-photorealistic rendering (<acronym>NPR</acronym>) techniques.
                These lighting models and rendering techniques can attempt to model cartoon styles
                (typically called <quote>cel shading</quote>), paintbrush effects, pencil-sketch, or
                other similar things. NPR techniques include lighting models, but they also do
                other, non-lighting things, like drawing object silhouettes in a dark, ink-like
                color.</para>
            <para>Developing good NPR techniques is at least as difficult as developing good
                photorealistic lighting models. For the most part, in this book, we will focus on
                approximating photorealism.</para>
        </note>
        <para>A <glossterm>lighting model</glossterm> is an algorithm, a mathematical function, that
            determines how a surface interacts with light.</para>
        <para>In the real world, our eyes see by detecting light that hits them. The structure of
            our iris and lenses uses a number of photoreceptors (light-sensitive cells) to resolve a
            pair of images. The light we see can have one of two sources. A light emitting object
            like the sun or a lamp can emit light that is directly captured by our eyes. Or a
            surface can reflect light from another source that is captured by our eyes. Light
            emitting objects are called <glossterm>light sources.</glossterm></para>
        <para>The interaction between a light and a surface is the most important part of a lighting
            model. It is also the most difficult to get right. The way light interacts with atoms on
            a surface alone involves complicated quantum mechanical principles that are difficult to
            understand. And even that does not get into the fact that surfaces are not perfectly
            smooth or perfectly opaque.</para>
        <para>This is made more complicated by the fact that light itself is not one thing. There is
            no such thing as <quote>white light.</quote> Virtually all light is made up of a number
            of different wavelengths. Each wavelength (in the visible spectrum) represents a color.
            White light is made of many wavelengths (colors) of light. Colored light simply has
            fewer wavelengths in it than pure white light.</para>
        <para>Surfaces interact with light of different wavelengths in different ways. As a
            simplification of this complex interaction, we will assume that a surface can do one of
            two things: absorb that wavelength of light or reflect it.</para>
        <para>A surface looks blue under white light because the surface absorbs all non-blue parts
            of the light and only reflects the blue parts. If one were to shine a red light on the
            surface, the surface would appear very dark, as the surface absorbs non-blue light, and
            the red light does not have much blue light in it.</para>
        <figure>
            <title>Surface Light Absorption</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="SurfaceColorAbsorption.svg"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>Therefore, the apparent color of a surface is a combination of the absorbing
            characteristics of the surface (which wavelengths are absorbed or reflected) and the
            wavelengths of light shone upon that surface.</para>
        <para>The very first approximation that is made is that not all of these wavelengths matter.
            Instead of tracking millions of wavelengths in the visible spectrum, we will instead
            track 3. Red, green, and blue.</para>
        <para>The RGB intensity of light reflected from a surface at a particular point is a
            combination of the RGB light absorbing characteristics of the surface at that point and
            the RGB <glossterm>light intensity</glossterm> shone on that point on the surface. All
            of these, the reflected light, the source light, and the surface absorption, can be
            described as RGB colors, on the range [0, 1].</para>
        <para>The intensity of light shone upon a surface depends on (at least) two things. First,
            it depends on the intensity of light that reaches the surface from a light source. And
            second, it depends on the angle between the surface and the light.</para>
        <para>Consider a perfectly flat surface. If you shine a column of light with a known
            intensity directly onto that surface, the intensity of that light at each point under
            the surface will be a known value, based on the intensity of the light divided by the
            area projected on the surface.</para>
        <figure>
            <title>Perpendicular Light</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="DirectLightColumn.svg"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>If the light is shone instead at an angle, the area on the surface is much wider. This
            spreads the same light intensity over a larger area of the surface; as a result, each
            point under the light <quote>sees</quote> the light less intensely.</para>
        <figure>
            <title>Light at an Angle</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="AngleLightColumn.svg"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>Therefore, the intensity of the light cast upon a surface is a function of the
            original light's intensity and the angle between the surface and the light source. This
            angle is called the <glossterm>angle of incidence</glossterm> of the light.</para>
        <para>A lighting model is a function of all of these parameters. This is far from a
            comprehensive list of lighting parameters; this list will be expanded considerably in
            future discussions.</para>
        <section>
            <title>Standard Diffuse Lighting</title>
            <para><glossterm>Diffuse lighting</glossterm> refers to a particular kind of
                light/surface interaction, where the light from the light source reflects from the
                surface at many angles, instead of as a perfect mirror.</para>
            <figure>
                <title>Diffuse Reflectance</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="DiffuseReflection.svg"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>An ideal diffuse material will reflect light evenly in all directions, as shown in
                the picture above. No actual surfaces are ideal diffuse materials, but this is a
                good starting point and looks pretty decent.</para>
            <para>For this tutorial, we will be using the <glossterm>Lambertian
                    reflectance</glossterm> model of diffuse lighting. It represents the ideal case
                shown above, where light is reflected in all directions equally. The equation for
                this lighting model is quite simple:</para>
            <equation>
                <title>Diffuse Lighting Equation</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="DiffuseLightingEquation.svg"/>
                    </imageobject>
                </mediaobject>
            </equation>
            <para>The cosine of the angle of incidence is used because it represents the perfect
                hemisphere of light that would be reflected. When the angle of incidence is 0°, the
                cosine of this angle will be 1.0. The lighting will be at its brightest. When the
                angle of incidence is 90°, the cosine of this angle will be 0.0, so the lighting
                will be 0. Values less than 0 are clamped to 0.</para>
        </section>
        <section>
            <title>Surface Orientation</title>
            <para>Now that we know what we need to compute, the question becomes how to compute it.
                Specifically, this means how to compute the angle of incidence for the light, but it
                also means where to perform the lighting computations.</para>
            <para>Since our mesh geometry is made of triangles, each individual triangle is flat.
                Therefore, much like the plane above, each triangle faces a single direction. This
                direction is called the <glossterm>surface normal</glossterm> or
                    <glossterm>normal.</glossterm> It is the direction that the surface is facing at
                the location of interest.</para>
            <para>Every point along the surface of a single triangle has the same geometric surface
                normal. That's all well and good, for actual triangles. But polygonal models are
                usually supposed to be approximations of real, curved surfaces. If we use the actual
                triangle's surface normal for all of the points on a triangle, the object would look
                very faceted. This would certainly be an accurate representation of the actual
                triangular mesh, but it reveals the surface to be exactly what it is: a triangular
                mesh approximation of a curved surface. If we want to create the illusion that the
                surface really is curved, we need to do something else.</para>
            <para>Instead of using the triangle's normal, we can assign to each vertex the normal
                that it <emphasis>would</emphasis> have had on the surface it is approximating. That
                is, while the mesh is an approximation, the normal for a vertex is the actual normal
                for that surface. This actually works out surprisingly well.</para>
            <para>This means that we must add to the vertex's information. In past tutorials, we
                have had a position and sometimes a color. To that information, we add a normal. So
                we will need a vertex attribute that represents the normal.</para>
        </section>
        <section>
            <title>Gouraud Shading</title>
            <para>So each vertex has a normal. That is useful, but it is not sufficient, for one
                simple reason. We do not draw the vertices of triangles; we draw the interior of a
                triangle through rasterization.</para>
            <para>There are several ways to go about computing lighting across the surface of a
                triangle. The simplest to code, and most efficient for rendering, is to perform the
                lighting computations at every vertex, and then let the result of this computation
                be interpolated across the surface of the triangle. This process is called
                    <glossterm>Gouraud shading.</glossterm></para>
            <para>Gouraud shading is a pretty decent approximation, when using the diffuse lighting
                model. It usually looks OK so long as we remain using that lighting model, and was
                commonly used for a good decade or so. Interpolation of vertex outputs is a very
                fast process, and not having to compute lighting at every fragment generated from
                the triangle raises the performance substantially.</para>
            <para>That being said, modern games have essentially abandoned this technique. Part of
                that is because the per-fragment computation is not as slow and limited as it used to
                be. And part of it is simply that games tend to not use just diffuse lighting
                anymore, so the Gouraud approximation is more noticeably inaccurate.</para>
        </section>
        <section>
            <title>Directional Light Source</title>
            <para>The angle of incidence is the angle between the surface normal and the direction
                towards the light. Computing the direction from the point in question to the light
                can be done in a couple of ways.</para>
            <para>If you have a light source that is very close to an object, then the direction
                towards the light can change dramatically over the surface of that object. As the
                light source is moved farther and farther away, the direction towards the light
                varies less and less over the surface of the object.</para>
            <figure>
                <title>Near and Far Lights</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="NearVsFarLight.svg"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>If the light source is sufficiently distant, relative to the size of the scene
                being rendered, then the direction towards the light is nearly the same for every
                point on every object you render. Since the direction is the same everywhere, the
                light can be represented as just a single direction given to all of the objects.
                There is no need to compute the direction based on the position of the point being
                illuminated.</para>
            <para>This situation is called a <glossterm>directional light source.</glossterm> Light
                from such a source effectively comes from a particular direction as a wall of
                intensity, evenly distributed over the scene.</para>
            <para>Directional light sources are a good model for lights like the sun relative to a
                small region of the Earth. It would not be a good model for the sun relative to the
                rest of the solar system. So scale is important.</para>
            <para>Light sources do not have to be physical objects rendered in the scene. All we
                need to use a directional light is to provide a direction to our lighting model when
                rendering the surface we want to see. However, having light appear from seemingly
                nothing hurts verisimilitude; this should be avoided where possible.</para>
            <para>Alternatives to directional lights will be discussed a bit later.</para>
        </section>
        <section>
            <title>Normals and Space</title>
            <para>Normals have many of the same properties that positions do. Normals are vector directions, so
                like position vectors, they exist in a certain coordinate system. It is usually a
                good idea to have the normals for your vertices be in the same coordinate system as
                the positions in those vertices. So that means model space.</para>
            <para>This also means that normals must be transformed from model space to another
                space. That other space needs to be the same space that the lighting direction is
                in; otherwise, the two vectors cannot be compared. One might think that world space
                is a fine choice. After all, the light direction is already defined in world
                space.</para>
            <para>You certainly could use world space to do lighting. However, for our purposes, we
                will use camera space. The reason for this is partially illustrative: in later
                tutorials, we are going to do lighting in some rather unusual spaces. By using
                camera space, it gets us in the habit of transforming both our light direction and
                the surface normals into different spaces.</para>
            <para>We will talk more in later sections about exactly how we transform the normal. For
                now, we will just transform it with the regular transformation matrix.</para>
        </section>
        <section>
            <title>Drawing with Lighting</title>
            <para>The full lighting model for computing the diffuse reflectance from directional
                light sources, using per-vertex normals and Gouraud shading, is as follows. The
                light will be represented by a direction and a light intensity (color). The light
                direction passed to our shader is expected to be in camera space already, so the
                shader is not responsible for this transformation. For each vertex (in addition to
                the normal position transform), we:</para>
            <orderedlist>
                <listitem>
                    <para>Transform the normal from model space to camera space using the
                        model-to-camera transformation matrix.</para>
                </listitem>
                <listitem>
                    <para>Compute the cosine of the angle of incidence.</para>
                </listitem>
                <listitem>
                    <para>Multiply the light intensity by the cosine of the angle of incidence, and
                        multiply that by the diffuse surface color.</para>
                </listitem>
                <listitem>
                    <para>Pass this value as a vertex shader output, which will be written to the
                        screen by the fragment shader.</para>
                </listitem>
            </orderedlist>
            <para>This is what we do in the <phrase role="propername">Basic Lighting</phrase>
                tutorial. It renders a cylinder above a flat plane, with a single directional light
                source illuminating both objects. One of the nice things about a cylinder is that it
                has both curved and flat surfaces, thus making an adequate demonstration of how
                light interacts with a surface.</para>
            <figure>
                <title>Basic Lighting</title>
                <mediaobject>
                    <imageobject>
                        <imagedata fileref="Basic%20Lighting.png"/>
                    </imageobject>
                </mediaobject>
            </figure>
            <para>The light is at a fixed direction; the model and camera both can be
                rotated.</para>
            <sidebar>
                <title>Mouse Movement</title>
                <para>This is the first tutorial that uses mouse movement to orient objects and the
                    camera. These controls will be used throughout the rest of this book.</para>
                <para>The camera can be oriented with the left mouse button. Left-clicking and
                    dragging will rotate the camera around the target point. This will rotate both
                    horizontally and vertically. Think of the world as a sphere. Starting to drag
                    means placing your finger on the sphere. Moving your mouse is like moving your
                    finger; the sphere rotates along with your finger's movement. If you hold
                        <keycap>Ctrl</keycap> when you left-click, you can rotate either
                    horizontally or vertically, depending on the direction you move the mouse.
                    Whichever direction is farthest from the original location clicked will be the
                    axis that is rotated.</para>
                <para>The camera's up direction can be changed as well. To do this, left-click while
                    holding <keycap>Alt</keycap>. Only horizontal movements of the mouse will spin
                    the view. Moving left spins counter-clockwise, while moving right spins
                    clockwise.</para>
                <para>The camera can be moved closer to its target point and farther away. To do
                    this, scroll the mouse wheel up and down. Scrolling up moves closer, while
                    scrolling down moves farther away.</para>
                <para>The object can be controlled by the mouse as well. The object can be oriented
                    with the right-mouse button. Right-clicking and dragging will rotate the object
                    horizontally and vertically, relative to the current camera view. As with camera
                    controls, holding <keycap>Ctrl</keycap> when you right-click will allow you to
                    rotate horizontally or vertically only.</para>
                <para>The object can be spun by right-clicking while holding <keycap>Alt</keycap>.
                    As with the other object movements, the spin is relative to the current
                    direction of the camera.</para>
                <para>The code for these controls is contained in the framework objects
                        <type>MousePole</type> and <type>ObjectPole</type>. The source code in them
                    is, outside of how FreeGLUT handles mouse input, nothing that has not been seen
                    previously.</para>
            </sidebar>
            <para>Pressing the <keycap>Spacebar</keycap> will switch between a cylinder that has a
                varying diffuse color and one that is pure white. This demonstrates the effect of
                lighting on a changing diffuse color.</para>
            <para>The initialization code does the usual: loads the shaders, gets uniforms from
                them, and loads a number of meshes. In this case, it loads a mesh for the ground
                plane and a mesh for the cylinder. Both of these meshes have normals at each vertex;
                we'll look at the mesh data a bit later.</para>
            <para>The display code has gone through a few changes. The vertex shader uses only two
                matrices: one for model-to-camera, and one for camera-to-clip-space. So our matrix
                stack will have the camera matrix at the very bottom.</para>
            <example>
                <title>Display Camera Code</title>
                <programlisting language="cpp">const glm::vec3 &amp;camPos = ResolveCamPosition();

Framework::MatrixStack modelMatrix;
modelMatrix.SetMatrix(g_mousePole.CalcMatrix());

glm::vec4 lightDirCameraSpace = modelMatrix.Top() * g_lightDirection;

glUseProgram(g_WhiteDiffuseColor.theProgram);
glUniform3fv(g_WhiteDiffuseColor.dirToLightUnif, 1, glm::value_ptr(lightDirCameraSpace));
glUseProgram(g_VertexDiffuseColor.theProgram);
glUniform3fv(g_VertexDiffuseColor.dirToLightUnif, 1, glm::value_ptr(lightDirCameraSpace));
glUseProgram(0);</programlisting>
            </example>
            <para>Since our vertex shader will be doing all of its lighting computations in camera
                space, we need to move the <varname>g_lightDirection</varname> from world space to
                camera space. So we multiply it by the camera matrix. Notice that the camera matrix
                now comes from the MousePole object.</para>
            <para>Now, we need to talk a bit about vector transforms with matrices. When
                transforming positions, the fourth component was 1.0; this was used so that the
                translation component of the matrix transformation would be added to each
                position.</para>
            <para>Normals represent directions, not absolute positions. And while rotating or
                scaling a direction is a reasonable operation, translating it is not. Now, we could
                just adjust the matrix to remove all translations before transforming our light into
                camera space. But that's highly unnecessary; we can simply put 0.0 in the fourth
                component of the direction. This will do the same job, only we do not have to mess
                with the matrix to do so.</para>
            <para>This also allows us to use the same transformation matrix for vectors as for
                positions.</para>
            <para>We upload the camera-space light direction to the two programs.</para>
            <para>To render the ground plane, we run this code:</para>
            <example>
                <title>Ground Plane Lighting</title>
                <programlisting language="cpp">Framework::MatrixStackPusher push(modelMatrix);

glUseProgram(g_WhiteDiffuseColor.theProgram);
glUniformMatrix4fv(g_WhiteDiffuseColor.modelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(modelMatrix.Top()));
glm::mat3 normMatrix(modelMatrix.Top());
glUniformMatrix3fv(g_WhiteDiffuseColor.normalModelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(normMatrix));
glUniform4f(g_WhiteDiffuseColor.lightIntensityUnif, 1.0f, 1.0f, 1.0f, 1.0f);
g_pPlaneMesh->Render();
glUseProgram(0);</programlisting>
            </example>
            <para>We upload two matrices. One of these is used for normals, and the other is used
                for positions. The normal matrix is only 3x3 instead of the usual 4x4. This is
                because normals do not use the translation component. We could have used the trick
                we used earlier, where we use a 0.0 as the W component of a 4 component normal. But
                instead, we just extract the top-left 3x3 area of the model-to-camera matrix and
                send that.</para>
            <para>Of course, the matrix is the same as the model-to-camera, except for the lack of
                translation. The reason for having separate matrices will come into play
                later.</para>
            <para>We also upload the intensity of the light, as a pure-white light at full
                brightness. Then we render the mesh.</para>
            <para>To render the cylinder, we run this code:</para>
            <example>
                <title>Cylinder Lighting</title>
                <programlisting language="cpp">Framework::MatrixStackPusher push(modelMatrix);

modelMatrix.ApplyMatrix(g_objectPole.CalcMatrix());

if(g_bDrawColoredCyl)
{
    glUseProgram(g_VertexDiffuseColor.theProgram);
    glUniformMatrix4fv(g_VertexDiffuseColor.modelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(modelMatrix.Top()));
    glm::mat3 normMatrix(modelMatrix.Top());
    glUniformMatrix3fv(g_VertexDiffuseColor.normalModelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(normMatrix));
    glUniform4f(g_VertexDiffuseColor.lightIntensityUnif, 1.0f, 1.0f, 1.0f, 1.0f);
    g_pCylinderMesh->Render("lit-color");
}
else
{
    glUseProgram(g_WhiteDiffuseColor.theProgram);
    glUniformMatrix4fv(g_WhiteDiffuseColor.modelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(modelMatrix.Top()));
    glm::mat3 normMatrix(modelMatrix.Top());
    glUniformMatrix3fv(g_WhiteDiffuseColor.normalModelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(normMatrix));
    glUniform4f(g_WhiteDiffuseColor.lightIntensityUnif, 1.0f, 1.0f, 1.0f, 1.0f);
    g_pCylinderMesh->Render("lit");
}
glUseProgram(0);</programlisting>
            </example>
            <para>The cylinder is not scaled at all. It is one unit from top to bottom, and the
                diameter of the cylinder is also 1. Translating it up by 0.5 simply moves it to
                being on top of the ground plane. Then we apply a rotation to it, based on user
                inputs.</para>
            <para>We actually draw two different kinds of cylinders, based on user input. The
                colored cylinder is tinted red and is the initial cylinder. The white cylinder uses
                a vertex program that does not use per-vertex colors for the diffuse color; instead,
                it uses a hard-coded color of full white. These both come from the same mesh file,
                but have special names to differentiate between them.</para>
            <para>What changes is that the <quote>lit</quote> mesh does not pass the color vertex
                attribute and the <quote>lit-color</quote> mesh does.</para>
            <para>Other than which program is used to render them and what mesh name they use, they
                are both rendered similarly.</para>
            <para>The camera-to-clip matrix is uploaded to the programs in the
                    <function>reshape</function> function, as previous tutorials have
                demonstrated.</para>
        </section>
        <section>
            <title>Vertex Lighting</title>
            <para>There are two vertex shaders used in this tutorial. One of them uses a color
                vertex attribute as the diffuse color, and the other assumes the diffuse color is
                (1, 1, 1, 1). Here is the vertex shader that uses the color attribute,
                    <filename>DirVertexLighting_PCN</filename>:</para>
            <example>
                <title>Lighting Vertex Shader</title>
                <programlisting language="glsl">#version 330

layout(location = 0) in vec3 position;
layout(location = 1) in vec4 diffuseColor;
layout(location = 2) in vec3 normal;

smooth out vec4 interpColor;

uniform vec3 dirToLight;
uniform vec4 lightIntensity;

uniform mat4 modelToCameraMatrix;
uniform mat3 normalModelToCameraMatrix;

layout(std140) uniform Projection
{
    mat4 cameraToClipMatrix;
};

void main()
{
    gl_Position = cameraToClipMatrix * (modelToCameraMatrix * vec4(position, 1.0));
    
    vec3 normCamSpace = normalize(normalModelToCameraMatrix * normal);
    
    float cosAngIncidence = dot(normCamSpace, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    
    interpColor = lightIntensity * diffuseColor * cosAngIncidence;
}</programlisting>
            </example>
            <para>We define a single output variable, <varname>interpColor</varname>, which will be
                interpolated across the surface of the triangle. We have a uniform for the
                camera-space lighting direction <varname>dirToLight</varname>. Notice the name: it
                is the direction from the surface <emphasis>towards</emphasis> the light. It is not
                the direction <emphasis>from</emphasis> the light.</para>
            <para>We also have a light intensity uniform value, as well as two matrices: one for
                positions and a separate one for normals. Notice that the
                    <varname>cameraToClipMatrix</varname> is in a uniform block. This allows us to
                update all programs that use the projection matrix just by changing the buffer
                object.</para>
            <para>The first line of <function>main</function> simply does the position transforms we
                need to position our vertices, as we have seen before. We do not need to store the
                camera-space position, so we can do the entire transformation in a single
                step.</para>
            <para>The next line takes our normal and transforms it by the model-to-camera matrix
                specifically for normals. As noted earlier, the contents of this matrix are
                identical to the contents of <varname>modelToCameraMatrix.</varname> The
                    <function>normalize</function> function takes the result of the transform and
                ensures that the normal has a length of one. The need for this will be explained
                later.</para>
            <para>We then compute the cosine of the angle of incidence. We'll explain how this math
                computes this shortly. Do note that after computing the cosine of the angle of
                incidence, we then clamp the value to between 0 and 1 using the GLSL built-in
                function <function>clamp.</function></para>
            <para>This is important, because the cosine of the angle of incidence can be negative.
                This is for values which are pointed directly away from the light, such as the
                underside of the ground plane, or any part of the cylinder that is facing away from
                the light. The lighting computations do not make sense with this value being
                negative, so the clamping is necessary.</para>
            <para>After computing that value, we multiply it by the light intensity and diffuse
                color. This result is then passed to the interpolated output color. The fragment
                shader is a simple passthrough shader that writes the interpolated color
                directly.</para>
            <para>The version of the vertex shader without the per-vertex color attribute simply
                omits the multiplication with the <varname>diffuseColor</varname> (as well as the
                definition of that input variable). This is the same as doing a multiply with a
                color vector of all 1.0.</para>
        </section>
        <section>
            <title>Vector Dot Product</title>
            <para>We glossed over an important point in looking at the vertex shader. Namely, how
                the cosine of the angle of incidence is computed.</para>
            <para>Given two vectors, one could certainly compute the angle of incidence, then take
                the cosine of it. But both computing that angle and taking its cosine are quite
                expensive. Instead, we elect to use a vector math trick: the <glossterm>vector dot
                    product.</glossterm></para>
            <para>The vector dot product between two vectors can be mathematically computed as
                follows:</para>
            <equation>
                <title>Dot Product</title>
                <mediaobject>
                    <imageobject>
                        <imagedata  fileref="DotProductLength.svg"/>
                    </imageobject>
                </mediaobject>
            </equation>
            <para>If both vectors have a length of one&#8212;that is, if they are unit
                vectors&#8212;then the result of a dot product is just the cosine of the angle
                between the vectors.</para>
            <para>This is also part of the reason why the light direction is the direction
                    <emphasis>towards</emphasis> the light rather than from the light. Otherwise we
                would have to negate the vector before performing the dot product.</para>
            <para>What makes this faster than taking the cosine of the angle directly is that, while
                the dot product is geometrically the cosine of the angle between the two unit
                vectors, computing the dot product via vector math is very simple:</para>
            <equation>
                <title>Dot Product from Vector Math</title>
                <mediaobject>
                    <imageobject>
                        <imagedata  fileref="DotProductEquation.svg"/>
                    </imageobject>
                </mediaobject>
            </equation>
            <para>This does not require any messy cosine transcendental math computations. This
                does not require using trigonometry to compute the angle between the two vectors.
                Simple multiplications and additions; most graphics hardware can do billions of
                these a second.</para>
            <para>Obviously, the GLSL function <function>dot</function> computes the vector dot
                product of its arguments.</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut09 Normal Transformation.html" ?>
        <title>Normal Transformation</title>
        <para>In the last section, we saw that our computation of the cosine of the angle of
            incidence has certain requirements. Namely, that the two vectors involved, the surface
            normal and the light direction, are of unit length. The light direction can be assumed
            to be of unit length, since it is passed directly as a uniform.</para>
        <para>The surface normal can also be assumed to be of unit length.
                <emphasis>Initially.</emphasis> However, the normal undergoes a transformation by an
            arbitrary matrix; there is no guarantee that this transformation will not apply scaling
            or other transformations to the vector that will result in a non-unit vector.</para>
        <para>Of course, it is easy enough to correct this. The GLSL function
                <function>normalize</function> will return a vector that is of unit length without
            changing the direction of the input vector.</para>
        <para>And while normalizing would restore the normal's unit length, so that the math would
            function, the resulting direction can still be geometric nonsense. For
            example, consider a 2D circle. We can apply a non-uniform scale (different scales in
            different axes) to the positions on this circle that will transform it into an
            ellipse:</para>
        <figure>
            <title>Circle Scaling</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="CircleScaling.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>This is all well and good, but consider the normals in this transformation:</para>
        <figure>
            <title>Circle Scaling with Normals</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="CircleNormalScaling.svg" />
                </imageobject>
            </mediaobject>
        </figure>
        <para>The ellipse in the middle has the normals that you would expect if you transformed the
            normals from the circle by the same matrix the circle was transformed by. They may be
            unit length, but they no longer reflect the <emphasis>shape</emphasis> of the ellipse.
            The ellipse on the right has normals that reflect the actual shape.</para>
        <para>It turns out that what you really want to do is transform the normals with the same
            rotations as the positions, but invert the scales. That is, a scale of 0.5 along the X
            axis will shrink positions in that axis by half. For the surface normals, you want to
                <emphasis>double</emphasis> the X value of the normals, then normalize the
            result.</para>
        <para>This is easy if you have a simple matrix. But more complicated matrices, composed from
            multiple successive rotations, scales, and other operations, are not so easy to
            compute.</para>
        <para>Instead, what we must do is compute something called the <glossterm>inverse
                transpose</glossterm> of the matrix in question. This means we first compute the
            inverse matrix, then compute the <glossterm>transpose</glossterm> of that matrix. The
            transpose of a matrix is simply the same matrix flipped along the diagonal. The columns
            of the original matrix are the rows of its transpose. That is:</para>
        <equation>
            <title>Matrix Transpose</title>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="MatrixTranspose.svg"/>
                </imageobject>
            </mediaobject>
        </equation>
        <para>So how does this inverse transpose help us?</para>
        <para>Remember: what we want is to invert the scales of our matrix without affecting the
            rotational characteristics of our matrix. Given a 3x3 matrix M that is composed of only
            rotation and scale transformations, we can re-express this matrix as follows:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="MatrixSVD.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>That is, the matrix can be expressed as doing a rotation into a space, followed by a
            single scale transformation, followed by another rotation. We can do this
                <emphasis>regardless</emphasis> of how many scale and rotation matrices were used to
            build M. That is, M could be the result of twenty rotation and scale matrices, but all
            of those can be extracted into two rotations with a scale in between.<footnote>
                <para>We will skip over deriving how exactly this is true. If you are interested,
                    search for <quote><link
                            xlink:href="http://en.wikipedia.org/wiki/Singular_value_decomposition"
                            >Singular Value Decomposition</link></quote>. But be warned: it is
                    math-heavy.</para>
            </footnote></para>
        <para>Recall that what we want to do is invert the scales in our transformation. Where we
            scale by 0.4 in the original, we want to scale by 2.5 in the inverse. The inverse matrix
            of a pure scale matrix is a matrix with each of the scaling components inverted.
            Therefore, we can express the matrix that we actually want as this:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="InvertedScale.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>An interesting fact about pure-rotation matrices: the inverse of any rotation matrix
                <emphasis>is</emphasis> its transpose. Also, taking the inverse of a matrix twice
            results in the original matrix. Therefore, you can express any pure-rotation matrix as
            the inverse transpose of itself, without affecting the matrix. Since the inverse is its
            transpose, and doing a transpose twice on a matrix does not change its value, the
            inverse-transpose of a rotation matrix is a no-op.</para>
        <para>Also, since the values in pure-scale matrices are along the diagonal, a transpose
            operation on scale matrices does nothing. With these two facts in hand, we can
            re-express the matrix we want to compute as:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="DeriveInvTrans_1.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>Using matrix algebra, we can factor the transposes out, but doing so requires
            reversing the order of the matrix multiplication:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="FactorOutTranspose_2.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>Similarly, we can factor out the inverse operations, but this requires reversing the
            order again:</para>
        <informalequation>
            <mediaobject>
                <imageobject>
                    <imagedata  fileref="FactorOutInverse_3.svg"/>
                </imageobject>
            </mediaobject>
        </informalequation>
        <para>Thus, the inverse-transpose solves our problem. And both GLM and GLSL have nice
            functions that can do these operations for us. Though really, if you can avoid doing an
            inverse-transpose in GLSL, you are <emphasis>strongly</emphasis> advised to do so; this
            is not a trivial computation.</para>
        <para>We do this in the <phrase role="propername">Scale and Lighting</phrase> tutorial. It
            controls mostly the same as the previous tutorial, with a few exceptions. Pressing the
            space bar will toggle between a regular cylinder and a scaled one. The <quote>T</quote>
            key will toggle between properly using the inverse-transpose (the default) and not using
            the inverse transpose. The rendering code for the cylinder is as follows:</para>
        <example>
            <title>Lighting with Proper Normal Transform</title>
            <programlisting language="cpp">Framework::MatrixStackPusher push(modelMatrix);

modelMatrix.ApplyMatrix(g_objectPole.CalcMatrix());

if(g_bScaleCyl)
{
    modelMatrix.Scale(1.0f, 1.0f, 0.2f);
}

glUseProgram(g_VertexDiffuseColor.theProgram);
glUniformMatrix4fv(g_VertexDiffuseColor.modelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(modelMatrix.Top()));
glm::mat3 normMatrix(modelMatrix.Top());
if(g_bDoInvTranspose)
{
    normMatrix = glm::transpose(glm::inverse(normMatrix));
}
glUniformMatrix3fv(g_VertexDiffuseColor.normalModelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(normMatrix));
glUniform4f(g_VertexDiffuseColor.lightIntensityUnif, 1.0f, 1.0f, 1.0f, 1.0f);
g_pCylinderMesh->Render("lit-color");
glUseProgram(0);</programlisting>
        </example>
        <para>It's pretty self-explanatory.</para>
        <figure>
            <title>Lighting and Scale</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Scale%20and%20Lighting.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>One more thing to note before we move on. Doing the inverse-transpose is only really
            necessary if you are using a <emphasis>non-uniform</emphasis> scale. In practice, it's
            actually somewhat rare to use this kind of scale factor. We do it in these tutorials, so
            that it is easier to build models from simple geometric components. But when you have an
            actual modeller creating objects for a specific purpose, non-uniform scales generally
            are not used. At least, not in the output mesh. It's better to just get the modeller to
            adjust the model as needed in their modelling application.</para>
        <para>Uniform scales are more commonly used. But even a uniform scale changes the length of
            the normal. So you still need to normalize the normal after transforming it with the
            model-to-camera matrix, even if you are not using the inverse-transpose.</para>
    </section>
    <section>
        <?dbhtml filename="Tut09 Global Illumination.html" ?>
        <title>Global Illumination</title>
        <para>You may notice something very unrealistic about the results of this tutorial. For
            example, take this image:</para>
        <figure>
            <title>Half Lit</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="No%20Ambient.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>The unlit portions of the cylinder are completely, 100% black. This almost never
            happens in real life, even for objects we perceive as being <quote>black</quote> in
            color. The reason for this is somewhat complicated.</para>
        <para>Consider a scene of the outdoors. In normal daylight, there is exactly one light
            source: the sun. Objects that are in direct sunlight appear to be bright, and objects
            that have some object between them and the sun are in shadow.</para>
        <para>But think about what those shadows look like. They're not 100% black. They're
            certainly darker than the surrounding area, but they still have some color. And
            remember: we only see anything because our eyes detect light. In order to see an object
            in the shadow of a light source, that object must either be emitting light directly or
            reflecting light that came from somewhere else. Grass is not known for its
            light-emitting qualities, so where does the light come from?</para>
        <para>Think about it. We see because an object reflects light into our eyes. But our eyes
            are not special; the object does not reflect light <emphasis>only</emphasis> into our
            eyes. It reflects light in all directions. Not necessarily at the same intensity in each
            direction, but objects that reflect light tend to do so in all directions to some
            degree. What happens when that light hits another surface?</para>
        <para>The same thing that happens when light hits any surface: some of it is absorbed, and
            some is reflected in some way.</para>
        <para>The light being cast in shadows from the sun comes from many places. Part of it is an
            atmospheric effect; the sun is so bright that the weakly reflective atmosphere reflects
            enough light to shine a color. Typically, this is a pale blue. Part of the light comes
            from other objects. The sun gives off so much light that the light reflected from other
            objects is bright enough to be a substantial contributor to the overall lighting in a
            scene.</para>
        <para>This phenomenon is called <glossterm>interreflection.</glossterm> A lighting model
            that handles interreflection is said to handle <glossterm>global
                illumination.</glossterm> It represents light that bounces from object to object
            before hitting the eyes of the person viewing the scene. Modelling only lighting
            directly from a light-emitting surface is called <glossterm>local
                illumination</glossterm> or <glossterm>direct illumination,</glossterm> and it is
            what we have been doing up until this point.</para>
        <para>As you might imagine, modelling global illumination is hard. <emphasis>Very</emphasis>
            hard. It is typically a subtle effect, but in many scenes, particularly outdoor scenes,
            it is almost a necessity to provide at least basic global illumination modelling in
            order to achieve a decent degree of photorealism. Incidentally, this is a good part of
            the reason why most games tend to avoid outdoor scenes or light outdoor scenes as though
            the sky were cloudy or overcast. This neatly avoids needing to do complex global
            illumination modelling by damping down the brightness of the sun to levels where
            interreflection would be difficult to notice.</para>
        <para>Having this completely black area in our rendering looks incredibly fake. Since doing
            actual global illumination modelling is hard, we will instead use a time-tested
            technique: <glossterm>ambient lighting.</glossterm></para>
        <para>The ambient lighting <quote>model</quote><footnote>
                <para>I put the word model in quotations because ambient lighting is so divorced
                    from anything in reality that it does not really deserve to be called a model.
                    That being said, just because it does not actually model global illumination in
                    any real way does not mean that it is not <emphasis>useful</emphasis>.</para>
            </footnote> is quite simple. It assumes that, on every object in the scene, there is a
            light of a certain intensity that emanates from everywhere. It comes from all directions
            equally, so there is no angle of incidence in our diffuse calculation. It is simply the
            ambient light intensity * the diffuse surface color.</para>
        <para>We do this in the <phrase role="propername">Ambient Lighting</phrase> tutorial. The
            controls are the same as the last tutorial, except that the space bar swaps between the
            two cylinders (red and white), and that the <keycap>T</keycap> key toggles ambient
            lighting on and off (defaults to off).</para>
        <figure>
            <title>Ambient Lighting</title>
            <mediaobject>
                <imageobject>
                    <imagedata fileref="Ambient%20Lighting.png"/>
                </imageobject>
            </mediaobject>
        </figure>
        <para>The detail seen in the dark portion of the cylinder only comes from the diffuse color.
            And because the ambient is fairly weak, the diffuse color of the surface appears muted
            in the dark areas.</para>
        <para>The rendering code now uses four vertex shaders instead of two. Two of them are
            used for non-ambient lighting, and use the same shaders we have seen before, and the
            other two use ambient lighting.</para>
        <para>The ambient vertex shader that uses per-vertex colors is called
                <filename>DirAmbVertexLighting_PCN.vert</filename> and reads as follows:</para>
        <example>
            <title>Ambient Vertex Lighting</title>
            <programlisting language="glsl">#version 330

layout(location = 0) in vec3 position;
layout(location = 1) in vec4 diffuseColor;
layout(location = 2) in vec3 normal;

smooth out vec4 interpColor;

uniform vec3 dirToLight;
uniform vec4 lightIntensity;
uniform vec4 ambientIntensity;

uniform mat4 modelToCameraMatrix;
uniform mat3 normalModelToCameraMatrix;

layout(std140) uniform Projection
{
    mat4 cameraToClipMatrix;
};

void main()
{
    gl_Position = cameraToClipMatrix * (modelToCameraMatrix * vec4(position, 1.0));
    
    vec3 normCamSpace = normalize(normalModelToCameraMatrix * normal);
    
    float cosAngIncidence = dot(normCamSpace, dirToLight);
    cosAngIncidence = clamp(cosAngIncidence, 0, 1);
    
    interpColor = (diffuseColor * lightIntensity * cosAngIncidence) +
        (diffuseColor * ambientIntensity);
}</programlisting>
        </example>
        <para>It takes two uniforms that specify lighting intensity. One specifies the intensity for
            the diffuse lighting, and the other for the ambient lighting. The only other change is
            to the last line in the shader. The usual diffuse lighting result has its value added to
            the ambient lighting computation. Also, note that the contributions from the two
            lighting models are added together.</para>
        <para>Of particular note is the difference between the lighting intensities in the
            pure-diffuse case and the diffuse+ambient case:</para>
        <example>
            <title>Lighting Intensity Settings</title>
            <programlisting language="cpp">if(g_bShowAmbient)
{
    glUseProgram(whiteDiffuse.theProgram);
    glUniform4f(whiteDiffuse.lightIntensityUnif, 0.8f, 0.8f, 0.8f, 1.0f);
    glUniform4f(whiteDiffuse.ambientIntensityUnif, 0.2f, 0.2f, 0.2f, 1.0f);
    glUseProgram(vertexDiffuse.theProgram);
    glUniform4f(vertexDiffuse.lightIntensityUnif, 0.8f, 0.8f, 0.8f, 1.0f);
    glUniform4f(vertexDiffuse.ambientIntensityUnif, 0.2f, 0.2f, 0.2f, 1.0f);
}
else
{
    glUseProgram(whiteDiffuse.theProgram);
    glUniform4f(whiteDiffuse.lightIntensityUnif, 1.0f, 1.0f, 1.0f, 1.0f);
    glUseProgram(vertexDiffuse.theProgram);
    glUniform4f(vertexDiffuse.lightIntensityUnif, 1.0f, 1.0f, 1.0f, 1.0f);
}</programlisting>
        </example>
        <para>In the pure-diffuse case, the light intensity is full white. But in the ambient case,
            we deliberately set the diffuse intensity to less than full white. This is very
            intentional.</para>
        <para>We will talk more about this issue in the near future, but it is very critical that
            light intensity values not exceed 1.0. This includes <emphasis>combined</emphasis>
            lighting intensity values. OpenGL clamps colors that it writes to the output image to
            the range [0, 1]. So any light intensity that exceeds 1.0, whether alone or combined
            with other lights, can cause unpleasant visual effects.</para>
        <para>There are ways around this, and those ways will be discussed in the eventual
            future.</para>
    </section>
    <section>
        <?dbhtml filename="Tut09 Intensity of Light.html" ?>
        <title>Intensity of Light</title>
        <para>There are many, many things wrong with the rather primitive lighting models introduced
            thus far. But one of the most important is the treatment of the lighting
            intensity.</para>
        <para>Thus far, we have used light intensity like a color. We clamp it to the range [0, 1].
            We even make sure that combined intensities from different lighting models always are
            within that range.</para>
        <para>What this effectively means is that the light does not brighten the scene. The scene
            without the light is fully lit; our lights simply <emphasis>darken</emphasis> parts of
            the scene. They take the diffuse color and make it smaller, because multiplying with a
            number on the range [0, 1] can only ever make a number smaller (or the same).</para>
        <para>This is of course not realistic. In reality, there is no such thing as a
                <quote>maximum</quote> illumination or brightness. There is no such thing as a (1,
            1, 1) light intensity. The actual range of light intensity per wavelength is on the
            range [0, ∞). This also means that the range of intensity for reflected light is [0, ∞);
            after all, if you shine a really bright light on a surface, it will reflect a lot of it.
            A surface that looks dark blue under dim light can appear light blue under very bright
            light.</para>
        <para>Of course in the real world, things tend to catch on fire if you shine <emphasis>too
                much</emphasis> light at them, but that's not something we need to model.</para>
        <para>The concept of lighting darkening a scene was common for a long time in real-time
            applications. It is another part of the reason why, for so many years, 3D games tended
            to avoid the bright outdoors, preferring corridors with darker lighting. The sun is a
            powerful light source; binding lighting intensity to the [0, 1] range does not lead to a
            realistic vision of the outdoors.</para>
        <para>One obvious way to correct this is to take all of the diffuse colors and divide them by
            a value like 2. Then increase your light intensity range from [0, 1] to [0, 2]. This is
            a workable solution, to some extent. Lights can be brighter than 1.0, and lighting can
            serve to increase the brightness of the diffuse color as well as decrease it. Of course,
            2 is just as far from infinity as 1 is, so it is not technically any closer to proper
            lighting. But it is an improvement.</para>
        <para>This technique does have its flaws. As we will see in later tutorials, you often will
            want to render the same object multiple times and combine the results. This method
            does not work when adding contributions from multiple light sources this way, unless you
            limit the sum total of all lights to the same value, just as we did for diffuse when
            combining it with an ambient term.</para>
        <para>It is certainly possible on modern hardware to model light intensity correctly. And we
            will eventually do that. But these more primitive lighting models do still have their
            uses in some cases. And it is illustrative to see what an improvement having proper
            lighting intensity can make on the result. The lighting computations themselves do not
            change; what changes are the numbers fed into them.</para>
    </section>
    <section>
        <?dbhtml filename="Tut09 In Review.html" ?>
        <title>In Review</title>
        <para>In this tutorial, you have learned the following:</para>
        <itemizedlist>
            <listitem>
                <para>Diffuse lighting is a simple lighting model based on the angle between the
                    light source and the surface normal.</para>
            </listitem>
            <listitem>
                <para>Surface normals are values used, per-vertex, to define the direction of the
                    surface at a particular location. They do not have to mirror the actual normal
                    of the mesh geometry.</para>
            </listitem>
            <listitem>
                <para>Surface normals must be transformed by the inverse-transpose of the
                    model-to-camera matrix, if that matrix can involve a non-uniform scale
                    operation.</para>
            </listitem>
            <listitem>
                <para>Light interreflection can be approximated by adding a single light that has no
                    direction.</para>
            </listitem>
        </itemizedlist>
        <section>
            <title>Further Study</title>
            <para>Try doing these things with the given programs.</para>
            <itemizedlist>
                <listitem>
                    <para>Modify the ambient lighting tutorial, bumping the diffuse light intensity
                        up to 1.0. See how this affects the results.</para>
                </listitem>
                <listitem>
                    <para>Change the shaders in the ambient lighting tutorial to use the lighting
                        intensity correction mentioned above. Divide the diffuse color by a value,
                        then pass larger lighting intensities to the shader. Notice how this changes
                        the quality of the lighting.</para>
                </listitem>
            </itemizedlist>
        </section>
        <section>
            <title>Further Research</title>
            <para>Lambertian diffuse reflectance is a rather good model for diffuse reflectance for
                many surfaces. Particularly rough surfaces, however, do not behave in a Lambertian
                manner. If you are interested in modelling such surfaces, investigate the Oren-Nayar
                reflectance model.</para>
        </section>
        <section>
            <title>GLSL Functions of Note</title>
            <funcsynopsis>
                <funcprototype>
                    <funcdef>vec <function>clamp</function></funcdef>
                    <paramdef>vec <parameter>val</parameter></paramdef>
                    <paramdef>vec <parameter>minVal</parameter></paramdef>
                    <paramdef>vec <parameter>maxVal</parameter></paramdef>
                </funcprototype>
            </funcsynopsis>
            <para>This function does a clamping operation of each component of
                    <parameter>val</parameter>. All of the parameters must be scalars or vectors of the
                same dimensionality. This function will work with any scalar or vector type. It
                returns a scalar or vector of the same dimensionality as the parameters, where each
                component of <parameter>val</parameter> will be clamped to the closed range
                    [<parameter>minVal</parameter>, <parameter>maxVal</parameter>]. This is useful
                for ensuring that values are in a certain range.</para>
            <para>All components of <parameter>minVal</parameter>
                <emphasis>must</emphasis> be less than or equal to the corresponding components of
                    <parameter>maxVal</parameter>; otherwise the results are undefined.</para>
            <funcsynopsis>
                <funcprototype>
                    <funcdef>float <function>dot</function></funcdef>
                    <paramdef>vec <parameter>x</parameter></paramdef>
                    <paramdef>vec <parameter>y</parameter></paramdef>
                </funcprototype>
            </funcsynopsis>
            <para>This function performs a vector dot product on <parameter>x</parameter> and
                    <parameter>y</parameter>. This always results in a scalar value. The two
                parameters must have the same dimensionality and must be vectors.</para>
            <funcsynopsis>
                <funcprototype>
                    <funcdef>vec <function>normalize</function></funcdef>
                    <paramdef>vec <parameter>x</parameter></paramdef>
                </funcprototype>
            </funcsynopsis>
            <para>This function returns a vector in the same direction as <parameter>x</parameter>,
                but with a length of 1. <parameter>x</parameter> must have a length greater than 0
                (that is, it cannot be a vector with all zeros).</para>
        </section>
    </section>
    <section>
        <?dbhtml filename="Tut09 Glossary.html" ?>
        <title>Glossary</title>
        <glosslist>
            <glossentry>
                <glossterm>photorealism</glossterm>
                <glossdef>
                    <para>A rendering system has achieved photorealism when it can render a still
                        image that is essentially indistinguishable from a real photograph.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>lighting model</glossterm>
                <glossdef>
                    <para>A mathematical model that defines how light is absorbed and reflected from
                        a surface. This can attempt to model reality, but it does not have
                        to.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>light source</glossterm>
                <glossdef>
                    <para>Mathematically, this is something that produces light and adds it to a
                        scene. It does not have to be an actual object shown in the world.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>light intensity</glossterm>
                <glossdef>
                    <para>The intensity, measured in RGB, of light emitted from a light-casting
                        source.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>angle of incidence</glossterm>
                <glossdef>
                    <para>The angle between the surface normal and the direction towards the
                        light.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>diffuse lighting</glossterm>
                <glossdef>
                    <para>A lighting model that assumes light is reflected from a surface in many
                        directions, as opposed to a flat mirror that reflects light in one
                        direction.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>Lambertian reflectance</glossterm>
                <glossdef>
                    <para>A particular diffuse lighting model that represents the ideal diffuse
                        case: lighting is reflected evenly in all directions.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>surface normal, normal</glossterm>
                <glossdef>
                    <para>The direction that a particular point on a surface faces.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>Gouraud shading</glossterm>
                <glossdef>
                    <para>Computing lighting computations at every vertex, and interpolating the
                        results of these computations across the surface of the triangle.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>directional light source</glossterm>
                <glossdef>
                    <para>A light source that emits light along a particular direction. Every point
                        in the scene to be rendered receives light from the same direction. This
                        models a very distant light source that lights the scene evenly from a
                        single direction.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>vector dot product</glossterm>
                <glossdef>
                    <para>Computes the length of the projection of one vector onto another. If the
                        two vectors are unit vectors, then the dot product is simply the cosine of
                        the angle between them.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>transpose</glossterm>
                <glossdef>
                    <para>A matrix operation that flips the matrix along the main diagonal. The
                        columns of the original matrix become the rows of the transpose.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>inverse transpose</glossterm>
                <glossdef>
                    <para>A matrix operation, where a matrix is inverted and then transposed.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>interreflection</glossterm>
                <glossdef>
                    <para>Light that reflects off of multiple surfaces before reaching the
                        viewer.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>global illumination</glossterm>
                <glossdef>
                    <para>A category of lighting models that take into account lighting
                        contributions from interreflection.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>local illumination, direct illumination</glossterm>
                <glossdef>
                    <para>Lighting computations made only from light sources that cast light
                        directly onto the surface.</para>
                </glossdef>
            </glossentry>
            <glossentry>
                <glossterm>ambient lighting</glossterm>
                <glossdef>
                    <para>A lighting model that models all contributions from interreflection as a
                        single light intensity that does not originate from any particular
                        direction.</para>
                </glossdef>
            </glossentry>
        </glosslist>
    </section>
</chapter>