Commits

Jason McKesson  committed 79148ac

Tutorial 4 has most images complete.

  • Parent commits b56c928


Files changed (18)

File Documents/Basics/GenNormDeviceCoord.lua

 	return ret;
 end
 
---Negate the Z to get into a left-handed system.
 local viewportMatrix = vmath.mat4(
 	vmath.vec4(imageWidth / 2, 0, 0, imageWidth / 2),
 	vmath.vec4(0, imageHeight / 2, 0, imageHeight / 2),

File Documents/Basics/NormDeviceCoord.svg

Image changed (old and new renderings not shown).

File Documents/Outline.xml

                         glDepthRange and the depth portion of the viewport transform.</para>
                 </listitem>
                 <listitem>
-                    <para>Clipping. Show how things are clipped against the view frustum.</para>
+                    <para>Clipping. Show how things are clipped against the view frustum. Note that
+                        clipping happens in <emphasis>clip-space</emphasis>, not NDC space. Thus,
+                        the clipped vertices will <emphasis>never</emphasis> have a W of 0.</para>
                 </listitem>
             </itemizedlist>
         </section>

File Documents/Positioning/CameraToPerspective.svg

New image added.

File Documents/Positioning/GenCameraToPerspective.lua

+require "SvgWriter"
+require "vmath"
+
+vec2 = vmath.vec2;
+
+-- Sizing
+local numSubImages = 2;
+local subImageWidth, subImageHeight = 400, 400;
+local subImageSpacing = 100;
+local belowImageSpaceing = 50;
+
+local imageWidth = (subImageWidth * numSubImages) + (subImageSpacing * (numSubImages - 1));
+local imageHeight = subImageHeight + belowImageSpaceing;
+
+local subImageSize = vmath.vec2{subImageWidth, subImageHeight};
+local pointSize = 10
+local circleRadius = subImageWidth / 8
+
+local subImagePositions = {}
+
+for i = 1, numSubImages, 1 do
+	subImagePositions[i] = vmath.vec2{(subImageWidth + subImageSpacing) * (i-1), 0};
+end
+
+local worldWidth = 4;
+local halfWorldWidth = worldWidth / 2;
+local leftWorldOffset = vmath.vec2(0, halfWorldWidth * 0.75)
+local leftWorldVertRange = vmath.vec2(-halfWorldWidth - leftWorldOffset[2], halfWorldWidth -leftWorldOffset[2])
+
+local function TransformPointToLeftWnd(tPoint)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = TransformPointToLeftWnd(realPoint)
+		end
+		return ret;
+	end
+
+	local final = vmath.vec2(tPoint);
+	final = final + leftWorldOffset;
+	final = final + (halfWorldWidth);
+	final = final * (subImageSize / worldWidth);
+	final = final + subImagePositions[1]
+	return final;
+end
+
+local function TransformPointToRightWnd(tPoint)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = TransformPointToRightWnd(realPoint)
+		end
+		return ret;
+	end
+
+	local final = vmath.vec2(tPoint);
+	--final.y = -final.y
+	final = final + (halfWorldWidth);
+	final = final * (subImageSize / worldWidth);
+	final = final + subImagePositions[2]
+	return final;
+end
+
+local zNear, zFar = -1.0, -3.0;
+local zCenter = (zFar + zNear) / 2;
+local zScale = math.abs(zFar - zNear) / 2;
+
+local function TransformToNDC(tPoint)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = TransformToNDC(realPoint)
+		end
+		return ret;
+	end
+	
+	local final = vec2(tPoint);
+	--[[
+	final.y = (final.y - zCenter);
+	final.y = final.y / zScale;
+	final.y = -final.y;
+	
+	final.x = final.x / -(tPoint.y)
+	]]
+	local near, far = -zNear, -zFar;
+	final.y = (final.y * ((far + near)/(near-far))) + ((2 * near * far)/(near-far))
+	final = final / -(tPoint.y)
+	return final;
+end
+
+
+-- Styles
+local styleLib = SvgWriter.StyleLibrary();
+
+styleLib:AddStyle(nil, "black",
+	SvgWriter.Style():stroke("black"):stroke_width("1px"));
+	
+styleLib:AddStyle(nil, "stroke_none",
+	SvgWriter.Style():stroke("none"));
+	
+styleLib:AddStyle(nil, "object_lines",
+	SvgWriter.Style():stroke("#00C000"):stroke_width("1px"):fill("none"));
+	
+styleLib:AddStyle(nil, "object_circles",
+	SvgWriter.Style():fill("#00C000"):stroke("black"):stroke_width("0.5px"));
+	
+styleLib:AddStyle(nil, "radial_eye",
+	SvgWriter.Style():stroke("black"):stroke_dasharray{3, 3});
+	
+styleLib:AddStyle(nil, "wide_black",
+	SvgWriter.Style():stroke("black"):stroke_width("3px"));
+
+styleLib:AddStyle(nil, "fill_black",
+	SvgWriter.Style():fill("black"));
+	
+styleLib:AddStyle(nil, "fill_frustum",
+	SvgWriter.Style():fill("#E0E0E0"));
+	
+styleLib:AddStyle(nil, "fill_none",
+	SvgWriter.Style():fill("none"));
+	
+styleLib:AddStyle(nil, "text",
+	SvgWriter.Style():font_size("30px"):font_family("monospace") );
+	
+styleLib:AddStyle(nil, "axis_label",
+	SvgWriter.Style():stroke("black"):font_size("30px"):font_family("monospace") );
+	
+styleLib:AddStyle(nil, "image_label",
+	SvgWriter.Style():stroke("black"):font_size("40px"):font_family("serif"):text_anchor("middle") );
+	
+styleLib:AddStyle(nil, "pointed",
+	SvgWriter.Style():marker(SvgWriter.uriLocalElement("point")));
+
+styleLib:AddStyle(nil, "arrow_ended",
+	SvgWriter.Style():marker_end(SvgWriter.uriLocalElement("arrow")));
+styleLib:AddStyle(nil, "arrows",
+	SvgWriter.Style():marker_mid(SvgWriter.uriLocalElement("arrow")):marker_end(SvgWriter.uriLocalElement("arrow")));
+styleLib:AddStyle(nil, "double_arrowheaded",
+	SvgWriter.Style():marker_start(SvgWriter.uriLocalElement("arrow")):marker_end(SvgWriter.uriLocalElement("arrow")));
+
+-- Paths and other data.
+
+local arrowWidth, arrowLength = 12, 16;
+
+local arrowheadPath = SvgWriter.Path();
+arrowheadPath:M{arrowLength, arrowWidth / 2}:L{0, 0}:L{0, arrowWidth}:Z();
+
+local leftAxisLocs =
+{
+	vec2(0, 0),
+	vec2(0, leftWorldVertRange[1]),
+	vec2(0, leftWorldVertRange[2]),
+	vec2(-worldWidth/2, 0),
+	vec2(worldWidth/2, 0),
+}
+
+local numAxisHashes = 8
+local leftAxisHashes = {}
+local startPt = leftAxisLocs[2];
+local dir = leftAxisLocs[3] - leftAxisLocs[2]
+local hashSize = worldWidth / 25;
+for i = 1, numAxisHashes - 1 do
+	local leftPt = startPt + (dir * (i / numAxisHashes));
+	local rightPt = vec2(leftPt);
+	if(math.mod(i, 2) == 0) then
+		leftPt.x = leftPt.x + hashSize * 0.75
+		rightPt.x = rightPt.x - hashSize * 0.75
+	else
+		leftPt.x = leftPt.x + hashSize/2
+		rightPt.x = rightPt.x - hashSize/2
+	end
+	leftAxisHashes[#leftAxisHashes + 1] = leftPt;
+	leftAxisHashes[#leftAxisHashes + 1] = rightPt;
+end
+
+startPt = leftAxisLocs[4];
+dir = leftAxisLocs[5] - leftAxisLocs[4]
+for i = 1, numAxisHashes - 1 do
+	local botPt = startPt + (dir * (i / numAxisHashes));
+	local topPt = vec2(botPt);
+	if(math.mod(i, 2) == 0) then
+		botPt.y = botPt.y + hashSize * 0.75
+		topPt.y = topPt.y - hashSize * 0.75
+	else
+		botPt.y = botPt.y + hashSize/2
+		topPt.y = topPt.y - hashSize/2
+	end
+	leftAxisHashes[#leftAxisHashes + 1] = botPt;
+	leftAxisHashes[#leftAxisHashes + 1] = topPt;
+end
+
+leftAxisHashes = TransformPointToLeftWnd(leftAxisHashes);
+leftAxisLocs = TransformPointToLeftWnd(leftAxisLocs);
+
+
+local rightAxisLocs =
+{
+	vec2(0, 0),
+	vec2(0, -worldWidth/2),
+	vec2(0, worldWidth/2),
+	vec2(-worldWidth/2, 0),
+	vec2(worldWidth/2, 0),
+}
+
+local rightAxisHashes = {}
+local startPt = rightAxisLocs[2];
+local dir = rightAxisLocs[3] - rightAxisLocs[2]
+hashSize = worldWidth / 25;
+for i = 1, numAxisHashes - 1 do
+	local leftPt = startPt + (dir * (i / numAxisHashes));
+	local rightPt = vec2(leftPt);
+	if(math.mod(i, 2) == 0) then
+		leftPt.x = leftPt.x + hashSize * 0.75
+		rightPt.x = rightPt.x - hashSize * 0.75
+	else
+		leftPt.x = leftPt.x + hashSize/2
+		rightPt.x = rightPt.x - hashSize/2
+	end
+	rightAxisHashes[#rightAxisHashes + 1] = leftPt;
+	rightAxisHashes[#rightAxisHashes + 1] = rightPt;
+end
+
+startPt = rightAxisLocs[4];
+dir = rightAxisLocs[5] - rightAxisLocs[4]
+for i = 1, numAxisHashes - 1 do
+	local botPt = startPt + (dir * (i / numAxisHashes));
+	local topPt = vec2(botPt);
+	if(math.mod(i, 2) == 0) then
+		botPt.y = botPt.y + hashSize * 0.75
+		topPt.y = topPt.y - hashSize * 0.75
+	else
+		botPt.y = botPt.y + hashSize/2
+		topPt.y = topPt.y - hashSize/2
+	end
+	rightAxisHashes[#rightAxisHashes + 1] = botPt;
+	rightAxisHashes[#rightAxisHashes + 1] = topPt;
+end
+
+rightAxisHashes = TransformPointToRightWnd(rightAxisHashes);
+rightAxisLocs = TransformPointToRightWnd(rightAxisLocs);
+
+
+local function TransformBoth(tPoint)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = TransformBoth(realPoint)
+		end
+		return ret;
+	end
+
+	local testPtLeft = tPoint;
+	local testPtRight = TransformToNDC(testPtLeft);
+	testPtLeft = TransformPointToLeftWnd(testPtLeft);
+	testPtRight = TransformPointToRightWnd(testPtRight);
+	
+	return {testPtLeft, testPtRight};
+end
+
+local testPts =
+{
+	vec2(1.5, -0.75),
+	vec2(0.5, -0.75),
+
+	vec2(1.5, -1.25),
+	vec2(0.5, -1.25),
+
+	vec2(1.5, -1.75),
+	vec2(0.5, -1.75),
+
+	vec2(1.5, -2.25),
+	vec2(0.5, -2.25),
+
+	vec2(1.5, -2.75),
+	vec2(0.5, -2.75),
+
+	vec2(1.5, -3.25),
+	vec2(0.5, -3.25),
+};
+
+testPts = TransformBoth(testPts)
+
+local leftBlocksPath = SvgWriter.Path();
+local rightBlocksPath = SvgWriter.Path();
+
+for i = 1, #testPts - 2, 2 do
+	leftBlocksPath:M(testPts[i + 2][1]):L(testPts[i][1]):L(testPts[i + 1][1]):L(testPts[i + 3][1]);
+	rightBlocksPath:M(testPts[i + 2][2]):L(testPts[i][2]):L(testPts[i + 1][2]):L(testPts[i + 3][2]);
+end
+
+leftBlocksPath:Z();
+rightBlocksPath:Z();
+
+local frustumPoints =
+{
+	vec2(zNear, zNear),
+	vec2(-zNear, zNear),
+	vec2(worldWidth/2, -worldWidth/2),
+	vec2(worldWidth/2, zFar),
+	vec2(-worldWidth/2, zFar),
+	vec2(-worldWidth/2, -worldWidth/2),
+}
+
+frustumPoints = TransformPointToLeftWnd(frustumPoints);
+
+local leftViewArea = SvgWriter.Path()
+
+leftViewArea:M(frustumPoints[1]);
+
+for i = 2, #frustumPoints do
+	leftViewArea:L(frustumPoints[i]);
+end
+
+leftViewArea:Z();
+
+local rightViewArea = { vec2(-1, -1), vec2(1, 1); }
+rightViewArea = TransformPointToRightWnd(rightViewArea);
+
+local leftOrigin = TransformPointToLeftWnd(vec2(0, 0));
+
+local leftAxisLocations = { vec2(halfWorldWidth, 0), vec2(0, leftWorldVertRange[1]) };
+leftAxisLocations = TransformPointToLeftWnd(leftAxisLocations);
+local rightAxisLocations = { vec2(halfWorldWidth, 0), vec2(0, -halfWorldWidth) }
+rightAxisLocations = TransformPointToRightWnd(rightAxisLocations);
+
+local axisLabelPixelOffsets = { vec2(-45, -10), vec2(-50, 30) };
+
+local imageTitleLoc = {vec2(subImageWidth / 2, subImageHeight)}
+imageTitleLoc[2] = imageTitleLoc[1] + vec2(subImageWidth + subImageSpacing, 0)
+local imageTitleOffset = vec2(0, 40)
+
+
+-- The SVG itself.
+local writer = SvgWriter.SvgWriter("CameraToPerspective.svg", {imageWidth .."px", imageHeight .. "px"});
+	writer:StyleLibrary(styleLib);
+	writer:BeginDefinitions();
+		writer:BeginMarker({pointSize, pointSize}, {pointSize/2, pointSize/2}, "auto", true, nil, "point");
+			writer:Circle({pointSize/2, pointSize/2}, pointSize/2, {"fill_black", "black"});
+		writer:EndMarker();
+		writer:BeginMarker({arrowLength, arrowWidth}, {arrowLength, arrowWidth / 2}, "auto", true, nil, "arrow");
+			writer:Path(arrowheadPath, {"fill_black", "black"});
+		writer:EndMarker();
+	writer:EndDefinitions();
+	
+	--Draw the viewing volumes.
+	writer:Path(leftViewArea, {"stroke_none", "fill_frustum"});
+	writer:Rect2Pt(rightViewArea[1], rightViewArea[2], nil, {"stroke_none", "fill_frustum"});
+	
+	--Draw the eye lines.
+	writer:Line(leftOrigin, frustumPoints[3], {"radial_eye"});
+	writer:Line(leftOrigin, frustumPoints[6], {"radial_eye"});
+	
+	--Draw the coordinate axes.
+	for i, endPt in ipairs(leftAxisLocs) do
+		if(i ~= 1) then
+			writer:Line(leftAxisLocs[1], leftAxisLocs[i], {"black", "arrow_ended"})
+		end
+	end
+	
+	for i = 1, #leftAxisHashes, 2 do
+		writer:Line(leftAxisHashes[i], leftAxisHashes[i+1], {"black"})
+	end
+
+	for i, endPt in ipairs(rightAxisLocs) do
+		if(i ~= 1) then
+			writer:Line(rightAxisLocs[1], rightAxisLocs[i], {"black", "arrow_ended"})
+		end
+	end
+	
+	for i = 1, #rightAxisHashes, 2 do
+		writer:Line(rightAxisHashes[i], rightAxisHashes[i+1], {"black"})
+	end
+
+	--Draw the objects
+	writer:Path(leftBlocksPath, {"object_lines"});
+	writer:Path(rightBlocksPath, {"object_lines"});
+	
+	for i, ptPair in ipairs(testPts) do
+		writer:Circle(ptPair[1], 5, {"object_circles"});
+		writer:Circle(ptPair[2], 5, {"object_circles"});
+	end
+	
+	--label the axes.
+	writer:Text("+X", leftAxisLocations[1] + axisLabelPixelOffsets[1], {"axis_label"})
+	writer:Text("-Z", leftAxisLocations[2] + axisLabelPixelOffsets[2], {"axis_label"})
+	writer:Text("+X", rightAxisLocations[1] + axisLabelPixelOffsets[1], {"axis_label"})
+	writer:Text("-Z", rightAxisLocations[2] + axisLabelPixelOffsets[2], {"axis_label"})
+
+	--label the images
+	writer:Text("Camera Space", imageTitleLoc[1] + imageTitleOffset, {"image_label"});
+	writer:Text("Norm. Device Coord.", imageTitleLoc[2] + imageTitleOffset, {"image_label"});
+	
+--	writer:Rect(subImagePositions[1], subImageSize, nil, {"black", "fill_none"});
+--	writer:Rect(subImagePositions[2], subImageSize, nil, {"black", "fill_none"});
+--	writer:Rect2Pt(TransformPointToLeftWnd(vmath.vec2(-1, -1)), TransformPointToLeftWnd(vmath.vec2(1, 1)), nil, {"fill_none", "black"});
+	
+writer:Close();
+
+
+
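Note: the TransformToNDC helper above applies the clip-space Z mapping for zNear = -1, zFar = -3 and then divides by -Z. A quick sanity check, assuming vmath's vec2 arithmetic behaves as it is used throughout these scripts:

	TransformToNDC(vec2(1.5, -1.0)) -- x stays 1.5, Z maps to -1: a point on the near plane
	TransformToNDC(vec2(1.5, -3.0)) -- x becomes 0.5, Z maps to +1: the same X on the far plane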

File Documents/Positioning/GenOrtho2DProjection.lua

+require "SvgWriter"
+require "vmath"
+
+local imageWidth = 600;
+local imageHeight = 500;
+local imageSize = vmath.vec2(imageWidth, imageHeight);
+
+local worldBox = 4;
+
+local worldExtents = {-worldBox / 2, worldBox / 2}
+
+local function TransformPoint(tPoint)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = TransformPoint(realPoint)
+		end
+		return ret;
+	end
+
+	local final = vmath.vec2(tPoint);
+	final.y = -final.y;
+	final = final - worldExtents[1];
+	if(imageWidth > imageHeight) then
+		final = final * (imageHeight / worldBox);
+		final = final + vmath.vec2((imageWidth - imageHeight) / 2, 0.0); --Allow for centering a rectangular image size.
+	else
+		final = final * (imageWidth / worldBox);
+		final = final + vmath.vec2(0.0, (imageHeight - imageWidth) / 2); --Allow for centering a rectangular image size.
+		print(tPoint, final)
+	end
+
+	return final;
+end
+
+local function ProjectPoint(tPoint, projection)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = ProjectPoint(realPoint, projection)
+		end
+		return ret;
+	end
+
+	return vmath.vec2(tPoint.x, projection.lineLoc.y);
+end
+
+
+----------------------------------
+-- Styles
+local styleLib = SvgWriter.StyleLibrary();
+
+styleLib:AddStyle(nil, "black",
+	SvgWriter.Style():stroke("black"));
+	
+styleLib:AddStyle(nil, "red",
+	SvgWriter.Style():stroke("red"));
+	
+styleLib:AddStyle(nil, "line_of_proj",
+	SvgWriter.Style():stroke("red"):stroke_width("1px"):stroke_dasharray({4, 8}));
+	
+styleLib:AddStyle(nil, "object",
+	SvgWriter.Style():stroke("#00C000"):stroke_width("1px"));
+	
+styleLib:AddStyle(nil, "object_projected",
+	SvgWriter.Style():stroke("#00C000"):stroke_width("8px"));
+	
+styleLib:AddStyle(nil, "line_standard",
+	SvgWriter.Style():stroke_width("1px"));
+	
+styleLib:AddStyle(nil, "projected",
+	SvgWriter.Style():stroke_width("8px"));
+	
+styleLib:AddStyle(nil, "dashed",
+	SvgWriter.Style():stroke_dasharray({4, 8}));
+	
+	
+styleLib:AddStyle(nil, "fill_black",
+	SvgWriter.Style():fill("black"));
+	
+styleLib:AddStyle(nil, "fill_transluscent",
+	SvgWriter.Style():fill("blue"):fill_opacity(0.1));
+
+styleLib:AddStyle(nil, "fill_none",
+	SvgWriter.Style():fill("none"));
+	
+styleLib:AddStyle(nil, "background",
+	SvgWriter.Style():fill("#E0E0E0"):stroke("none"));
+	
+
+----------------------------------
+-- Point setup.
+
+local lineProjection = {}
+lineProjection.lineLoc = vmath.vec2(0.0, worldExtents[1] * 0.5);
+lineProjection.lineWidth = 2.0;
+lineProjection.lineEndPts =
+{
+	vmath.vec2(-lineProjection.lineWidth / 2, lineProjection.lineLoc.y),
+	vmath.vec2(lineProjection.lineWidth / 2, lineProjection.lineLoc.y),
+}
+lineProjection.finalEndPts = TransformPoint(lineProjection.lineEndPts)
+
+local sideProjLines = {}
+sideProjLines[1] = vmath.vec2(lineProjection.lineEndPts[1])
+sideProjLines[1].x = worldExtents[1];
+sideProjLines[2] = vmath.vec2(lineProjection.lineEndPts[2])
+sideProjLines[2].x = worldExtents[2];
+
+sideProjLines = TransformPoint(sideProjLines);
+
+local projectRect =
+{
+	vmath.vec2(lineProjection.finalEndPts[1].x, 0),
+	vmath.vec2(lineProjection.finalEndPts[2].x - lineProjection.finalEndPts[1].x, lineProjection.finalEndPts[2].y),
+}
+
+
+local rectShape =
+{
+	vmath.vec2(0.8, 0.0),
+	vmath.vec2(0.5, 1.5),
+}
+
+local rectProj = ProjectPoint(rectShape, lineProjection)
+
+rectShape = TransformPoint(rectShape);
+rectProj = TransformPoint(rectProj);
+
+local rectProjOffset = {}
+for i, tPoint in ipairs(rectProj) do
+	rectProjOffset[i] = tPoint + vmath.vec2(0, -1);
+end
+
+local lineShape =
+{
+	vmath.vec2(-0.3, -0.25),
+	vmath.vec2(-1.75, 1.25),
+}
+
+local lineProj = ProjectPoint(lineShape, lineProjection)
+
+lineShape = TransformPoint(lineShape)
+lineProj = TransformPoint(lineProj);
+
+
+----------------------------------------
+-- The SVG itself
+local writer = SvgWriter.SvgWriter("Ortho2DProjection.svg", {imageWidth .."px", imageHeight .. "px"});
+	writer:StyleLibrary(styleLib);
+	writer:BeginDefinitions();
+	writer:EndDefinitions();
+	
+	--Background
+	writer:Rect(projectRect[1], projectRect[2], nil, {"background"});
+	--Projection
+	writer:Line(rectProj[1], rectShape[1], {"line_of_proj"});
+	writer:Line(rectProj[2], vmath.vec2(rectShape[2].x, rectShape[1].y), {"line_of_proj"});
+	writer:Line(rectProj[1], rectProj[2], {"object_projected"});
+	
+	writer:Line(lineShape[1], lineProj[1], {"line_of_proj"});
+	writer:Line(lineShape[2], lineProj[2], {"line_of_proj"});
+	writer:Line(lineProj[1], lineProj[2], {"object_projected"});
+
+	--Draw shapes.
+	writer:Rect(rectShape[2], rectShape[1] - rectShape[2], nil, {"object", "fill_none"});
+	writer:Line(lineShape[1], lineShape[2], {"object"});
+	
+	
+	--Draw the projection plane.
+	writer:Line(lineProjection.finalEndPts[1], lineProjection.finalEndPts[2], {"black", "line_standard"});
+	writer:Line(lineProjection.finalEndPts[1], sideProjLines[1], {"black", "line_standard", "dashed"});
+	writer:Line(lineProjection.finalEndPts[2], sideProjLines[2], {"black", "line_standard", "dashed"});
+
+--[[	
+	--Debug: Box around the world.
+	local tempPt = TransformPoint(vmath.vec2(worldExtents[1], worldExtents[2]));
+	writer:Rect(tempPt, TransformPoint(vmath.vec2(worldExtents[2], worldExtents[1])) - tempPt,
+		nil, {"black", "line_standard", "fill_none"});
+		]]
+writer:Close();
+
+	
+	
+	

File Documents/Positioning/GenPersp2DProjection.lua

+require "SvgWriter"
+require "vmath"
+
+local imageWidth = 500;
+local imageHeight = 500;
+local imageSize = vmath.vec2(imageWidth, imageHeight);
+
+local worldBox = 8;
+
+local worldExtents = {-worldBox / 2, worldBox / 2}
+
+local function TransformPoint(tPoint)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = TransformPoint(realPoint)
+		end
+		return ret;
+	end
+
+	local final = vmath.vec2(tPoint);
+	final.y = -final.y;
+	final = final - worldExtents[1];
+	if(imageWidth > imageHeight) then
+		final = final * (imageHeight / worldBox);
+		final = final + vmath.vec2((imageWidth - imageHeight) / 2, 0.0); --Allow for centering a rectangular image size.
+	else
+		final = final * (imageWidth / worldBox);
+		final = final + vmath.vec2(0.0, (imageHeight - imageWidth) / 2); --Allow for centering a rectangular image size.
+	end
+
+	return final;
+end
+
+local function ProjectPoint(tPoint, projection)
+	if(vmath.vtype(tPoint) == "table") then
+		local ret = {}
+		for i, realPoint in ipairs(tPoint) do
+			ret[i] = ProjectPoint(realPoint, projection)
+		end
+		return ret;
+	end
+
+	local lineDir = tPoint - projection.eyeLoc;
+	local offset = (projection.lineLoc.y - projection.eyeLoc.y) / lineDir.y;
+	return (lineDir * offset) + projection.eyeLoc;
+end
+
+
+----------------------------------
+-- Styles
+local styleLib = SvgWriter.StyleLibrary();
+
+styleLib:AddStyle(nil, "black",
+	SvgWriter.Style():stroke("black"));
+	
+styleLib:AddStyle(nil, "red",
+	SvgWriter.Style():stroke("red"));
+	
+styleLib:AddStyle(nil, "line_of_proj",
+	SvgWriter.Style():stroke("red"):stroke_width("1px"):stroke_dasharray({4, 8}));
+	
+styleLib:AddStyle(nil, "object",
+	SvgWriter.Style():stroke("#00C000"):stroke_width("1px"));
+	
+styleLib:AddStyle(nil, "object_projected",
+	SvgWriter.Style():stroke("#00C000"):stroke_width("8px"));
+	
+styleLib:AddStyle(nil, "line_standard",
+	SvgWriter.Style():stroke_width("1px"));
+	
+styleLib:AddStyle(nil, "projected",
+	SvgWriter.Style():stroke_width("8px"));
+	
+styleLib:AddStyle(nil, "dashed",
+	SvgWriter.Style():stroke_dasharray({4, 8}));
+	
+	
+styleLib:AddStyle(nil, "fill_black",
+	SvgWriter.Style():fill("black"));
+	
+styleLib:AddStyle(nil, "fill_transluscent",
+	SvgWriter.Style():fill("blue"):fill_opacity(0.1));
+
+styleLib:AddStyle(nil, "fill_none",
+	SvgWriter.Style():fill("none"));
+	
+styleLib:AddStyle(nil, "background",
+	SvgWriter.Style():fill("#E0E0E0"):stroke("none"));
+	
+
+----------------------------------
+-- Point setup.
+
+local lineProjection = {}
+lineProjection.lineLoc = vmath.vec2(0.0, worldExtents[1] * 0.5);
+lineProjection.eyeLoc = vmath.vec2(0.0, worldExtents[1] * 0.75);
+lineProjection.lineWidth = 2.0;
+lineProjection.lineEndPts =
+{
+	vmath.vec2(-lineProjection.lineWidth / 2, lineProjection.lineLoc.y),
+	vmath.vec2(lineProjection.lineWidth / 2, lineProjection.lineLoc.y),
+}
+lineProjection.finalEndPts = TransformPoint(lineProjection.lineEndPts)
+lineProjection.finalEyeLoc = TransformPoint(lineProjection.eyeLoc)
+
+local sideProjLines = {}
+sideProjLines[1] = vmath.vec2(lineProjection.lineEndPts[1])
+sideProjLines[1].x = worldExtents[1];
+sideProjLines[2] = vmath.vec2(lineProjection.lineEndPts[2])
+sideProjLines[2].x = worldExtents[2];
+
+sideProjLines = TransformPoint(sideProjLines);
+
+local projectFrustum =
+{
+	vmath.vec2(lineProjection.lineEndPts[1]),
+	vmath.vec2(lineProjection.lineEndPts[2]),
+}
+
+do
+	local lineDir = lineProjection.lineEndPts[2] - lineProjection.eyeLoc;
+	local offset = (worldExtents[2] - lineProjection.eyeLoc.y) / lineDir.y;
+	projectFrustum[3] = (lineDir * offset) + lineProjection.eyeLoc;
+	lineDir = lineProjection.eyeLoc - lineProjection.lineEndPts[1];
+	offset = (worldExtents[2] - lineProjection.eyeLoc.y) / lineDir.y;
+	projectFrustum[4] = (lineDir * offset) + lineProjection.eyeLoc;
+end
+
+projectFrustum = TransformPoint(projectFrustum);
+
+
+local rectShape =
+{
+	vmath.vec2(0.8, 0.0),
+	vmath.vec2(0.5, 0.0),
+	vmath.vec2(0.5, 1.5),
+	vmath.vec2(0.8, 1.5),
+}
+
+local rectProj = ProjectPoint(rectShape, lineProjection)
+
+rectShape = TransformPoint(rectShape);
+rectProj = TransformPoint(rectProj);
+
+local rectProjOffset = {}
+for i, tPoint in ipairs(rectProj) do
+	rectProjOffset[i] = tPoint + vmath.vec2(0, -1);
+end
+
+local lineShape =
+{
+	vmath.vec2(-3.3, -1.5),
+	vmath.vec2(-0.5, 3.0),
+}
+
+local lineProj = ProjectPoint(lineShape, lineProjection)
+
+lineShape = TransformPoint(lineShape)
+lineProj = TransformPoint(lineProj);
+
+
+----------------------------------------
+-- The SVG itself
+local writer = SvgWriter.SvgWriter("Persp2DProjection.svg", {imageWidth .."px", imageHeight .. "px"});
+	writer:StyleLibrary(styleLib);
+	writer:BeginDefinitions();
+	writer:EndDefinitions();
+	
+	--Background
+	writer:Polygon(projectFrustum, {"background"})
+	
+	--Projection
+	writer:Line(rectProj[1], rectProj[3], {"object_projected"});
+	
+	for i, proj in ipairs(rectProj) do
+		writer:Line(proj, rectShape[i], {"line_of_proj"});
+	end
+
+	writer:Line(lineShape[1], lineProj[1], {"line_of_proj"});
+	writer:Line(lineShape[2], lineProj[2], {"line_of_proj"});
+	writer:Line(lineProj[1], lineProj[2], {"object_projected"});
+
+	--Draw shapes.
+	writer:Polygon(rectShape, {"object", "fill_none"});
+	writer:Line(lineShape[1], lineShape[2], {"object"});
+	
+	
+	--Draw the projection plane.
+	writer:Line(lineProjection.finalEndPts[1], lineProjection.finalEndPts[2], {"black", "line_standard"});
+	writer:Line(lineProjection.finalEndPts[1], sideProjLines[1], {"black", "line_standard", "dashed"});
+	writer:Line(lineProjection.finalEndPts[2], sideProjLines[2], {"black", "line_standard", "dashed"});
+	
+	writer:Circle(lineProjection.finalEyeLoc, imageWidth / 100, {"black", "fill_black"})
+	writer:Line(lineProjection.finalEndPts[1], lineProjection.finalEyeLoc, {"black", "line_standard", "dashed"});
+	writer:Line(lineProjection.finalEndPts[2], lineProjection.finalEyeLoc, {"black", "line_standard", "dashed"});
+	
+
+--[[	
+	--Debug: Box around the world.
+	local tempPt = TransformPoint(vmath.vec2(worldExtents[1], worldExtents[2]));
+	writer:Rect(tempPt, TransformPoint(vmath.vec2(worldExtents[2], worldExtents[1])) - tempPt,
+		nil, {"black", "line_standard", "fill_none"});
+		]]
+writer:Close();
+
+	
+	
+	

File Documents/Positioning/GenViewFrustum.lua

+require "SvgWriter"
+require "vmath"
+
+local imageWidth, imageHeight = 500, 500;
+
+local yAngle = math.rad(45);
+local zAngle = math.rad(20);
+
+local ySin, yCos = math.sin(yAngle), math.cos(yAngle);
+local zSin, zCos = math.sin(zAngle), math.cos(zAngle);
+
+local yMat = vmath.mat4(
+	vmath.vec4(yCos, 0, ySin, 0),
+	vmath.vec4(0, 1, 0, 0),
+	vmath.vec4(-ySin, 0, yCos, 0),
+	vmath.vec4(0, 0, 0, 1))
+	
+local zMat = vmath.mat4(
+	vmath.vec4(1, 0, 0, 0),
+	vmath.vec4(0, zCos, -zSin, 0),
+	vmath.vec4(0, zSin, zCos, 0),
+	vmath.vec4(0, 0, 0, 1))
+
+local worldScale = 0.1;
+	
+local scalingMatrix = vmath.mat4(
+	vmath.vec4(worldScale, 0, 0, 0),
+	vmath.vec4(0, worldScale, 0, 0),
+	vmath.vec4(0, 0, worldScale, 0),
+	vmath.vec4(0, 0, 0, 1))
+	
+local fullMat = (scalingMatrix * zMat) * yMat
+
+local function LocalTransform(listOfPoints)
+	local ret = {};
+	for i, point in ipairs(listOfPoints) do
+		ret[#ret + 1] = fullMat:Transform(point);
+	end
+	
+	return ret;
+end
+
+local viewportMatrix = vmath.mat4(
+	vmath.vec4(imageWidth / 2, 0, 0, imageWidth / 2),
+	vmath.vec4(0, imageHeight / 2, 0, imageHeight / 2),
+	vmath.vec4(0, 0, 1, 0),
+	vmath.vec4(0, 0, 0, 1))
+
+local function ViewportTransform(listOfPoints)
+	local ret = {};
+	for i, point in ipairs(listOfPoints) do
+		ret[#ret + 1] = vmath.vec2(viewportMatrix:Transform(point));
+	end
+	
+	return ret;
+end
+
+local initialBoxPoints = {
+	vmath.vec3(		 4.0,	 4.0,	 5.0),
+	vmath.vec3(		-4.0,	 4.0,	 5.0),
+	vmath.vec3(		 4.0,	-4.0,	 5.0),
+	vmath.vec3(		-4.0,	-4.0,	 5.0),
+	vmath.vec3(		 1.0,	 1.0,	-5.0),
+	vmath.vec3(		-1.0,	 1.0,	-5.0),
+	vmath.vec3(		 1.0,	-1.0,	-5.0),
+	vmath.vec3(		-1.0,	-1.0,	-5.0),
+}
+
+local initialAxisPoints =
+{
+	vmath.vec3(2.5, 0.0, 0.0),
+	vmath.vec3(10.0, 0.0, 0.0),
+	
+	vmath.vec3(-2.5, 0.0, 0.0),
+	vmath.vec3(-10.0, 0.0, 0.0),
+	
+	vmath.vec3(0.0, 2.5, 0.0),
+	vmath.vec3(0.0, 10.0, 0.0),
+
+	vmath.vec3(0.0, -2.5, 0.0),
+	vmath.vec3(0.0, -10.0, 0.0),
+
+	vmath.vec3(0.0, 0.0, 5.0),
+	vmath.vec3(0.0, 0.0, 10.0),
+
+	vmath.vec3(0.0, 0.0, -5.0),
+	vmath.vec3(0.0, 0.0, -10.0),
+}
+
+local boxPoints = ViewportTransform(LocalTransform(initialBoxPoints));
+local axisPoints = ViewportTransform(LocalTransform(initialAxisPoints));
+
+
+local boxIndexList =
+{
+	{2, 4, 8, 6},
+	{1, 2, 6, 5},
+	{1, 2, 4, 3},
+
+	{1, 3, 7, 5},
+	{3, 4, 8, 7},
+	{5, 6, 8, 7},
+}
+
+local boxList = {}
+
+for i, box in ipairs(boxIndexList) do
+	boxList[i] = {
+		boxPoints[box[1]],
+		boxPoints[box[2]],
+		boxPoints[box[3]],
+		boxPoints[box[4]]}
+end
+
+local styleLib = SvgWriter.StyleLibrary();
+
+styleLib:AddStyle(nil, "black",
+	SvgWriter.Style():stroke("black"):stroke_width("1px"));
+	
+styleLib:AddStyle(nil, "dashed",
+	SvgWriter.Style():stroke_dasharray({3, 3}));
+	
+	
+styleLib:AddStyle(nil, "fill_black",
+	SvgWriter.Style():fill("black"));
+	
+styleLib:AddStyle(nil, "fill_transluscent",
+	SvgWriter.Style():fill("blue"):fill_opacity(0.1));
+
+styleLib:AddStyle(nil, "arrowended",
+	SvgWriter.Style():marker_end(SvgWriter.uriLocalElement("arrowhead")));
+
+local arrowheadPath = SvgWriter.Path();
+arrowheadPath:M{10, 4}:L{0, 0}:L{0, 8}:Z();
+
+
+	
+local writer = SvgWriter.SvgWriter("ViewFrustum.svg", {imageWidth .."px", imageHeight .. "px"});
+	writer:StyleLibrary(styleLib);
+	writer:BeginDefinitions();
+		writer:BeginMarker({10, 8}, {10, 4}, "auto", nil, nil, "arrowhead");
+			writer:Path(arrowheadPath, "black");
+		writer:EndMarker();
+	writer:EndDefinitions();
+
+	--Draw the rear-most lines, with markers.
+	writer:Line(axisPoints[3], axisPoints[4], {"black", "arrowended"});
+	writer:Line(axisPoints[7], axisPoints[8], {"black", "arrowended"});
+	writer:Line(axisPoints[9], axisPoints[10], {"black", "arrowended"});
+	
+	--Draw the rear-most box sides.
+	writer:Polygon(boxList[1], {"black", "fill_transluscent"});
+	writer:Polygon(boxList[2], {"black", "fill_transluscent"});
+	writer:Polygon(boxList[3], {"black", "fill_transluscent"});
+	
+	--Draw the internal lines, no markers.
+	writer:Line(axisPoints[1], axisPoints[3], {"black", "dashed"});
+	writer:Line(axisPoints[5], axisPoints[7], {"black", "dashed"});
+	writer:Line(axisPoints[9], axisPoints[11], {"black", "dashed"});
+	
+	--Draw the front-most boxes.
+	writer:Polygon(boxList[4], {"black", "fill_transluscent"});
+	writer:Polygon(boxList[5], {"black", "fill_transluscent"});
+	writer:Polygon(boxList[6], {"black", "fill_transluscent"});
+	
+	--Draw the front-most lines, with markers.
+	writer:Line(axisPoints[1], axisPoints[2], {"black", "arrowended"});
+	writer:Line(axisPoints[5], axisPoints[6], {"black", "arrowended"});
+	writer:Line(axisPoints[11], axisPoints[12], {"black", "arrowended"});
+	
+writer:Close();
+
+
+

File Documents/Positioning/GenWindingOrder.lua

+require "SvgWriter"
+require "vmath"
+
+-- Sizing
+local numSubImages = 2;
+local subImageWidth, subImageHeight = 300, 300;
+local subImageSpacing = 100;
+
+local imageWidth = (subImageWidth * numSubImages) + (subImageSpacing * (numSubImages - 1));
+local imageHeight = subImageHeight;
+
+local subImageSize = {subImageWidth, imageHeight};
+local pointSize = 10
+local circleRadius = subImageWidth / 8
+
+local subImagePositions = {}
+
+for i = 1, numSubImages, 1 do
+	subImagePositions[i] = {(subImageWidth + subImageSpacing) * (i-1), 0};
+end
+
+
+
+-- Styles
+local styleLib = SvgWriter.StyleLibrary();
+
+styleLib:AddStyle(nil, "black",
+	SvgWriter.Style():stroke("black"):stroke_width("1px"));
+	
+styleLib:AddStyle(nil, "wide_black",
+	SvgWriter.Style():stroke("black"):stroke_width("3px"));
+
+styleLib:AddStyle(nil, "fill_black",
+	SvgWriter.Style():fill("black"));
+	
+styleLib:AddStyle(nil, "fill_none",
+	SvgWriter.Style():fill("none"));
+	
+styleLib:AddStyle(nil, "text",
+	SvgWriter.Style():font_size("30px"):font_family("monospace") );
+	
+styleLib:AddStyle(nil, "pointed",
+	SvgWriter.Style():marker(SvgWriter.uriLocalElement("point")));
+
+styleLib:AddStyle(nil, "arrows",
+	SvgWriter.Style():marker_mid(SvgWriter.uriLocalElement("arrow")):marker_end(SvgWriter.uriLocalElement("arrow")));
+
+-- Paths and other data.
+
+local arrowheadPath = SvgWriter.Path();
+arrowheadPath:M{10, 4}:L{0, 0}:L{0, 8}:Z();
+
+local trianglePoints =
+{
+	vmath.vec2{subImageWidth * 0.3, (subImageHeight * 0.2)},
+	vmath.vec2{subImageWidth * 0.8, (subImageHeight * 0.6)},
+	vmath.vec2{subImageWidth * 0.1, (subImageHeight * 0.8)},
+}
+
+local cwLabelOffsets = 
+{
+	vmath.vec2{-7, -12},
+	vmath.vec2{9, 7},
+	vmath.vec2{-25, 25},
+}
+
+local centerPoint = vmath.vec2();
+
+for i, tPoint in ipairs(trianglePoints) do
+	centerPoint = centerPoint + tPoint;
+end
+centerPoint = centerPoint / 3.0;
+
+local unitCWCirclePoints =
+{
+	vmath.vec2{0.0, 1.0},
+	vmath.vec2{-0.866, -0.5},
+	vmath.vec2{0.866, -0.5},
+}
+
+local circleCwPath = SvgWriter.Path();
+local circleCcwPath = SvgWriter.Path();
+
+do
+	local centerCwTriPoints, centerCcwTriPoints = {}, {}
+
+	for i, tPoint in ipairs(unitCWCirclePoints) do
+		centerCwTriPoints[i] = (circleRadius * tPoint) + centerPoint;
+		centerCcwTriPoints[i] = (circleRadius * (tPoint * vmath.vec2(1.0, -1.0))) + centerPoint;
+	end
+
+	circleCwPath:M(centerCwTriPoints[#centerCwTriPoints])
+	circleCcwPath:M(centerCcwTriPoints[#centerCcwTriPoints])
+
+	for i = 1, #centerCwTriPoints do
+		circleCwPath:A({circleRadius, circleRadius}, 0, 0, 1, centerCwTriPoints[i])
+		circleCcwPath:A({circleRadius, circleRadius}, 0, 0, 0, centerCcwTriPoints[i])
+	end
+end
+
+
+-- The SVG itself.
+local writer = SvgWriter.SvgWriter("WindingOrder.svg", {imageWidth .."px", imageHeight .. "px"});
+	writer:StyleLibrary(styleLib);
+	writer:BeginDefinitions();
+		writer:BeginMarker({pointSize, pointSize}, {pointSize/2, pointSize/2}, "auto", true, nil, "point");
+			writer:Circle({pointSize/2, pointSize/2}, pointSize/2, {"fill_black", "black"});
+		writer:EndMarker();
+		writer:BeginMarker({10, 8}, {0, 4}, "auto", true, nil, "arrow");
+			writer:Path(arrowheadPath, {"fill_black", "black"});
+		writer:EndMarker();
+		writer:BeginGroup(nil, "g_triangle");
+			writer:Polygon(trianglePoints)
+		writer:EndGroup();
+		writer:BeginGroup(nil, "g_cwCircle");
+			writer:Path(circleCwPath, "arrows");
+		writer:EndGroup();
+		writer:BeginGroup(nil, "g_ccwCircle");
+			writer:Path(circleCcwPath, "arrows");
+		writer:EndGroup();
+		writer:BeginGroup(nil, "g_cwLabels");
+			for i, offset in ipairs(cwLabelOffsets) do
+				writer:Text(tostring(i), trianglePoints[i] + offset);
+			end
+		writer:EndGroup();
+		writer:BeginGroup(nil, "g_ccwLabels");
+			writer:Text("1", trianglePoints[1] + cwLabelOffsets[1]);
+			writer:Text("2", trianglePoints[3] + cwLabelOffsets[3]);
+			writer:Text("3", trianglePoints[2] + cwLabelOffsets[2]);
+		writer:EndGroup();
+	writer:EndDefinitions();
+
+	--First subimage: just the triangle.
+	writer:Use("g_triangle", subImagePositions[1], subImageSize, {"black", "fill_none", "pointed"});
+	writer:Use("g_triangle", subImagePositions[2], subImageSize, {"black", "fill_none", "pointed"});
+	writer:Use("g_cwLabels", subImagePositions[1], subImageSize, {"black", "text"});
+	writer:Use("g_cwCircle", subImagePositions[1], subImageSize, {"black", "fill_none"});
+	writer:Use("g_ccwLabels", subImagePositions[2], subImageSize, {"black", "text"});
+	writer:Use("g_ccwCircle", subImagePositions[2], subImageSize, {"black", "fill_none"});
+--	writer:Rect(subImagePositions[1], subImageSize, nil, {"black", "fill_none"});
+--	writer:Rect(subImagePositions[2], subImageSize, nil, {"black", "fill_none"});
+	
+writer:Close();
+
+
+

File Documents/Positioning/Ortho2DProjection.svg

New image added.

File Documents/Positioning/Persp2DProjection.svg

New image added.

File Documents/Positioning/PerspDiagram.svg

New image added.

File Documents/Positioning/Tutorial 04.xml

                 center, then the triangle is facing clockwise relative to the viewer. Otherwise, the
                 triangle is counter-clockwise relative to the viewer. This ordering is called the
                     <glossterm>winding order.</glossterm></para>
-            <!--TODO: Add an image showing the two winding orders for triangles.-->
+            <figure>
+                <title>Triangle Winding Order</title>
+                <mediaobject>
+                    <imageobject>
+                        <imagedata format="SVG" fileref="WindingOrder.svg"/>
+                    </imageobject>
+                </mediaobject>
+                <caption>
+                    <para>The left triangle has a clockwise winding order; the triangle on the right
+                        has a counter-clockwise winding order.</para>
+                </caption>
+            </figure>
             <para>Face culling in OpenGL works based on this ordering. Setting this is a two-step
                 process, and is accomplished by the last two statements of the initialization
                 function.</para>
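As an aside, the winding order of a triangle can be computed directly from its three window-space positions; here is a minimal Lua sketch (not part of the tutorial code) using the sign of the 2D cross product of two edges:

	local function SignedDoubleArea(a, b, c)
		-- Cross product of the edges (b - a) and (c - a); its sign encodes the winding.
		return (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x)
	end

With Y increasing upward, as in OpenGL window coordinates, a positive result means the vertices a, b, c were given in counter-clockwise order and a negative result means clockwise; in a Y-down space such as SVG, the interpretation flips.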
             a finite space of the lower dimensionality. For a 3D to 2D projection, there is a finite
             plane on which the world is projected. For 2D to 1D, there is a bounded line that is the
             result of the projection.</para>
-        <para>This is a diagram of an orthographic projection from 2D to 1D. The 2D scene is
-            projected onto a 1D line.</para>
-        <!--TODO: Add 2D ortho projection-->
         <para>An <glossterm>orthographic projection</glossterm> is a very simplistic projection.
             When projecting onto an axis-aligned surface, as above, the projection simply involves
-            throwing away the coordinate perpendicular to the surface. In the above case, the Y
-            component of the 2D positions are discarded, leaving only X components.</para>
+            throwing away the coordinate perpendicular to the surface.</para>
+        <figure>
+            <title>2D to 1D Orthographic Projection</title>
+            <mediaobject>
+                <imageobject>
+                    <imagedata fileref="Ortho2DProjection.svg" format="SVG"/>
+                </imageobject>
+            </mediaobject>
+            <caption>
+                <para>A scene orthographically projected onto the black line. The gray box
+                    represents the part of the world that is visible to the projection; parts of the
+                    scene outside of this region are not seen.</para>
+            </caption>
+        </figure>
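For the axis-aligned case drawn above, this really is all there is to it; the ProjectPoint helper in GenOrtho2DProjection.lua (earlier in this commit) generates the projected line simply by keeping X and replacing Y with the projection line's Y:

	return vmath.vec2(tPoint.x, projection.lineLoc.y);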
         <para>When projecting onto an arbitrary line, the math is a bit more complicated. But what
             makes it an orthographic projection is that the dimension perpendicular to the surface
             is negated uniformly to create the projection. The fact that it is a projection in the
             orthographic.</para>
         <para>Human eyes do not see the world via orthographic projection. If they did, you would
             only be able to see an area of the world the size of your pupils. Because we do not use
-            orthographic projections (among other things) orthographic projections do not look
+            orthographic projections (among other reasons), orthographic projections do not look
             particularly real to us.</para>
         <para>Instead, we use a pinhole camera model for our eyesight. This model performs a
                 <glossterm>perspective projection</glossterm>. A perspective projection is a
             projection of the world on a surface as though seen through a single point. A 2D to 1D
             perspective projection looks like this:</para>
-        <!--TODO: Add 2D perspective projection-->
+        <figure>
+            <title>2D to 1D Perspective Projection</title>
+            <mediaobject>
+                <imageobject>
+                    <imagedata fileref="Persp2DProjection.svg" format="SVG"/>
+                </imageobject>
+            </mediaobject>
+        </figure>
         <para>As you can see, the projection is radial, based on the location of a particular point.
             That point is the eye of the projection.</para>
         <para>From this point forward, we are going to make a simplifying assumption. The position
         <para>Just from the shape of the projection, we can see that the perspective projection
             causes a larger field of geometry to be projected onto the surface. An orthographic
             projection only captures the rectangular prism directly in front of the surface of
-            projection. A perspective projection captures a larger space of the world:</para>
-        <!--TODO: Add diagram of the region captured in an ortho projection and the region captured in a perspective one.-->
+            projection. A perspective projection captures a larger space of the world.</para>
        <para>In 2D, the shape of the perspective projection is a regular trapezoid (a quadrilateral
            that has only one pair of parallel sides, and the other pair of sides have slopes of equal
            magnitude but opposite sign). In 3D, the shape is called a <glossterm>frustum</glossterm>;
            essentially, a
             pyramid with the tip chopped off.</para>
-        <!--TODO: Add a diagram of a viewing frustum.-->
+        <figure>
+            <title>Viewing Frustum</title>
+            <mediaobject>
+                <imageobject>
+                    <imagedata fileref="ViewFrustum.svg" format="SVG"/>
+                </imageobject>
+            </mediaobject>
+        </figure>
         <section>
             <title>Mathematical Perspective</title>
             <para>Now that we know what we want to do, we just need to know how to do it.</para>
             <para>We will be making a few simplifying assumptions. In addition to the assumption
                 that the eye point is centered relative to the projection surface, we will also
                 assume that the plane of projection is axis aligned and is facing down the -Z axis.
-                Thus, -Z is farther away from the plane. The projection plane also passes through
-                the origin. The size of the plane of projection will be [-1, 1]. Thus, the X and Y
-                coordinates of the eye point are (0, 0).</para>
-            <para>Yes, this sounds suspiciously like normalized device coordinate space. No, that's
-                not a coincidence. But let's not get ahead of ourselves.</para>
+                Thus, -Z is farther away from the plane. The eye will be fixed at the origin, so the
+                eye position is always (0, 0, 0). The size of the plane of projection will be [-1,
+                1] in the X and Y.</para>
+            <para>Yes, this sounds somewhat like normalized device coordinate space. No, that's not
+                a coincidence. But let's not get ahead of ourselves.</para>
             <para>We know a few things about how the projection results will work. A perspective
                 projection essentially shifts vertices towards the eye, based on the location of
                 that particular vertex. Vertices farther in Z from the front of the projection are
                 direction.</para>
             <para>The problem is really just a simple geometry problem. Here is the equivalent form
                 in a 2D to 1D perspective projection.</para>
-            <!--TODO: Add a diagram of the geometry of projection.-->
-            <para>What we have are two similar right triangles. We have the eye position and the
-                position of the unprojected point. The solution in the 2D case is this:</para>
+            <figure>
+                <title>2D to 1D Perspective Projection Diagram</title>
+                <mediaobject>
+                    <imageobject>
+                        <imagedata fileref="PerspDiagram.svg" format="SVG"/>
+                    </imageobject>
+                </mediaobject>
+                <caption>
+                    <para>The projection of the point P onto the projection plane, located at the
+                        origin. R is the projection and E is the eye point.</para>
+                </caption>
+            </figure>
+            <para>What we have are two similar right triangles: the triangle formed by E, R, and the
+                origin, and the triangle formed by E, P, and P<subscript>z</subscript>. We have the
+                eye position and the position of the unprojected point. To find the location of R,
+                we simply do this:</para>
             <equation>
                 <title>Perspective Computation</title>
                 <mediaobject>
                     </imageobject>
                 </mediaobject>
             </equation>
-            <para>Since this is a vectorized function, this is also the solution in 3D. Thus,
-                perspective projection is simply the task of applying that simple formula to every
-                vertex that the vertex shader receives.</para>
+            <para>Since this is a vectorized function, this solution applies equally to 2D as to 3D.
+                Thus, perspective projection is simply the task of applying that simple formula to
+                every vertex that the vertex shader receives.</para>
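In code form, the 2D version of this construction is what the ProjectPoint helper in GenPersp2DProjection.lua (earlier in this commit) evaluates to generate the earlier 2D-to-1D perspective figure: it slides the point along the line through the eye until it reaches the projection line.

	local lineDir = tPoint - projection.eyeLoc;
	local offset = (projection.lineLoc.y - projection.eyeLoc.y) / lineDir.y;
	return (lineDir * offset) + projection.eyeLoc;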
         </section>
         <section>
             <title>The Perspective Divide</title>
                 stated, the projection plane shall be a region [-1, 1] in the X and Y axes, and at a
                 Z value of 0. The projection will be from vertices in the -Z direction onto this
                 plane; vertices that have a positive Z value are behind the projection plane.</para>
-            <para>Now, we will make one more simplifying assumption: the eye of our perspective
-                projection is fixed at (0, 0, 1) in camera space. But, since the projection plane is
-                pointing down the -Z axis, this point relative to the projection plane is at (0, 0,
-                -1). Thus, the offset from the projection plane to the eye is always -1. This means
-                that our perspective term, when phrased as division rather than multiplication, is
-                simply -Pz, the negation of the camera-space Z coordinate.</para>
-            <para>Having a fixed eye position makes it difficult to have zoom-in/zoom-out style
-                effects. This would normally be done by moving the eye position away or towards the
-                projection plane. There is a way to do this, however. All you need to do is, when
-                transforming from camera space to clip space, scale all of the X and Y values by a
-                constant. What this does is make the world, as the camera sees it, smaller or larger
-                in the X and Y axes. It effectively makes the frustum wider or narrower.</para>
+            <para>Now, we will make one more simplifying assumption: the location of the center of
+                the perspective plane is fixed at (0, 0, -1) in camera space. Therefore, since the
+                projection plane is pointing down the -Z axis, the plane's location relative to the
+                eye is (0, 0, -1). Thus, the offset from the eye to the projection plane is always
+                -1 in the Z. This means that our perspective term, when phrased as division rather
+                than multiplication, is simply P<subscript>z</subscript>/-1: the negation of the
+                camera-space Z coordinate.</para>
+            <para>Having a fixed eye position and projection plane makes it difficult to have
+                zoom-in/zoom-out style effects. This would normally be done by moving the plane
+                relative to the fixed eye point. There is a way to do this, however. All you need to
+                do is, when transforming from camera space to clip space, scale all of the X and Y
+                values by a constant. What this does is make the world, as the camera sees it,
+                smaller or larger in the X and Y axes. It effectively makes the frustum wider or
+                narrower.</para>
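A minimal Lua sketch of just the X, Y, and W portion of the camera-to-clip transform described here (the clip-space Z mapping is the subject of the next section); frustumScale is an illustrative name for the zoom constant, not something defined in this commit:

	local function CameraToClipXYW(camPos, frustumScale)
		return {
			x = camPos.x * frustumScale, -- scaling X and Y makes the frustum narrower or wider
			y = camPos.y * frustumScale,
			w = -camPos.z,               -- the later divide by W is the perspective divide by -Pz
		}
	end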
             <para>To compare, camera space and normalized device coordinate space (after the
-                perspective divide) look like this:</para>
-            <!--TODO: Show an image of camera space with a frustum, beside a picture of NDC space with a cube.-->
+                perspective divide) look like this, using a 2D version of a perspective
+                projection:</para>
+            <figure>
+                <title>Camera to NDC Transformation in 2D</title>
+                <mediaobject>
+                    <imageobject>
+                        <imagedata fileref="CameraToPerspective.svg" format="SVG"/>
+                    </imageobject>
+                </mediaobject>
+            </figure>
+            <para>Do note that the direction of viewing is flipped between camera space and
+                normalized device coordinate (NDC) space. Camera space is looking <quote>up</quote>
+                in the left diagram, along the -Z axis, while NDC space is looking
+                <quote>down</quote>, along the +Z axis. Thus, the meaning of front and back is
+                reversed; the points at the top of the left image correspond to the points at the
+                <emphasis>bottom</emphasis> of the right image.</para>
+            <para>If you perform an orthographic projection from NDC space on the right (by dropping
+                the Z coordinate), then what you get is a perspective projection of the world on the
+                left. In effect, what we have done is transform objects into a three-dimensional
+                space from which an orthographic projection will look like a perspective one.</para>
         </section>
         <section>
             <title>Perspective in Depth</title>
                 perspective divide just like the X and Y coordinates, we need to take this into
                 account if we actually want to see anything in our projection.</para>
             <para>Our W coordinate will be based on the camera-space Z coordinate. We need to map Z
-                values on the range [0, -∞) to values on the range [-1, 1]. Since camera space is an
-                infinite range and we're trying to map to a finite range, we need to do some range
-                bounding. The frustum is already finitely bound in the X and Y directions; we simply
-                need to add a Z boundary.</para>
+                values from the camera-space range [0, -∞) to the NDC space range [-1, 1]. Since
+                camera space is an infinite range and we're trying to map to a finite range, we need
+                to do some range bounding. The frustum is already finitely bound in the X and Y
+                directions; we simply need to add a Z boundary.</para>
             <para>The maximum distance that a vertex can be before it is considered no longer in
                 view is the <glossterm>camera zFar</glossterm>. We also have a minimum distance from
-                the mapping plane at 0; this is called the <glossterm>camera zNear</glossterm>. This
-                creates a finite frustum for our camera space viewpoint:</para>
-            <!--TODO: Show the bound camera-space frustum, in 2D. Show the zNear and zFar explicitly, as well as the mapping plane.-->
+                the eye; this is called the <glossterm>camera zNear</glossterm>. This creates a
+                finite frustum for our camera space viewpoint.</para>
             <note>
                 <para>It is very important to remember that these are the zNear and zFar for the
                         <emphasis>camera</emphasis> space. The next tutorial will also introduce a
-                    range of depth, also using the names zNear and zFar, which is a related but
+                    range of depth, also using the names zNear and zFar. This is a related but
                     fundamentally different range.</para>
             </note>
+            <para>The camera zNear can appear to effectively determine the offset between the eye
+                and the projection plane. However, this is not the case. Even if zNear is less than
+                1, which would place the near Z plane <emphasis>behind</emphasis> the projection
+                plane, you still get an effectively valid projection. Objects behind the plane can
+                be projected onto the plane just as well as those in front of it; it is still a
+                perspective projection. Mathematically, this works.</para>
+            <para>What it does <emphasis>not</emphasis> do is what you would expect if you moved the
+                plane of projection. Since the plane of projection has a fixed size (the range [-1,
+                1]), moving the plane would alter where points appear in the projection. Changing
+                the camera zNear does not affect the X, Y position of points in the
+                projection.</para>
             <para>There are several ways to go about mapping one finite range to another. One
                 confounding problem is the perspective divide itself; it is easy to perform a linear
                 mapping between two finite spaces. It is quite another to do a mapping that remains
-                linear after the perspective divide. Since we will be dividing by -Z itself (the
-                camera-space Z, not the clip-space Z), the math is much more complex than you might
-                expect.</para>
+                linear <emphasis>after</emphasis> the perspective divide. Since we will be dividing
+                by -Z itself (the camera-space Z, not the clip-space Z), the math is much more
+                complex than you might expect.</para>
             <para>For reasons that will be better explained in the next tutorial, we will use this
                 modestly complicated function to compute the clip-space Z:</para>
             <equation>
                 values are <emphasis>positive</emphasis>; the equation accounts for this when
                 performing the transformation. Also, zNear <emphasis>cannot</emphasis> be 0; it can
                 be very close to zero, but it must never be exactly zero.</para>
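For reference, this is the same mapping that the TransformToNDC helper in GenCameraToPerspective.lua evaluates when generating the camera-to-NDC figure. Written out with zNear and zFar as positive distances in front of the eye:

\[
Z_{clip} = \frac{zFar + zNear}{zNear - zFar}\,Z_{camera} + \frac{2 \cdot zNear \cdot zFar}{zNear - zFar},
\qquad
Z_{ndc} = \frac{Z_{clip}}{-Z_{camera}}
\]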
+            <para>Let us review the previous diagram of camera-to-NDC transformation in 2D
+                space:</para>
+            <informalfigure>
+                <mediaobject>
+                    <imageobject>
+                        <imagedata fileref="CameraToPerspective.svg" format="SVG"/>
+                    </imageobject>
+                </mediaobject>
+            </informalfigure>
+            <para>The example of 2D camera space vs. 2D NDC space uses this equation to compute the
+                Z values. Take a careful look at how the Z coordinates match. The Z distances are
+                evenly spaced in camera space, but in NDC space, they are non-linearly distributed.
+                And yet simultaneously, points that are collinear in camera space remain collinear
+                in NDC space.</para>
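For instance, with the zNear of 1 and zFar of 3 used to build that figure, camera-space Z values of -1, -2, and -3 map to NDC Z values of -1, +0.5, and +1 respectively; equal steps in camera space become unequal steps in NDC space.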
+            <para>This fact has some interesting properties that we will investigate further in the
+                next tutorial.</para>
         </section>
         <section>
             <title>Drawing in Perspective</title>
                 <quote>camera</quote> are 4-dimensional vectors; namely the clip and camera space
             vectors. The larger block of numbers is a matrix. You probably are not familiar with
             matrix math. If not, it will be explained presently.</para>
-        <para>Generically speaking, a matrix is a two dimensional block of numbers (matrices with
-            more than 2 dimensions are called <quote>tensors</quote>). Matrices are very common in
-            computer graphics. Thus far, we have been able to get along without them. As we get into
-            more detailed object transformations however, we will rely more and more on matrices to
-            simplify matters.</para>
+        <para>Generically speaking, a <glossterm>matrix</glossterm> is a two dimensional block of
+            numbers (matrices with more than 2 dimensions are called <quote>tensors</quote>).
+            Matrices are very common in computer graphics. Thus far, we have been able to get along
+            without them. As we get into more detailed object transformations however, we will rely
+            more and more on matrices to simplify matters.</para>
         <para>In graphics work, we typically use 4x4 matrices; that is, matrices with 4 columns and
             4 rows respectively. This is due to the nature of graphics work: most of the things that
             we want to use matrices for are either 3 dimensional or 3 dimensional with an extra
         <para>A 4x4 matrix is technically 16 values, so a 16-entry array can store a matrix. But
             there are two ways to store a matrix as an array. One way is called
                 <glossterm>column-major</glossterm> order, the other naturally is
-                <glossterm>row-major</glossterm> order. column-major order means that, for an NxM
+                <glossterm>row-major</glossterm> order. Column-major order means that, for an NxM
             matrix (columns x rows), the first N values in the array are the first column
             (top-to-bottom), the next N values are the second column, and so forth. In row-major
             order, the first M values in the array are the first row (left-to-right), followed by
                         center of the window, either horizontally or vertically as
                         appropriate.</para>
                 </listitem>
+                <listitem>
+                    <para>We made some simplifying assumptions in our perspective transformation
+                        algorithm. In particular, we fixed the eye point at (0, 0, 0) and the plane
+                        at (0, 0, -1). However, this was not strictly necessary; we could have
+                        altered our perspective transform algorithm to use a variable eye point.
+                        Adjust the <phrase role="propername">ShaderPerspective</phrase> code to
+                        implement an arbitrary perspective plane location (the size remains fixed
+                        at [-1, 1]).
+                        You will need to offset the X, Y camera-space positions of the vertices by
+                            E<subscript>x</subscript> and E<subscript>y</subscript> respectively,
+                        but only <emphasis>after</emphasis> the scaling (for aspect ratio). And you
+                        will need to divide the camera-space Z term by -E<subscript>z</subscript>
+                        instead of just -1.</para>
+                </listitem>
+                <listitem>
+                    <para>Do the above, but in matrix form. Remember that any terms placed in the
+                        fourth column of the matrix will be added to that component, due to the
+                        multiplication by W<subscript>camera</subscript> (which is always 1.0); the
+                        sketch after this list demonstrates this effect.</para>
+                </listitem>
             </itemizedlist>
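+            <para>The following is a minimal CPU-side sketch (not the tutorial's shader code) of
+                why terms placed in the fourth column of a matrix simply get added to the
+                transformed position: because W<subscript>camera</subscript> is 1.0, the fourth
+                column's values pass through the multiplication unchanged. The particular numbers
+                are arbitrary examples:</para>
+            <programlisting>#include &lt;cstdio&gt;
+
+//Multiplies a column-major 4x4 matrix by a 4-component vector:
+//result[row] is the sum over col of mat[col * 4 + row] * vec[col].
+void MatVecMul(const float mat[16], const float vec[4], float result[4])
+{
+    for(int row = 0; row &lt; 4; ++row)
+    {
+        result[row] = 0.0f;
+        for(int col = 0; col &lt; 4; ++col)
+            result[row] += mat[col * 4 + row] * vec[col];
+    }
+}
+
+int main()
+{
+    //Identity matrix, except for the terms placed in the fourth column.
+    //In column-major storage, the fourth column occupies indices 12-15.
+    const float mat[16] = {
+        1.0f, 0.0f, 0.0f, 0.0f,    //first column
+        0.0f, 1.0f, 0.0f, 0.0f,    //second column
+        0.0f, 0.0f, 1.0f, 0.0f,    //third column
+        2.0f, 3.0f, 4.0f, 1.0f,    //fourth column: the terms to be added
+    };
+
+    //A camera-space position, with a W of 1.0.
+    const float cameraPos[4] = {5.0f, 6.0f, 7.0f, 1.0f};
+    float result[4];
+    MatVecMul(mat, cameraPos, result);
+
+    //Prints 7.0 9.0 11.0 1.0: the fourth-column terms were simply added.
+    printf("%.1f %.1f %.1f %.1f\n", result[0], result[1], result[2], result[3]);
+    return 0;
+}</programlisting>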
         </section>
         <section>
                         <para>These two functions control how face culling works.
                                 <function>glFrontFace</function> defines which triangle winding
                             order is considered the front. <function>glCullFace</function> defines
-                            what face gets culled.</para>
+                            what face gets culled. This function can also cull
+                                <emphasis>all</emphasis> faces, though that is not terribly useful
+                            if you actually want to render something.</para>
                         <para>These functions only do something useful if
-                                <literal>GL_CULL_FACE</literal> is currently enabled.</para>
+                                <literal>GL_CULL_FACE</literal> is currently enabled. They still set
+                            their state internally even if <literal>GL_CULL_FACE</literal> is not
+                            enabled, so enabling it later will use the most recently set
+                            values.</para>
                     </glossdef>
                 </glossentry>
             </glosslist>
         <glossentry>
             <glossterm>face culling</glossterm>
             <glossdef>
-                <para/>
+                <para>The ability to cull triangles based on the winding order of the triangle. This
+                    functionality is activated in OpenGL by using <function>glEnable</function> with
+                        <literal>GL_CULL_FACE</literal>. Which faces get culled is determined by the
+                        <function>glCullFace</function> and <function>glFrontFace</function>
+                    functions.</para>
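+                <para>A typical sequence of calls to turn it on looks like the following; the
+                    choice of <literal>GL_BACK</literal> and <literal>GL_CW</literal> here is only
+                    an example:</para>
+                <programlisting>glEnable(GL_CULL_FACE);
+glCullFace(GL_BACK);    //Cull triangles facing away from the viewer.
+glFrontFace(GL_CW);     //A clockwise winding order, in window space, is the front.</programlisting>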
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>winding order</glossterm>
             <glossdef>
-                <para/>
+                <para>The order, clockwise or counter-clockwise, in which the three vertices of a
+                    triangle are received. This is measured two-dimensionally, in window
+                    coordinates.</para>
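+                <para>One way to classify the winding order from the three window-space positions
+                    is the sign of the triangle's signed area; the following is only an illustrative
+                    sketch, not how any particular implementation computes it:</para>
+                <programlisting>#include &lt;cstdio&gt;
+
+//Returns the winding order of a triangle, given its 2D window-space vertices
+//in the order they were received. Twice the signed area of the triangle is
+//computed; with Y increasing upwards, a positive value means counter-clockwise.
+const char *WindingOrder(const float a[2], const float b[2], const float c[2])
+{
+    float signedArea = (b[0] - a[0]) * (c[1] - a[1])
+        - (c[0] - a[0]) * (b[1] - a[1]);
+    return (signedArea > 0.0f) ? "counter-clockwise" : "clockwise";
+}
+
+int main()
+{
+    const float a[2] = {0.0f, 0.0f};
+    const float b[2] = {1.0f, 0.0f};
+    const float c[2] = {0.0f, 1.0f};
+    printf("%s\n", WindingOrder(a, b, c));    //Prints counter-clockwise.
+    return 0;
+}</programlisting>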
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>projection</glossterm>
             <glossdef>
-                <para/>
+                <para>The act of taking a series of objects in a higher dimension and presenting
+                    those objects in a lower dimension. The act of rendering a 3D scene to a 2D
+                    image requires projecting that scene from three dimensions into two
+                    dimensions.</para>
+                <para>Projection always happens relative to a surface of projection. Projecting 2D
+                    space onto a 1D space requires a finite line to be projected on. Projecting 3D
+                    space onto 2D space requires a plane of projection. This surface is defined in
+                    the higher dimension's world.</para>
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>orthographic projection</glossterm>
             <glossdef>
-                <para/>
+                <para>A form of projection that simply discards all offsets in the direction
+                    perpendicular to the surface of projection. When doing a 3D to 2D orthographic
+                    projection, if the plane is axis-aligned, then the projection is trivial: the
+                    coordinate perpendicular to the plane of projection is simply dropped. If the
+                    plane is not axis-aligned, then the math is more complex, but the effect is the
+                    same.</para>
+                <para>Orthographic projections are uniform in the direction of the projection.
+                    Because of this uniformity, lines that are parallel in the higher-dimensional
+                    space are guaranteed to remain parallel in the lower-dimensional space.</para>
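+                <para>For the axis-aligned 3D to 2D case, the operation is trivial to express in
+                    code; this is only a sketch, and the type names are for illustration:</para>
+                <programlisting>//Axis-aligned orthographic projection onto the XY plane: the Z coordinate,
+//which is perpendicular to the plane of projection, is simply discarded.
+struct Vec3 { float x; float y; float z; };
+struct Vec2 { float x; float y; };
+
+Vec2 OrthographicProject(Vec3 point)
+{
+    Vec2 projected = {point.x, point.y};
+    return projected;
+}</programlisting>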
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>perspective projection</glossterm>
             <glossdef>
-                <para/>
+                <para>A form of projection that projects points onto the surface relative to a
+                    particular position: the eye position. Perspective projections attempt to
+                    emulate a pinhole camera model, which is similar to how human eyes see. The
+                    positions of objects in space are projected onto the surface of projection
+                    along lines radiating from the eye position.</para>
+                <para>Parallel lines in the higher dimension are <emphasis>not</emphasis> guaranteed
+                    to remain parallel in the lower dimension. They might, but they might
+                    not.</para>
+            </glossdef>
+        </glossentry>
+        <glossentry>
+            <glossterm>frustum</glossterm>
+            <glossdef>
+                <para>Geometrically, a frustum is a 3D shape: a pyramid that has the top chopped
+                    off. The viewable region of a 3D to 2D perspective projection, from the eye
+                    through the plane of projection, has the shape of a frustum.</para>
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>perspective divide</glossterm>
             <glossdef>
-                <para/>
+                <para>Another name for the transformation from clip space to normalized device
+                    coordinate space: the division of the clip-space X, Y, and Z by the clip-space
+                    W. It is so called because this division is what allows perspective projection
+                    to work with matrix math; a matrix multiplication alone could not perform the
+                    full perspective projection operation.</para>
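+                <para>As a minimal sketch (the type names are only for illustration), the operation
+                    itself is just:</para>
+                <programlisting>//Clip-space position (x, y, z, w) becomes NDC position (x/w, y/w, z/w).
+struct Vec4 { float x; float y; float z; float w; };
+struct Vec3 { float x; float y; float z; };
+
+Vec3 PerspectiveDivide(Vec4 clipPos)
+{
+    Vec3 ndcPos = {clipPos.x / clipPos.w, clipPos.y / clipPos.w, clipPos.z / clipPos.w};
+    return ndcPos;
+}</programlisting>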
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>camera space</glossterm>
             <glossdef>
-                <para/>
+                <para>An arbitrarily defined, but highly useful, space from which the perspective
+                    projection can be performed relatively easily. Camera space is an infinitely
+                    large space, with positive X going right, positive Y going up, and positive Z
+                    coming towards the viewer.</para>
+                <para>In camera space, the eye position of the perspective projection is assumed to
+                    be at (0, 0, 1), and the plane of projection is a [-1, 1] plane in X and Y,
+                    which passes through the 3D origin. Thus, all points that have a positive Z are
+                    considered to be behind the camera and therefore out of view.</para>
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>camera zNear, camera zFar</glossterm>
             <glossdef>
-                <para/>
+                <para>Normalized device coordinate (NDC) space is bounded in all dimensions on the
+                    range [-1, 1]. Camera space is unbounded, but the perspective transform
+                    implicitly bounds what is considered in view to [-1, 1] in the X and Y axes.
+                    This leaves the Z axis unbounded, which NDC space does not allow.</para>
+                <para>The camera zNear and zFar values define the minimum and maximum extent of Z
+                    in the perspective projection transform. These values are positive, though they
+                    represent negative Z values in camera space. Using the standard perspective
+                    transform, both values must be greater than 0, and zNear must be less than
+                    zFar.</para>
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>swizzle selection</glossterm>
             <glossdef>
+                <para>Swizzle selection is a vector technique, largely unique to shading languages,
+                    that allows you to take a vector and arbitrarily build other vectors from its
+                    components. This selection is completely arbitrary; you can build a vec4 from a
+                    vec2, or any other combination you wish, up to 4 components.</para>
+                <para>Swizzle selections use combinations of <quote>x,</quote>
+                    <quote>y,</quote>
+                    <quote>z,</quote> and <quote>w</quote> to pick components out of the input
+                    vector. Swizzle operations look like this:</para>
+                <programlisting>vec2 firstVec;
+vec4 secondVec = firstVec.xyxx;
+vec3 thirdVec = secondVec.wzy;</programlisting>
+                <para>On graphics hardware, swizzle selection is considered so fast as to be
+                    effectively free; graphics hardware is built with swizzle selection in
+                    mind.</para>
+            </glossdef>
+        </glossentry>
+        <glossentry>
+            <glossterm>matrix</glossterm>
+            <glossdef>
                 <para/>
             </glossdef>
         </glossentry>
         <glossentry>
             <glossterm>column-major, row-major</glossterm>
             <glossdef>
-                <para/>
+                <para>These terms define the two ways in which a matrix can be stored as an array of
+                    values. Column-major order means that, for an NxM matrix (columns x rows), the
+                    first N values in the array are the first column (top-to-bottom), the next N
+                    values are the second column, and so forth. In row-major order, the first M
+                    values in the array are the first row (left-to-right), followed by another M
+                    values for the second row, and so forth.</para>
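+                <para>As a short sketch, here is the same 4x4 matrix stored both ways as a 16-entry
+                    array. The values are just labels: the first digit is the row, the second is
+                    the column:</para>
+                <programlisting>//The matrix as written on paper, with entries labeled row-then-column:
+//  11 12 13 14
+//  21 22 23 24
+//  31 32 33 34
+//  41 42 43 44
+
+//Column-major: each group of 4 values is one column, top-to-bottom.
+float columnMajor[16] = {
+    11, 21, 31, 41,    //first column
+    12, 22, 32, 42,    //second column
+    13, 23, 33, 43,    //third column
+    14, 24, 34, 44,    //fourth column
+};
+
+//Row-major: each group of 4 values is one row, left-to-right.
+float rowMajor[16] = {
+    11, 12, 13, 14,    //first row
+    21, 22, 23, 24,    //second row
+    31, 32, 33, 34,    //third row
+    41, 42, 43, 44,    //fourth row
+};</programlisting>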
             </glossdef>
         </glossentry>
     </glossary>

File Documents/Positioning/ViewFrustum.svg

Added
New image

File Documents/Positioning/WindingOrder.svg

Added
New image

File Documents/Positioning/test.svg

Removed
Old image

File Documents/Positioning/test.xml

-<?xml version="1.0" encoding="utf-8"?>
-<math display="block" xmlns="http://www.w3.org/1998/Math/MathML">
-<mrow>
-  <msup>
-          <mn>5</mn>
-        <mn>2</mn>
-  </msup>
-</mrow>
-</math>

File Documents/Tutorial Documents.xpr

                             </scenarioAssociation>
                             <scenarioAssociation>
                                 <field name="name">
+                                    <String xml:space="preserve">Tutorial to HTML</String>
+                                </field>
+                                <field name="type">
+                                    <String xml:space="preserve">XSL</String>
+                                </field>
+                                <field name="url">
+                                    <String xml:space="preserve">Basics/Tutorial%2000.xml</String>
+                                </field>
+                            </scenarioAssociation>
+                            <scenarioAssociation>
+                                <field name="name">
                                     <String xml:space="preserve">Docbook PDF Printable</String>
                                 </field>
                                 <field name="type">
                             </scenarioAssociation>
                             <scenarioAssociation>
                                 <field name="name">
-                                    <String xml:space="preserve">Tutorial to HTML</String>
-                                </field>
-                                <field name="type">
-                                    <String xml:space="preserve">XSL</String>
-                                </field>
-                                <field name="url">
-                                    <String xml:space="preserve">Basics/Tutorial%2000.xml</String>
-                                </field>
-                            </scenarioAssociation>
-                            <scenarioAssociation>
-                                <field name="name">
                                     <String xml:space="preserve">Docbook PDF Printable</String>
                                 </field>
                                 <field name="type">