# Commits

committed 775699c

more

• Participants
• Parent commits 58e4a84

# File ex7/computeCentroids.m

`+function centroids = computeCentroids(X, idx, K)`
`+%COMPUTECENTROIDS returns the new centroids by computing the means of the `
`+%data points assigned to each centroid.`
`+%   centroids = COMPUTECENTROIDS(X, idx, K) returns the new centroids by `
`+%   computing the means of the data points assigned to each centroid. It is`
`+%   given a dataset X where each row is a single data point, a vector`
`+%   idx of centroid assignments (i.e. each entry in range [1..K]) for each`
`+%   example, and K, the number of centroids. You should return a matrix`
`+%   centroids, where each row of centroids is the mean of the data points`
`+%   assigned to it.`
`+%`
`+`
`+% Useful variables`
`+[m n] = size(X);`
`+`
`+% You need to return the following variables correctly.`
`+centroids = zeros(K, n);`
`+`
`+`
`+% ====================== YOUR CODE HERE ======================`
`+% Instructions: Go over every centroid and compute mean of all points that`
`+%               belong to it. Concretely, the row vector centroids(i, :)`
`+%               should contain the mean of the data points assigned to`
`+%               centroid i.`
`+%`
`+% Note: You can use a for-loop over the centroids to compute this.`
`+%`
`+`
`+`
`+`
`+`
`+`
`+`
`+`
`+`
`+% =============================================================`
`+`
`+`
`+end`
`+`

# File ex7/displayData.m

`+function [h, display_array] = displayData(X, example_width)`
`+%DISPLAYDATA Display 2D data in a nice grid`
`+%   [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data`
`+%   stored in X in a nice grid. It returns the figure handle h and the `
`+%   displayed array if requested.`
`+`
`+% Set example_width automatically if not passed in`
`+if ~exist('example_width', 'var') || isempty(example_width) `
`+	example_width = round(sqrt(size(X, 2)));`
`+end`
`+`
`+% Gray Image`
`+colormap(gray);`
`+`
`+% Compute rows, cols`
`+[m n] = size(X);`
`+example_height = (n / example_width);`
`+`
`+% Compute number of items to display`
`+display_rows = floor(sqrt(m));`
`+display_cols = ceil(m / display_rows);`
`+`
`+% Between images padding`
`+pad = 1;`
`+`
`+% Setup blank display`
`+display_array = - ones(pad + display_rows * (example_height + pad), ...`
`+                       pad + display_cols * (example_width + pad));`
`+`
`+% Copy each example into a patch on the display array`
`+curr_ex = 1;`
`+for j = 1:display_rows`
`+	for i = 1:display_cols`
`+		if curr_ex > m, `
`+			break; `
`+		end`
`+		% Copy the patch`
`+		`
`+		% Get the max value of the patch`
`+		max_val = max(abs(X(curr_ex, :)));`
`+		display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...`
`+		              pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...`
`+						reshape(X(curr_ex, :), example_height, example_width) / max_val;`
`+		curr_ex = curr_ex + 1;`
`+	end`
`+	if curr_ex > m, `
`+		break; `
`+	end`
`+end`
`+`
`+% Display Image`
`+h = imagesc(display_array, [-1 1]);`
`+`
`+% Do not show axis`
`+axis image off`
`+`
`+drawnow;`
`+`
`+end`

# File ex7/drawLine.m

`+function drawLine(p1, p2, varargin)`
`+%DRAWLINE Draws a line from point p1 to point p2`
`+%   DRAWLINE(p1, p2) Draws a line from point p1 to point p2 and holds the`
`+%   current figure`
`+`
`+plot([p1(1) p2(1)], [p1(2) p2(2)], varargin{:});`
`+`
`+end`

# File ex7/ex7.m

`+%% Machine Learning Online Class`
`+%  Exercise 7 | Principal Component Analysis and K-Means Clustering`
`+%`
`+%  Instructions`
`+%  ------------`
`+%`
`+%  This file contains code that helps you get started on the`
`+%  exercise. You will need to complete the following functions:`
`+%`
`+%     pca.m`
`+%     projectData.m`
`+%     recoverData.m`
`+%     computeCentroids.m`
`+%     findClosestCentroids.m`
`+%     kMeansInitCentroids.m`
`+%`
`+%  For this exercise, you will not need to change any code in this file,`
`+%  or any other files other than those mentioned above.`
`+%`
`+`
`+%% Initialization`
`+clear ; close all; clc`
`+`
`+%% ================= Part 1: Find Closest Centroids ====================`
`+%  To help you implement K-Means, we have divided the learning algorithm `
`+%  into two functions -- findClosestCentroids and computeCentroids. In this`
`+%  part, you should complete the code in the findClosestCentroids function. `
`+%`
`+fprintf('Finding closest centroids.\n\n');`
`+`
`+% Load an example dataset that we will be using`
`+load('ex7data2.mat');`
`+`
`+% Select an initial set of centroids`
`+K = 3; % 3 Centroids`
`+initial_centroids = [3 3; 6 2; 8 5];`
`+`
`+% Find the closest centroids for the examples using the`
`+% initial_centroids`
`+idx = findClosestCentroids(X, initial_centroids);`
`+`
`+fprintf('Closest centroids for the first 3 examples: \n')`
`+fprintf(' %d', idx(1:3));`
`+fprintf('\n(the closest centroids should be 1, 3, 2 respectively)\n');`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% ===================== Part 2: Compute Means =========================`
`+%  After implementing the closest centroids function, you should now`
`+%  complete the computeCentroids function.`
`+%`
`+fprintf('\nComputing centroids means.\n\n');`
`+`
`+%  Compute means based on the closest centroids found in the previous part.`
`+centroids = computeCentroids(X, idx, K);`
`+`
`+fprintf('Centroids computed after initial finding of closest centroids: \n')`
`+fprintf(' %f %f \n' , centroids');`
`+fprintf('\n(the centroids should be\n');`
`+fprintf('   [ 2.428301 3.157924 ]\n');`
`+fprintf('   [ 5.813503 2.633656 ]\n');`
`+fprintf('   [ 7.119387 3.616684 ]\n\n');`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% =================== Part 3: K-Means Clustering ======================`
`+%  After you have completed the two functions computeCentroids and`
`+%  findClosestCentroids, you have all the necessary pieces to run the`
`+%  kMeans algorithm. In this part, you will run the K-Means algorithm on`
`+%  the example dataset we have provided. `
`+%`
`+fprintf('\nRunning K-Means clustering on example dataset.\n\n');`
`+`
`+% Load an example dataset`
`+load('ex7data2.mat');`
`+`
`+% Settings for running K-Means`
`+K = 3;`
`+max_iters = 10;`
`+`
`+% For consistency, here we set centroids to specific values`
`+% but in practice you want to generate them automatically, such as by`
`+% settings them to be random examples (as can be seen in`
`+% kMeansInitCentroids).`
`+initial_centroids = [3 3; 6 2; 8 5];`
`+`
`+% Run K-Means algorithm. The 'true' at the end tells our function to plot`
`+% the progress of K-Means`
`+[centroids, idx] = runkMeans(X, initial_centroids, max_iters, true);`
`+fprintf('\nK-Means Done.\n\n');`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% ============= Part 4: K-Means Clustering on Pixels ===============`
`+%  In this exercise, you will use K-Means to compress an image. To do this,`
`+%  you will first run K-Means on the colors of the pixels in the image and`
`+%  then you will map each pixel onto its closest centroid.`
`+%  `
`+%  You should now complete the code in kMeansInitCentroids.m`
`+%`
`+`
`+fprintf('\nRunning K-Means clustering on pixels from an image.\n\n');`
`+`
`+%  Load an image of a bird`
`+A = double(imread('bird_small.png'));`
`+`
`+% If imread does not work for you, you can try instead`
`+%   load ('bird_small.mat');`
`+`
`+A = A / 255; % Divide by 255 so that all values are in the range 0 - 1`
`+`
`+% Size of the image`
`+img_size = size(A);`
`+`
`+% Reshape the image into an Nx3 matrix where N = number of pixels.`
`+% Each row will contain the Red, Green and Blue pixel values`
`+% This gives us our dataset matrix X that we will use K-Means on.`
`+X = reshape(A, img_size(1) * img_size(2), 3);`
`+`
`+% Run your K-Means algorithm on this data`
`+% You should try different values of K and max_iters here`
`+K = 16; `
`+max_iters = 10;`
`+`
`+% When using K-Means, it is important to initialize the centroids`
`+% randomly. `
`+% You should complete the code in kMeansInitCentroids.m before proceeding`
`+initial_centroids = kMeansInitCentroids(X, K);`
`+`
`+% Run K-Means`
`+[centroids, idx] = runkMeans(X, initial_centroids, max_iters);`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% ================= Part 5: Image Compression ======================`
`+%  In this part of the exercise, you will use the clusters of K-Means to`
`+%  compress an image. To do this, we first find the closest clusters for`
`+%  each example. After that, we `
`+`
`+fprintf('\nApplying K-Means to compress an image.\n\n');`
`+`
`+% Find closest cluster members`
`+idx = findClosestCentroids(X, centroids);`
`+`
`+% Essentially, now we have represented the image X as in terms of the`
`+% indices in idx. `
`+`
`+% We can now recover the image from the indices (idx) by mapping each pixel`
`+% (specified by its index in idx) to the centroid value`
`+X_recovered = centroids(idx,:);`
`+`
`+% Reshape the recovered image into proper dimensions`
`+X_recovered = reshape(X_recovered, img_size(1), img_size(2), 3);`
`+`
`+% Display the original image `
`+subplot(1, 2, 1);`
`+imagesc(A); `
`+title('Original');`
`+`
`+% Display compressed image side by side`
`+subplot(1, 2, 2);`
`+imagesc(X_recovered)`
`+title(sprintf('Compressed, with %d colors.', K));`
`+`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`

# File ex7/ex7_pca.m

`+%% Machine Learning Online Class`
`+%  Exercise 7 | Principal Component Analysis and K-Means Clustering`
`+%`
`+%  Instructions`
`+%  ------------`
`+%`
`+%  This file contains code that helps you get started on the`
`+%  exercise. You will need to complete the following functions:`
`+%`
`+%     pca.m`
`+%     projectData.m`
`+%     recoverData.m`
`+%     computeCentroids.m`
`+%     findClosestCentroids.m`
`+%     kMeansInitCentroids.m`
`+%`
`+%  For this exercise, you will not need to change any code in this file,`
`+%  or any other files other than those mentioned above.`
`+%`
`+`
`+%% Initialization`
`+clear ; close all; clc`
`+`
`+%% ================== Part 1: Load Example Dataset  ===================`
`+%  We start this exercise by using a small dataset that is easy to`
`+%  visualize`
`+%`
`+fprintf('Visualizing example dataset for PCA.\n\n');`
`+`
`+%  The following command loads the dataset. You should now have the `
`+%  variable X in your environment`
`+load ('ex7data1.mat');`
`+`
`+%  Visualize the example dataset`
`+plot(X(:, 1), X(:, 2), 'bo');`
`+axis([0.5 6.5 2 8]); axis square;`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% =============== Part 2: Principal Component Analysis ===============`
`+%  You should now implement PCA, a dimension reduction technique. You`
`+%  should complete the code in pca.m`
`+%`
`+fprintf('\nRunning PCA on example dataset.\n\n');`
`+`
`+%  Before running PCA, it is important to first normalize X`
`+[X_norm, mu, sigma] = featureNormalize(X);`
`+`
`+%  Run PCA`
`+[U, S] = pca(X_norm);`
`+`
`+%  Compute mu, the mean of each feature`
`+`
`+%  Draw the eigenvectors centered at mean of data. These lines show the`
`+%  directions of maximum variations in the dataset.`
`+hold on;`
`+drawLine(mu, mu + 1.5 * S(1,1) * U(:,1)', '-k', 'LineWidth', 2);`
`+drawLine(mu, mu + 1.5 * S(2,2) * U(:,2)', '-k', 'LineWidth', 2);`
`+hold off;`
`+`
`+fprintf('Top eigenvector: \n');`
`+fprintf(' U(:,1) = %f %f \n', U(1,1), U(2,1));`
`+fprintf('\n(you should expect to see -0.707107 -0.707107)\n');`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% =================== Part 3: Dimension Reduction ===================`
`+%  You should now implement the projection step to map the data onto the `
`+%  first k eigenvectors. The code will then plot the data in this reduced `
`+%  dimensional space.  This will show you what the data looks like when `
`+%  using only the corresponding eigenvectors to reconstruct it.`
`+%`
`+%  You should complete the code in projectData.m`
`+%`
`+fprintf('\nDimension reduction on example dataset.\n\n');`
`+`
`+%  Plot the normalized dataset (returned from pca)`
`+plot(X_norm(:, 1), X_norm(:, 2), 'bo');`
`+axis([-4 3 -4 3]); axis square`
`+`
`+%  Project the data onto K = 1 dimension`
`+K = 1;`
`+Z = projectData(X_norm, U, K);`
`+fprintf('Projection of the first example: %f\n', Z(1));`
`+fprintf('\n(this value should be about 1.481274)\n\n');`
`+`
`+X_rec  = recoverData(Z, U, K);`
`+fprintf('Approximation of the first example: %f %f\n', X_rec(1, 1), X_rec(1, 2));`
`+fprintf('\n(this value should be about  -1.047419 -1.047419)\n\n');`
`+`
`+%  Draw lines connecting the projected points to the original points`
`+hold on;`
`+plot(X_rec(:, 1), X_rec(:, 2), 'ro');`
`+for i = 1:size(X_norm, 1)`
`+    drawLine(X_norm(i,:), X_rec(i,:), '--k', 'LineWidth', 1);`
`+end`
`+hold off`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% =============== Part 4: Loading and Visualizing Face Data =============`
`+%  We start the exercise by first loading and visualizing the dataset.`
`+%  The following code will load the dataset into your environment`
`+%`
`+fprintf('\nLoading face dataset.\n\n');`
`+`
`+%  Load Face dataset`
`+load ('ex7faces.mat')`
`+`
`+%  Display the first 100 faces in the dataset`
`+displayData(X(1:100, :));`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% =========== Part 5: PCA on Face Data: Eigenfaces  ===================`
`+%  Run PCA and visualize the eigenvectors which are in this case eigenfaces`
`+%  We display the first 36 eigenfaces.`
`+%`
`+fprintf(['\nRunning PCA on face dataset.\n' ...`
`+         '(this might take a minute or two ...)\n\n']);`
`+`
`+%  Before running PCA, it is important to first normalize X by subtracting `
`+%  the mean value from each feature`
`+[X_norm, mu, sigma] = featureNormalize(X);`
`+`
`+%  Run PCA`
`+[U, S] = pca(X_norm);`
`+`
`+%  Visualize the top 36 eigenvectors found`
`+displayData(U(:, 1:36)');`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% ============= Part 6: Dimension Reduction for Faces =================`
`+%  Project images to the eigen space using the top k eigenvectors `
`+%  If you are applying a machine learning algorithm `
`+fprintf('\nDimension reduction for face dataset.\n\n');`
`+`
`+K = 100;`
`+Z = projectData(X_norm, U, K);`
`+`
`+fprintf('The projected data Z has a size of: ')`
`+fprintf('%d ', size(Z));`
`+`
`+fprintf('\n\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% ==== Part 7: Visualization of Faces after PCA Dimension Reduction ====`
`+%  Project images to the eigen space using the top K eigen vectors and `
`+%  visualize only using those K dimensions`
`+%  Compare to the original input, which is also displayed`
`+`
`+fprintf('\nVisualizing the projected (reduced dimension) faces.\n\n');`
`+`
`+K = 100;`
`+X_rec  = recoverData(Z, U, K);`
`+`
`+% Display normalized data`
`+subplot(1, 2, 1);`
`+displayData(X_norm(1:100,:));`
`+title('Original faces');`
`+axis square;`
`+`
`+% Display reconstructed data from only k eigenfaces`
`+subplot(1, 2, 2);`
`+displayData(X_rec(1:100,:));`
`+title('Recovered faces');`
`+axis square;`
`+`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% === Part 8(a): Optional (ungraded) Exercise: PCA for Visualization ===`
`+%  One useful application of PCA is to use it to visualize high-dimensional`
`+%  data. In the last K-Means exercise you ran K-Means on 3-dimensional `
`+%  pixel colors of an image. We first visualize this output in 3D, and then`
`+%  apply PCA to obtain a visualization in 2D.`
`+`
`+close all; close all; clc`
`+`
`+% Re-load the image from the previous exercise and run K-Means on it`
`+% For this to work, you need to complete the K-Means assignment first`
`+A = double(imread('bird_small.png'));`
`+`
`+% If imread does not work for you, you can try instead`
`+%   load ('bird_small.mat');`
`+`
`+A = A / 255;`
`+img_size = size(A);`
`+X = reshape(A, img_size(1) * img_size(2), 3);`
`+K = 16; `
`+max_iters = 10;`
`+initial_centroids = kMeansInitCentroids(X, K);`
`+[centroids, idx] = runkMeans(X, initial_centroids, max_iters);`
`+`
`+%  Sample 1000 random indexes (since working with all the data is`
`+%  too expensive). If you have a fast computer, you may increase this.`
`+sel = floor(rand(1000, 1) * size(X, 1)) + 1;`
`+`
`+%  Setup Color Palette`
`+palette = hsv(K);`
`+colors = palette(idx(sel), :);`
`+`
`+%  Visualize the data and centroid memberships in 3D`
`+figure;`
`+scatter3(X(sel, 1), X(sel, 2), X(sel, 3), 10, colors);`
`+title('Pixel dataset plotted in 3D. Color shows centroid memberships');`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% === Part 8(b): Optional (ungraded) Exercise: PCA for Visualization ===`
`+% Use PCA to project this cloud to 2D for visualization`
`+`
`+% Subtract the mean to use PCA`
`+[X_norm, mu, sigma] = featureNormalize(X);`
`+`
`+% PCA and project the data to 2D`
`+[U, S] = pca(X_norm);`
`+Z = projectData(X_norm, U, 2);`
`+`
`+% Plot in 2D`
`+figure;`
`+plotDataPoints(Z(sel, :), idx(sel), K);`
`+title('Pixel dataset plotted in 2D, using PCA for dimensionality reduction');`
`+fprintf('Program paused. Press enter to continue.\n');`
`+pause;`

# File ex7/featureNormalize.m

`+function [X_norm, mu, sigma] = featureNormalize(X)`
`+%FEATURENORMALIZE Normalizes the features in X `
`+%   FEATURENORMALIZE(X) returns a normalized version of X where`
`+%   the mean value of each feature is 0 and the standard deviation`
`+%   is 1. This is often a good preprocessing step to do when`
`+%   working with learning algorithms.`
`+`
`+mu = mean(X);`
`+X_norm = bsxfun(@minus, X, mu);`
`+`
`+sigma = std(X_norm);`
`+X_norm = bsxfun(@rdivide, X_norm, sigma);`
`+`
`+`
`+% ============================================================`
`+`
`+end`

# File ex7/findClosestCentroids.m

`+function idx = findClosestCentroids(X, centroids)`
`+%FINDCLOSESTCENTROIDS computes the centroid memberships for every example`
`+%   idx = FINDCLOSESTCENTROIDS (X, centroids) returns the closest centroids`
`+%   in idx for a dataset X where each row is a single example. idx = m x 1 `
`+%   vector of centroid assignments (i.e. each entry in range [1..K])`
`+%`
`+`
`+% Set K`
`+K = size(centroids, 1);`
`+`
`+% You need to return the following variables correctly.`
`+idx = zeros(size(X,1), 1);`
`+`
`+% ====================== YOUR CODE HERE ======================`
`+% Instructions: Go over every example, find its closest centroid, and store`
`+%               the index inside idx at the appropriate location.`
`+%               Concretely, idx(i) should contain the index of the centroid`
`+%               closest to example i. Hence, it should be a value in the `
`+%               range 1..K`
`+%`
`+% Note: You can use a for-loop over the examples to compute this.`
`+%`
`+`
`+`
`+`
`+`
`+`
`+`
`+`
`+% =============================================================`
`+`
`+end`
`+`

# File ex7/kMeansInitCentroids.m

`+function centroids = kMeansInitCentroids(X, K)`
`+%KMEANSINITCENTROIDS This function initializes K centroids that are to be `
`+%used in K-Means on the dataset X`
`+%   centroids = KMEANSINITCENTROIDS(X, K) returns K initial centroids to be`
`+%   used with the K-Means on the dataset X`
`+%`
`+`
`+% You should return these values correctly`
`+centroids = zeros(K, size(X, 2));`
`+`
`+% ====================== YOUR CODE HERE ======================`
`+% Instructions: You should set centroids to randomly chosen examples from`
`+%               the dataset X`
`+%`
`+`
`+`
`+`
`+`
`+`
`+`
`+`
`+`
`+% =============================================================`
`+`
`+end`
`+`

# File ex7/pca.m

`+function [U, S] = pca(X)`
`+%PCA Run principal component analysis on the dataset X`
`+%   [U, S, X] = pca(X) computes eigenvectors of the covariance matrix of X`
`+%   Returns the eigenvectors U, the eigenvalues (on diagonal) in S`
`+%`
`+`
`+% Useful values`
`+[m, n] = size(X);`
`+`
`+% You need to return the following variables correctly.`
`+U = zeros(n);`
`+S = zeros(n);`
`+`
`+% ====================== YOUR CODE HERE ======================`
`+% Instructions: You should first compute the covariance matrix. Then, you`
`+%               should use the "svd" function to compute the eigenvectors`
`+%               and eigenvalues of the covariance matrix. `
`+%`
`+% Note: When computing the covariance matrix, remember to divide by m (the`
`+%       number of examples).`
`+%`
`+`
`+`
`+`
`+`
`+`
`+`
`+`
`+% =========================================================================`
`+`
`+end`

# File ex7/plotDataPoints.m

`+function plotDataPoints(X, idx, K)`
`+%PLOTDATAPOINTS plots data points in X, coloring them so that those with the same`
`+%index assignments in idx have the same color`
`+%   PLOTDATAPOINTS(X, idx, K) plots data points in X, coloring them so that those `
`+%   with the same index assignments in idx have the same color`
`+`
`+% Create palette`
`+palette = hsv(K + 1);`
`+colors = palette(idx, :);`
`+`
`+% Plot the data`
`+scatter(X(:,1), X(:,2), 15, colors);`
`+`
`+end`

# File ex7/plotProgresskMeans.m

`+function plotProgresskMeans(X, centroids, previous, idx, K, i)`
`+%PLOTPROGRESSKMEANS is a helper function that displays the progress of `
`+%k-Means as it is running. It is intended for use only with 2D data.`
`+%   PLOTPROGRESSKMEANS(X, centroids, previous, idx, K, i) plots the data`
`+%   points with colors assigned to each centroid. With the previous`
`+%   centroids, it also plots a line between the previous locations and`
`+%   current locations of the centroids.`
`+%`
`+`
`+% Plot the examples`
`+plotDataPoints(X, idx, K);`
`+`
`+% Plot the centroids as black x's`
`+plot(centroids(:,1), centroids(:,2), 'x', ...`
`+     'MarkerEdgeColor','k', ...`
`+     'MarkerSize', 10, 'LineWidth', 3);`
`+`
`+% Plot the history of the centroids with lines`
`+for j=1:size(centroids,1)`
`+    drawLine(centroids(j, :), previous(j, :));`
`+end`
`+`
`+% Title`
`+title(sprintf('Iteration number %d', i))`
`+`
`+end`
`+`

# File ex7/projectData.m

`+function Z = projectData(X, U, K)`
`+%PROJECTDATA Computes the reduced data representation when projecting only `
`+%on to the top k eigenvectors`
`+%   Z = projectData(X, U, K) computes the projection of `
`+%   the normalized inputs X into the reduced dimensional space spanned by`
`+%   the first K columns of U. It returns the projected examples in Z.`
`+%`
`+`
`+% You need to return the following variables correctly.`
`+Z = zeros(size(X, 1), K);`
`+`
`+% ====================== YOUR CODE HERE ======================`
`+% Instructions: Compute the projection of the data using only the top K `
`+%               eigenvectors in U (first K columns). `
`+%               For the i-th example X(i,:), the projection on to the k-th `
`+%               eigenvector is given as follows:`
`+%                    x = X(i, :)';`
`+%                    projection_k = x' * U(:, k);`
`+%`
`+`
`+`
`+`
`+`
`+% =============================================================`
`+`
`+end`

# File ex7/recoverData.m

`+function X_rec = recoverData(Z, U, K)`
`+%RECOVERDATA Recovers an approximation of the original data when using the `
`+%projected data`
`+%   X_rec = RECOVERDATA(Z, U, K) recovers an approximation the `
`+%   original data that has been reduced to K dimensions. It returns the`
`+%   approximate reconstruction in X_rec.`
`+%`
`+`
`+% You need to return the following variables correctly.`
`+X_rec = zeros(size(Z, 1), size(U, 1));`
`+`
`+% ====================== YOUR CODE HERE ======================`
`+% Instructions: Compute the approximation of the data by projecting back`
`+%               onto the original space using the top K eigenvectors in U.`
`+%`
`+%               For the i-th example Z(i,:), the (approximate)`
`+%               recovered data for dimension j is given as follows:`
`+%                    v = Z(i, :)';`
`+%                    recovered_j = v' * U(j, 1:K)';`
`+%`
`+%               Notice that U(j, 1:K) is a row vector.`
`+%               `
`+`
`+`
`+`
`+% =============================================================`
`+`
`+end`

# File ex7/runkMeans.m

`+function [centroids, idx] = runkMeans(X, initial_centroids, ...`
`+                                      max_iters, plot_progress)`
`+%RUNKMEANS runs the K-Means algorithm on data matrix X, where each row of X`
`+%is a single example`
`+%   [centroids, idx] = RUNKMEANS(X, initial_centroids, max_iters, ...`
`+%   plot_progress) runs the K-Means algorithm on data matrix X, where each `
`+%   row of X is a single example. It uses initial_centroids used as the`
`+%   initial centroids. max_iters specifies the total number of iterations `
`+%   of K-Means to execute. plot_progress is a true/false flag that `
`+%   indicates if the function should also plot its progress as the `
`+%   learning happens. This is set to false by default. runkMeans returns `
`+%   centroids, a Kxn matrix of the computed centroids and idx, a m x 1 `
`+%   vector of centroid assignments (i.e. each entry in range [1..K])`
`+%`
`+`
`+% Set default value for plot progress`
`+if ~exist('plot_progress', 'var') || isempty(plot_progress)`
`+    plot_progress = false;`
`+end`
`+`
`+% Plot the data if we are plotting progress`
`+if plot_progress`
`+    figure;`
`+    hold on;`
`+end`
`+`
`+% Initialize values`
`+[m n] = size(X);`
`+K = size(initial_centroids, 1);`
`+centroids = initial_centroids;`
`+previous_centroids = centroids;`
`+idx = zeros(m, 1);`
`+`
`+% Run K-Means`
`+for i=1:max_iters`
`+    `
`+    % Output progress`
`+    fprintf('K-Means iteration %d/%d...\n', i, max_iters);`
`+    if exist('OCTAVE_VERSION')`
`+        fflush(stdout);`
`+    end`
`+    `
`+    % For each example in X, assign it to the closest centroid`
`+    idx = findClosestCentroids(X, centroids);`
`+    `
`+    % Optionally, plot progress here`
`+    if plot_progress`
`+        plotProgresskMeans(X, centroids, previous_centroids, idx, K, i);`
`+        previous_centroids = centroids;`
`+        fprintf('Press enter to continue.\n');`
`+        pause;`
`+    end`
`+    `
`+    % Given the memberships, compute new centroids`
`+    centroids = computeCentroids(X, idx, K);`
`+end`
`+`
`+% Hold off if we are plotting progress`
`+if plot_progress`
`+    hold off;`
`+end`
`+`
`+end`
`+`

# File ex7/submit.m

`+function submit(partId, webSubmit)`
`+%SUBMIT Submit your code and output to the ml-class servers`
`+%   SUBMIT() will connect to the ml-class server and submit your solution`
`+`
`+  fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ...`
`+          homework_id());`
`+  if ~exist('partId', 'var') || isempty(partId)`
`+    partId = promptPart();`
`+  end`
`+`
`+  if ~exist('webSubmit', 'var') || isempty(webSubmit)`
`+    webSubmit = 0; % submit directly by default `
`+  end`
`+`
`+  % Check valid partId`
`+  partNames = validParts();`
`+  if ~isValidPartId(partId)`
`+    fprintf('!! Invalid homework part selected.\n');`
`+    fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames) + 1);`
`+    fprintf('!! Submission Cancelled\n');`
`+    return`
`+  end`
`+`
`+  if ~exist('ml_login_data.mat','file')`
`+    [login password] = loginPrompt();`
`+    save('ml_login_data.mat','login','password');`
`+  else  `
`+    load('ml_login_data.mat');`
`+    [login password] = quickLogin(login, password);`
`+    save('ml_login_data.mat','login','password');`
`+  end`
`+`
`+  if isempty(login)`
`+    fprintf('!! Submission Cancelled\n');`
`+    return`
`+  end`
`+`
`+  fprintf('\n== Connecting to ml-class ... '); `
`+  if exist('OCTAVE_VERSION') `
`+    fflush(stdout);`
`+  end`
`+`
`+  % Setup submit list`
`+  if partId == numel(partNames) + 1`
`+    submitParts = 1:numel(partNames);`
`+  else`
`+    submitParts = [partId];`
`+  end`
`+`
`+  for s = 1:numel(submitParts)`
`+    thisPartId = submitParts(s);`
`+    if (~webSubmit) % submit directly to server`
`+      [login, ch, signature, auxstring] = getChallenge(login, thisPartId);`
`+      if isempty(login) || isempty(ch) || isempty(signature)`
`+        % Some error occured, error string in first return element.`
`+        fprintf('\n!! Error: %s\n\n', login);`
`+        return`
`+      end`
`+`
`+      % Attempt Submission with Challenge`
`+      ch_resp = challengeResponse(login, password, ch);`
`+`
`+      [result, str] = submitSolution(login, ch_resp, thisPartId, ...`
`+             output(thisPartId, auxstring), source(thisPartId), signature);`
`+`
`+      partName = partNames{thisPartId};`
`+`
`+      fprintf('\n== [ml-class] Submitted Assignment %s - Part %d - %s\n', ...`
`+        homework_id(), thisPartId, partName);`
`+      fprintf('== %s\n', strtrim(str));`
`+`
`+      if exist('OCTAVE_VERSION')`
`+        fflush(stdout);`
`+      end`
`+    else`
`+      [result] = submitSolutionWeb(login, thisPartId, output(thisPartId), ...`
`+                            source(thisPartId));`
`+      result = base64encode(result);`
`+`
`+      fprintf('\nSave as submission file [submit_ex%s_part%d.txt (enter to accept default)]:', ...`
`+        homework_id(), thisPartId);`
`+      saveAsFile = input('', 's');`
`+      if (isempty(saveAsFile))`
`+        saveAsFile = sprintf('submit_ex%s_part%d.txt', homework_id(), thisPartId);`
`+      end`
`+`
`+      fid = fopen(saveAsFile, 'w');`
`+      if (fid)`
`+        fwrite(fid, result);`
`+        fclose(fid);`
`+        fprintf('\nSaved your solutions to %s.\n\n', saveAsFile);`
`+        fprintf(['You can now submit your solutions through the web \n' ...`
`+                 'form in the programming exercises. Select the corresponding \n' ...`
`+                 'programming exercise to access the form.\n']);`
`+`
`+      else`
`+        fprintf('Unable to save to %s\n\n', saveAsFile);`
`+        fprintf(['You can create a submission file by saving the \n' ...`
`+                 'following text in a file: (press enter to continue)\n\n']);`
`+        pause;`
`+        fprintf(result);`
`+      end`
`+    end`
`+  end`
`+end`
`+`
`+% ================== CONFIGURABLES FOR EACH HOMEWORK ==================`
`+`
`+function id = homework_id() `
`+  id = '7';`
`+end`
`+`
`+function [partNames] = validParts()`
`+  partNames = { `
`+                'Find Closest Centroids (k-Means)', ...`
`+                'Compute Centroid Means (k-Means)' ...`
`+                'PCA', ...`
`+                'Project Data (PCA)', ...`
`+                'Recover Data (PCA)' ...`
`+                };`
`+end`
`+`
`+function srcs = sources()`
`+  % Separated by part`
`+  srcs = { { 'findClosestCentroids.m' }, ...`
`+           { 'computeCentroids.m' }, ...`
`+           { 'pca.m' }, ...`
`+           { 'projectData.m' }, ...`
`+           { 'recoverData.m' } ...`
`+           };`
`+end`
`+`
`+function out = output(partId, auxstring)`
`+  % Random Test Cases`
`+  X = reshape(sin(1:165), 15, 11);`
`+  Z = reshape(cos(1:121), 11, 11);`
`+  C = Z(1:5, :);`
`+  idx = (1 + mod(1:15, 3))';`
`+  if partId == 1`
`+    idx = findClosestCentroids(X, C);`
`+    out = sprintf('%0.5f ', idx(:));`
`+  elseif partId == 2`
`+    centroids = computeCentroids(X, idx, 3);`
`+    out = sprintf('%0.5f ', centroids(:));`
`+  elseif partId == 3`
`+    [U, S] = pca(X);`
`+    out = sprintf('%0.5f ', abs([U(:); S(:)]));`
`+  elseif partId == 4`
`+    X_proj = projectData(X, Z, 5);`
`+    out = sprintf('%0.5f ', X_proj(:));`
`+  elseif partId == 5`
`+    X_rec = recoverData(X(:,1:5), Z, 5);`
`+    out = sprintf('%0.5f ', X_rec(:));`
`+  end `
`+end`
`+`
`+% ====================== SERVER CONFIGURATION ===========================`
`+`
`+% ***************** REMOVE -staging WHEN YOU DEPLOY *********************`
function url = site_url()
  % Base URL of the course site; all grading endpoints hang off this.
  url = 'http://www.coursera.org/ml';
end
`+`
function url = challenge_url()
  % Endpoint that issues login challenges prior to submission.
  url = sprintf('%s/assignment/challenge', site_url());
end
`+`
function url = submit_url()
  % Endpoint that accepts graded submissions.
  url = sprintf('%s/assignment/submit', site_url());
end
`+`
`+% ========================= CHALLENGE HELPERS =========================`
`+`
function src = source(partId)
  % Concatenate the contents of every source file belonging to partId,
  % appending an eight-pipe delimiter after each file. Returns '' for an
  % out-of-range partId; errors if a listed file cannot be opened.
  src = '';
  src_files = sources();
  if partId > numel(src_files)
    return;
  end
  flist = src_files{partId};
  for i = 1:numel(flist)
    fname = flist{i};
    fid = fopen(fname);
    if fid == -1
      error('Error opening %s (is it missing?)', fname);
    end
    % Read the file line by line; fgets returns -1 (non-char) at EOF.
    line = fgets(fid);
    while ischar(line)
      src = [src line];
      line = fgets(fid);
    end
    fclose(fid);
    src = [src '||||||||'];
  end
end
`+`
function ret = isValidPartId(partId)
  % A part id is valid when it is non-empty and within 1..(#parts + 1);
  % the extra slot means "submit all parts".
  maxPart = numel(validParts()) + 1;
  ret = ~isempty(partId) && partId >= 1 && partId <= maxPart;
end
`+`
function partId = promptPart()
  % Interactively ask which part(s) to submit. Returns the chosen part
  % number, (#parts + 1) for "all", or -1 for invalid input.
  partNames = validParts();
  srcFiles = sources();
  nParts = numel(partNames);
  fprintf('== Select which part(s) to submit:\n');
  for i = 1:nParts
    fprintf('==   %d) %s [', i, partNames{i});
    fprintf(' %s ', srcFiles{i}{:});
    fprintf(']\n');
  end
  fprintf('==   %d) All of the above \n==\nEnter your choice [1-%d]: ', ...
          nParts + 1, nParts + 1);
  % str2num yields [] for non-numeric input, which isValidPartId rejects.
  partId = str2num(input('', 's'));
  if ~isValidPartId(partId)
    partId = -1;
  end
end
`+`
function [email,ch,signature,auxstring] = getChallenge(email, part)
  % Request a login challenge from the server for the given part.
  % The reply is a '|'-delimited list of alternating keys and values.
  params = {'email_address', email, ...
            'assignment_part_sid', [homework_id() '-' num2str(part)], ...
            'response_encoding', 'delim'};
  str = strtrim(urlread(challenge_url(), 'post', params));

  % Parse alternating key/value tokens into struct fields.
  r = struct;
  while numel(str) > 0
    [key, str] = strtok(str, '|');
    [val, str] = strtok(str, '|');
    r.(key) = val;
  end

  email = r.email_address;
  ch = r.challenge_key;
  signature = r.state;
  auxstring = r.challenge_aux_data;
end
`+`
function [result, str] = submitSolutionWeb(email, part, output, source)
  % Build the JSON blob used for manual "web submission"; every field is
  % base64-encoded (no line breaks) so the text pastes cleanly into the
  % submission form on the course site.
  sid64 = base64encode([homework_id() '-' num2str(part)], '');
  email64 = base64encode(email, '');
  out64 = base64encode(output, '');
  src64 = base64encode(source, '');
  result = sprintf(['{"assignment_part_sid":"%s",' ...
                    '"email_address":"%s",' ...
                    '"submission":"%s",' ...
                    '"submission_aux":"%s"}'], ...
                   sid64, email64, out64, src64);
  str = 'Web-submission';
end
`+`
function [result, str] = submitSolution(email, ch_resp, part, output, ...
                                        source, signature)
  % POST the solution to the grading server; str carries the raw reply.
  sid = [homework_id() '-' num2str(part)];
  params = {'assignment_part_sid', sid, ...
            'email_address', email, ...
            'submission', base64encode(output, ''), ...
            'submission_aux', base64encode(source, ''), ...
            'challenge_response', ch_resp, ...
            'state', signature};

  str = urlread(submit_url(), 'post', params);

  % Success/failure is not parsed out of str here; callers display str
  % directly, so result is always 0.
  result = 0;

end
`+`
`+% =========================== LOGIN HELPERS ===========================`
`+`
function [login password] = loginPrompt()
  % Collect credentials from the user; both values are cleared to []
  % when either entry is left blank, signalling "no login".
  [login password] = basicPrompt();
  if isempty(login) || isempty(password)
    login = [];
    password = [];
  end
end
`+`
`+`
function [login password] = basicPrompt()
  % Read credentials from stdin; the 's' flag keeps input as raw text.
  % (Note: the password is echoed to the terminal.)
  loginPromptText = 'Login (Email address): ';
  passwordPromptText = 'Password: ';
  login = input(loginPromptText, 's');
  password = input(passwordPromptText, 's');
end
`+`
function [login password] = quickLogin(login,password)
  % Offer to reuse the cached credentials; an empty answer or one that
  % starts with 'y'/'Y' keeps them, anything else re-prompts.
  disp(['You are currently logged in as ' login '.']);
  cont_token = input('Is this you? (y/n - type n to reenter password)','s');
  keep = isempty(cont_token) || cont_token(1)=='Y' || cont_token(1)=='y';
  if ~keep
    [login password] = loginPrompt();
  end
end
`+`
function [str] = challengeResponse(email, passwd, challenge)
  % Answer the server's login challenge by hashing challenge+password.
  % (email is unused here but kept for call-site compatibility.)
  str = sha1([challenge passwd]);
end
`+`
`+% =============================== SHA-1 ================================`
`+`
function hash = sha1(str)
  % Pure-MATLAB SHA-1 digest of a char vector; returns the hash as a
  % 40-character lowercase hex string. Used to answer login challenges.

  % Initialize variables
  % (the five standard SHA-1 initial state words, written in decimal)
  h0 = uint32(1732584193);
  h1 = uint32(4023233417);
  h2 = uint32(2562383102);
  h3 = uint32(271733878);
  h4 = uint32(3285377520);
  
  % Convert to word array
  strlen = numel(str);

  % Break string into chars and append the bit 1 to the message
  mC = [double(str) 128];
  % Pad to a multiple of 4 bytes. (When already aligned this appends 4
  % extra zeros; harmless because the word array below is sized from numB.)
  mC = [mC zeros(1, 4-mod(numel(mC), 4), 'uint8')];
  
  numB = strlen * 8;
  % idivide is not available on very old interpreters; fall back to ceil.
  if exist('idivide')
    numC = idivide(uint32(numB + 65), 512, 'ceil');
  else
    numC = ceil(double(numB + 65)/512);
  end
  numW = numC * 16;
  mW = zeros(numW, 1, 'uint32');
  
  % Pack every 4 message bytes into one big-endian 32-bit word.
  idx = 1;
  for i = 1:4:strlen + 1
    mW(idx) = bitor(bitor(bitor( ...
                  bitshift(uint32(mC(i)), 24), ...
                  bitshift(uint32(mC(i+1)), 16)), ...
                  bitshift(uint32(mC(i+2)), 8)), ...
                  uint32(mC(i+3)));
    idx = idx + 1;
  end
  
  % Append length of message (in bits) as a big-endian 64-bit value
  % occupying the last two words of the padded message.
  mW(numW - 1) = uint32(bitshift(uint64(numB), -32));
  mW(numW) = uint32(bitshift(bitshift(uint64(numB), 32), -32));

  % Process the message in successive 512-bit chs
  for cId = 1 : double(numC)
    cSt = (cId - 1) * 16 + 1;
    cEnd = cId * 16;
    ch = mW(cSt : cEnd);
    
    % Extend the sixteen 32-bit words into eighty 32-bit words
    for j = 17 : 80
      ch(j) = ch(j - 3);
      ch(j) = bitxor(ch(j), ch(j - 8));
      ch(j) = bitxor(ch(j), ch(j - 14));
      ch(j) = bitxor(ch(j), ch(j - 16));
      ch(j) = bitrotate(ch(j), 1);
    end
  
    % Initialize hash value for this ch
    a = h0;
    b = h1;
    c = h2;
    d = h3;
    e = h4;
    
    % Main loop: four rounds of 20 steps, each round with its own
    % mixing function f and round constant k.
    for i = 1 : 80
      if(i >= 1 && i <= 20)
        f = bitor(bitand(b, c), bitand(bitcmp(b), d));
        k = uint32(1518500249);
      elseif(i >= 21 && i <= 40)
        f = bitxor(bitxor(b, c), d);
        k = uint32(1859775393);
      elseif(i >= 41 && i <= 60)
        f = bitor(bitor(bitand(b, c), bitand(b, d)), bitand(c, d));
        k = uint32(2400959708);
      elseif(i >= 61 && i <= 80)
        f = bitxor(bitxor(b, c), d);
        k = uint32(3395469782);
      end
      
      % t = (a rotl 5) + f + e + k + ch(i), all modulo 2^32 via bitadd.
      t = bitrotate(a, 5);
      t = bitadd(t, f);
      t = bitadd(t, e);
      t = bitadd(t, k);
      t = bitadd(t, ch(i));
      e = d;
      d = c;
      c = bitrotate(b, 30);
      b = a;
      a = t;
      
    end
    % Fold this chunk's result into the running hash state.
    h0 = bitadd(h0, a);
    h1 = bitadd(h1, b);
    h2 = bitadd(h2, c);
    h3 = bitadd(h3, d);
    h4 = bitadd(h4, e);

  end

  % Concatenate the five state words as 8 hex digits each (40 chars).
  hash = reshape(dec2hex(double([h0 h1 h2 h3 h4]), 8)', [1 40]);
  
  hash = lower(hash);

end
`+`
function ret = bitadd(iA, iB)
  % 32-bit modular addition: add as doubles (exact, since the sum is
  % below 2^33 < 2^53), then wrap at 2^32 and cast back to uint32.
  total = double(iA) + double(iB);
  ret = uint32(mod(total, 2^32));
end
`+`
function ret = bitrotate(iA, places)
  % Rotate a uint32 left by `places` bits.
  low = bitshift(iA, places);           % left shift; overflow bits drop
  high = bitshift(iA, -(32 - places));  % top bits wrapped to the bottom
  ret = bitor(low, high);
end
`+`
`+% =========================== Base64 Encoder ============================`
`+% Thanks to Peter John Acklam`
`+%`
`+`
function y = base64encode(x, eol)
%BASE64ENCODE Perform base64 encoding on a string.
%
%   BASE64ENCODE(STR, EOL) encode the given string STR.  EOL is the line ending
%   sequence to use; it is optional and defaults to '\n' (ASCII decimal 10).
%   The returned encoded string is broken into lines of no more than 76
%   characters each, and each line will end with EOL unless it is empty.  Let
%   EOL be empty if you do not want the encoded string broken into lines.
%
%   STR and EOL don't have to be strings (i.e., char arrays).  The only
%   requirement is that they are vectors containing values in the range 0-255.
%
%   This function may be used to encode strings into the Base64 encoding
%   specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions).  The
%   Base64 encoding is designed to represent arbitrary sequences of octets in a
%   form that need not be humanly readable.  A 65-character subset
%   ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per
%   printable character.
%
%   Examples
%   --------
%
%   If you want to encode a large file, you should encode it in chunks that are
%   a multiple of 57 bytes.  This ensures that the base64 lines line up and
%   that you do not end up with padding in the middle.  57 bytes of data fills
%   one complete base64 line (76 == 57*4/3):
%
%   If ifid and ofid are two file identifiers opened for reading and writing,
%   respectively, then you can base64 encode the data with
%
%      while ~feof(ifid)
%         fwrite(ofid, base64encode(fread(ifid, 60*57)));
%      end
%
%   or, if you have enough memory,
%
%      fwrite(ofid, base64encode(fread(ifid)));
%
%   See also BASE64DECODE.

%   Author:      Peter John Acklam
%   Time-stamp:  2004-02-03 21:36:56 +0100
%   E-mail:      pjacklam@online.no
%   URL:         http://home.online.no/~pjacklam

   % Numeric input is stringified first (note: num2str, not a byte cast).
   if isnumeric(x)
      x = num2str(x);
   end

   % make sure we have the EOL value
   if nargin < 2
      eol = sprintf('\n');
   else
      if sum(size(eol) > 1) > 1
         error('EOL must be a vector.');
      end
      if any(eol(:) > 255)
         error('EOL can not contain values larger than 255.');
      end
   end

   if sum(size(x) > 1) > 1
      error('STR must be a vector.');
   end

   x   = uint8(x);
   eol = uint8(eol);

   ndbytes = length(x);                 % number of decoded bytes
   nchunks = ceil(ndbytes / 3);         % number of chunks/groups
   nebytes = 4 * nchunks;               % number of encoded bytes

   % add padding if necessary, to make the length of x a multiple of 3
   if rem(ndbytes, 3)
      x(end+1 : 3*nchunks) = 0;
   end

   x = reshape(x, [3, nchunks]);        % reshape the data
   y = repmat(uint8(0), 4, nchunks);    % for the encoded data

   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   % Split up every 3 bytes into 4 pieces
   %
   %    aaaaaabb bbbbcccc ccdddddd
   %
   % to form
   %
   %    00aaaaaa 00bbbbbb 00cccccc 00dddddd
   %
   y(1,:) = bitshift(x(1,:), -2);                  % 6 highest bits of x(1,:)

   y(2,:) = bitshift(bitand(x(1,:), 3), 4);        % 2 lowest bits of x(1,:)
   y(2,:) = bitor(y(2,:), bitshift(x(2,:), -4));   % 4 highest bits of x(2,:)

   y(3,:) = bitshift(bitand(x(2,:), 15), 2);       % 4 lowest bits of x(2,:)
   y(3,:) = bitor(y(3,:), bitshift(x(3,:), -6));   % 2 highest bits of x(3,:)

   y(4,:) = bitand(x(3,:), 63);                    % 6 lowest bits of x(3,:)

   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   % Now perform the following mapping
   %
   %   0  - 25  ->  A-Z
   %   26 - 51  ->  a-z
   %   52 - 61  ->  0-9
   %   62       ->  +
   %   63       ->  /
   %
   % We could use a mapping vector like
   %
   %   ['A':'Z', 'a':'z', '0':'9', '+/']
   %
   % but that would require an index vector of class double.
   %
   z = repmat(uint8(0), size(y));
   i =           y <= 25;  z(i) = 'A'      + double(y(i));
   i = 26 <= y & y <= 51;  z(i) = 'a' - 26 + double(y(i));
   i = 52 <= y & y <= 61;  z(i) = '0' - 52 + double(y(i));
   i =           y == 62;  z(i) = '+';
   i =           y == 63;  z(i) = '/';
   y = z;

   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   % Add padding if necessary.
   %
   npbytes = 3 * nchunks - ndbytes;     % number of padding bytes
   if npbytes
      y(end-npbytes+1 : end) = '=';     % '=' is used for padding
   end

   if isempty(eol)

      % reshape to a row vector
      y = reshape(y, [1, nebytes]);

   else

      nlines = ceil(nebytes / 76);      % number of lines
      neolbytes = length(eol);          % number of bytes in eol string

      % pad data so it becomes a multiple of 76 elements
      y = [y(:) ; zeros(76 * nlines - numel(y), 1)];
      y(nebytes + 1 : 76 * nlines) = 0;
      y = reshape(y, 76, nlines);

      % insert eol strings (one EOL appended below each 76-char column)
      eol = eol(:);
      y(end + 1 : end + neolbytes, :) = eol(:, ones(1, nlines));

      % remove padding, but keep the last eol string
      m = nebytes + neolbytes * (nlines - 1);
      n = (76+neolbytes)*nlines - neolbytes;
      y(m+1 : n) = '';

      % extract and reshape to row vector
      y = reshape(y, 1, m+neolbytes);

   end

   % output is a character array
   y = char(y);

end

# File ex7/submitWeb.m

`+% submitWeb Creates files from your code and output for web submission.`
`+%`
`+%   If the submit function does not work for you, use the web-submission mechanism.`
`+%   Call this function to produce a file for the part you wish to submit. Then,`
`+%   submit the file to the class servers using the "Web Submission" button on the `
`+%   Programming Exercises page on the course website.`
`+%`
`+%   You should call this function without arguments (submitWeb), to receive`
`+%   an interactive prompt for submission; optionally you can call it with the partID`
`+%   if you so wish. Make sure your working directory is set to the directory `
`+%   containing the submitWeb.m file and your assignment files.`
`+`
function submitWeb(partId)
  % Delegate to submit() in web-submission mode (second argument = 1),
  % defaulting to an interactive part prompt when no partId is given.
  if nargin < 1 || isempty(partId)
    partId = [];
  end
  submit(partId, 1);
end
`+`

# File ex8.py

`+#!/usr/bin/env python`
`+`
`+import numpy as np`
`+import matplotlib.pyplot as plt`
`+from scipy.io import loadmat`
`+`
`+`
def p1(x, var, mue):
    """Univariate Gaussian density N(x; mue, var).

    Note the argument order is (x, variance, mean), matching the zip
    order used by the caller in this file.
    """
    # Bug fix: the previous expression ended in "/ 2 * var", which by
    # left-to-right precedence MULTIPLIES by var instead of dividing by
    # 2*var, giving a wrong density whenever var != 1.
    return (1 / np.sqrt(2 * np.pi * var)) * np.exp(-((x - mue) ** 2) / (2 * var))
`+`
`+`
def p(x, mue, var):
    """Joint density of one example under independent per-feature Gaussians.

    Multiplies the univariate densities p1 over the features of x.
    """
    density = 1.0
    for value, variance, mean in zip(x, var, mue):
        density *= p1(value, variance, mean)
    return density
`+`
`+`
def calc_contour(data, fn):
    """Evaluate fn on a 100x100 grid spanning the bounding box of data.

    Returns (xs, ys, z) where z[i, j] = fn(array([xs[i], ys[j]])).
    """
    xs = np.linspace(data[:, 0].min(), data[:, 0].max(), 100)
    ys = np.linspace(data[:, 1].min(), data[:, 1].max(), 100)
    z = np.zeros(shape=(len(xs), len(ys)))
    for i, x_val in enumerate(xs):
        for j, y_val in enumerate(ys):
            z[i, j] = fn(np.array([x_val, y_val]))

    return xs, ys, z
`+`
def anomaly():
    """Fit an independent-Gaussian model to the ex8 data and plot density contours."""
    mat = loadmat('ex8/ex8data1.mat')
    train = mat['X']

    mue = train.mean(0)   # per-feature means
    var = train.var(0)    # per-feature variances
    xs, ys, z = calc_contour(train, lambda point: p(point, mue, var))
    z *= 10  # NOTE(review): this x10 scaling looks ad hoc — confirm intent

    plt.scatter(train[:, 0], train[:, 1], marker='x')
    exponents = np.arange(-20, -1, 3)
    to_level = np.vectorize(lambda e: 10 ** e)
    plt.contour(xs, ys, z, to_level(exponents))
    plt.show()
`+`
`+`
if __name__ == '__main__':
    anomaly()
    # Bug fix: raw_input() only exists on Python 2 and raises NameError
    # under Python 3; keep the "wait for enter" behavior on both.
    try:
        raw_input()   # Python 2
    except NameError:
        input()       # Python 3

# File ex8/checkCostFunction.m

function checkCostFunction(lambda)
%CHECKCOSTFUNCTION Builds a small random collaborative filtering problem
%to check your cost function and gradients.
%   CHECKCOSTFUNCTION(lambda) prints the analytical gradients produced by
%   your cofiCostFunc next to numerical estimates obtained via
%   computeNumericalGradient; the two columns should be very similar.

% Regularization defaults to off.
if ~exist('lambda', 'var') || isempty(lambda)
    lambda = 0;
end

%% Build a small synthetic problem
X_t = rand(4, 3);
Theta_t = rand(5, 3);

% Keep roughly half of the ratings; R marks which entries survive.
Y = X_t * Theta_t';
Y(rand(size(Y)) > 0.5) = 0;
R = double(Y ~= 0);

%% Gradient checking at a random parameter point
X = randn(size(X_t));
Theta = randn(size(Theta_t));
num_users = size(Y, 2);
num_movies = size(Y, 1);
num_features = size(Theta_t, 2);

params = [X(:); Theta(:)];
costFn = @(t) cofiCostFunc(t, Y, R, num_users, num_movies, ...
                           num_features, lambda);

numgrad = computeNumericalGradient(costFn, params);
[cost, grad] = cofiCostFunc(params, Y, R, num_users, ...
                            num_movies, num_features, lambda);

disp([numgrad grad]);
fprintf(['The above two columns you get should be very similar.\n' ...
         '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']);

diff = norm(numgrad-grad)/norm(numgrad+grad);
fprintf(['If your backpropagation implementation is correct, then \n' ...
         'the relative difference will be small (less than 1e-9). \n' ...
         '\nRelative Difference: %g\n'], diff);

end

# File ex8/cofiCostFunc.m

function [J, grad] = cofiCostFunc(params, Y, R, num_users, num_movies, ...
                                  num_features, lambda)
%COFICOSTFUNC Collaborative filtering cost function
%   [J, grad] = COFICOSTFUNC(params, Y, R, num_users, num_movies, ...
%   num_features, lambda) returns the cost and gradient for the
%   collaborative filtering problem.
%
%   This is an assignment template: the cost/gradient body below is
%   intentionally left for the student to implement.

% Unfold the U and W matrices from params
% (params stacks X(:) first, then Theta(:))
X = reshape(params(1:num_movies*num_features), num_movies, num_features);
Theta = reshape(params(num_movies*num_features+1:end), ...
                num_users, num_features);

            
% You need to return the following values correctly
% (placeholders until the section below is implemented)
J = 0;
X_grad = zeros(size(X));
Theta_grad = zeros(size(Theta));

% ====================== YOUR CODE HERE ======================
% Instructions: Compute the cost function and gradient for collaborative
%               filtering. Concretely, you should first implement the cost
%               function (without regularization) and make sure it is
%               matches our costs. After that, you should implement the 
%               gradient and use the checkCostFunction routine to check
%               that the gradient is correct. Finally, you should implement
%               regularization.
%
% Notes: X - num_movies  x num_features matrix of movie features
%        Theta - num_users  x num_features matrix of user features
%        Y - num_movies x num_users matrix of user ratings of movies
%        R - num_movies x num_users matrix, where R(i, j) = 1 if the 
%            i-th movie was rated by the j-th user
%
% You should set the following variables correctly:
%
%        X_grad - num_movies x num_features matrix, containing the 
%                 partial derivatives w.r.t. to each element of X
%        Theta_grad - num_users x num_features matrix, containing the 
%                     partial derivatives w.r.t. to each element of Theta
%
















% =============================================================

% Re-stack the gradients in the same order params was unfolded.
grad = [X_grad(:); Theta_grad(:)];

end

function numgrad = computeNumericalGradient(J, theta)
%COMPUTENUMERICALGRADIENT Estimates the gradient of J at theta using
%central finite differences.
%   numgrad = COMPUTENUMERICALGRADIENT(J, theta) perturbs each entry of
%   theta by +/- e and takes (J(theta + e_i) - J(theta - e_i)) / (2e) as
%   the numerical partial derivative, so numgrad(i) approximates the
%   partial derivative of J with respect to theta(i). Calling y = J(theta)
%   must return the scalar function value at theta.

numgrad = zeros(size(theta));
perturb = zeros(size(theta));
e = 1e-4;
for k = 1:numel(theta)
    perturb(k) = e;                        % unit perturbation on entry k
    below = J(theta - perturb);
    above = J(theta + perturb);
    numgrad(k) = (above - below) / (2*e);  % central difference
    perturb(k) = 0;                        % reset for the next entry
end

end

# File ex8/estimateGaussian.m

function [mu sigma2] = estimateGaussian(X)
%ESTIMATEGAUSSIAN This function estimates the parameters of a 
%Gaussian distribution using the data in X
%   [mu sigma2] = estimateGaussian(X), 
%   The input X is the dataset with each n-dimensional data point in one row
%   The output is an n-dimensional vector mu, the mean of the data set
%   and the variances sigma^2, an n x 1 vector
%
%   This is an assignment template: the estimation code below is
%   intentionally left for the student to implement.

% Useful variables
[m, n] = size(X);

% You should return these values correctly
% (n x 1 placeholders until the section below is implemented)
mu = zeros(n, 1);
sigma2 = zeros(n, 1);

% ====================== YOUR CODE HERE ======================
% Instructions: Compute the mean of the data and the variances
%               In particular, mu(i) should contain the mean of
%               the data for the i-th feature and sigma2(i)
%               should contain variance of the i-th feature.
%










% =============================================================


end

# File ex8/ex8.m

%% Machine Learning Online Class
%  Exercise 8 | Anomaly Detection and Collaborative Filtering
%
%  Instructions
%  ------------
%
%  This file contains code that helps you get started on the
%  exercise. You will need to complete the following functions:
%
%     estimateGaussian.m
%     selectThreshold.m
%     cofiCostFunc.m
%
%  For this exercise, you will not need to change any code in this file,
%  or any other files other than those mentioned above.
%

%% Initialization
clear ; close all; clc

%% ================== Part 1: Load Example Dataset  ===================
%  We start this exercise by using a small dataset that is easy to
%  visualize.
%
%  Our example case consists of 2 network server statistics across
%  several machines: the latency and throughput of each machine.
%  This exercise will help us find possibly faulty (or very fast) machines.
%

fprintf('Visualizing example dataset for outlier detection.\n\n');

%  The following command loads the dataset. You should now have the
%  variables X, Xval, yval in your environment
load('ex8data1.mat');

%  Visualize the example dataset
plot(X(:, 1), X(:, 2), 'bx');
axis([0 30 0 30]);
xlabel('Latency (ms)');
ylabel('Throughput (mb/s)');

fprintf('Program paused. Press enter to continue.\n');
pause


%% ================== Part 2: Estimate the dataset statistics ===================
%  For this exercise, we assume a Gaussian distribution for the dataset.
%
%  We first estimate the parameters of our assumed Gaussian distribution, 
%  then compute the probabilities for each of the points and then visualize 
%  both the overall distribution and where each of the points falls in 
%  terms of that distribution.
%
fprintf('Visualizing Gaussian fit.\n\n');

%  Estimate mu and sigma2 (per-feature mean and variance)
[mu sigma2] = estimateGaussian(X);

%  Returns the density of the multivariate normal at each data point (row) 
%  of X
p = multivariateGaussian(X, mu, sigma2);

%  Visualize the fit
visualizeFit(X,  mu, sigma2);
xlabel('Latency (ms)');
ylabel('Throughput (mb/s)');

fprintf('Program paused. Press enter to continue.\n');
pause;

%% ================== Part 3: Find Outliers ===================
%  Now you will find a good epsilon threshold using a cross-validation set
%  probabilities given the estimated Gaussian distribution
% 

pval = multivariateGaussian(Xval, mu, sigma2);

[epsilon F1] = selectThreshold(yval, pval);
fprintf('Best epsilon found using cross-validation: %e\n', epsilon);
fprintf('Best F1 on Cross Validation Set:  %f\n', F1);
fprintf('   (you should see a value epsilon of about 8.99e-05)\n\n');

%  Find the outliers in the training set and plot them
outliers = find(p < epsilon);

%  Draw a red circle around those outliers
hold on
plot(X(outliers, 1), X(outliers, 2), 'ro', 'LineWidth', 2, 'MarkerSize', 10);
hold off

fprintf('Program paused. Press enter to continue.\n');
pause;

%% ================== Part 4: Multidimensional Outliers ===================
%  We will now use the code from the previous part and apply it to a 
%  harder problem in which more features describe each datapoint and only 
%  some features indicate whether a point is an outlier.
%

%  Loads the second dataset. You should now have the
%  variables X, Xval, yval in your environment
load('ex8data2.mat');

%  Apply the same steps to the larger dataset
[mu sigma2] = estimateGaussian(X);

%  Training set 
p = multivariateGaussian(X, mu, sigma2);

%  Cross-validation set
pval = multivariateGaussian(Xval, mu, sigma2);

%  Find the best threshold
[epsilon F1] = selectThreshold(yval, pval);

fprintf('Best epsilon found using cross-validation: %e\n', epsilon);
fprintf('Best F1 on Cross Validation Set:  %f\n', F1);
fprintf('# Outliers found: %d\n', sum(p < epsilon));
fprintf('   (you should see a value epsilon of about 1.38e-18)\n\n');
pause




# File ex8/ex8_cofi.m

`+%% Machine Learning Online Class`
`+%  Exercise 8 | Anomaly Detection and Collaborative Filtering`
`+%`
`+%  Instructions`
`+%  ------------`
`+%`
`+%  This file contains code that helps you get started on the`
`+%  exercise. You will need to complete the following functions:`
`+%`
`+%     estimateGaussian.m`
`+%     selectThreshold.m`
`+%     cofiCostFunc.m`
`+%`
`+%  For this exercise, you will not need to change any code in this file,`
`+%  or any other files other than those mentioned above.`
`+%`
`+`
`+%% =============== Part 1: Loading movie ratings dataset ================`
`+%  You will start by loading the movie ratings dataset to understand the`
`+%  structure of the data.`
`+%  `
`+fprintf('Loading movie ratings dataset.\n\n');`
`+`
`+%  Load data`
`+load ('ex8_movies.mat');`
`+`
`+%  Y is a 1682x943 matrix, containing ratings (1-5) of 1682 movies on `
`+%  943 users`
`+%`
`+%  R is a 1682x943 matrix, where R(i,j) = 1 if and only if user j gave a`
`+%  rating to movie i`
`+`
`+%  From the matrix, we can compute statistics like average rating.`
`+fprintf('Average rating for movie 1 (Toy Story): %f / 5\n\n', ...`
`+        mean(Y(1, R(1, :))));`
`+`
`+%  We can "visualize" the ratings matrix by plotting it with imagesc`
`+imagesc(Y);`
`+ylabel('Movies');`
`+xlabel('Users');`
`+`
`+fprintf('\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% ============ Part 2: Collaborative Filtering Cost Function ===========`
`+%  You will now implement the cost function for collaborative filtering.`
`+%  To help you debug your cost function, we have included set of weights`
`+%  that we trained on that. Specifically, you should complete the code in `
`+%  cofiCostFunc.m to return J.`
`+`
`+%  Load pre-trained weights (X, Theta, num_users, num_movies, num_features)`
`+load ('ex8_movieParams.mat');`
`+`
`+%  Reduce the data set size so that this runs faster`
`+num_users = 4; num_movies = 5; num_features = 3;`
`+X = X(1:num_movies, 1:num_features);`
`+Theta = Theta(1:num_users, 1:num_features);`
`+Y = Y(1:num_movies, 1:num_users);`
`+R = R(1:num_movies, 1:num_users);`
`+`
`+%  Evaluate cost function`
`+J = cofiCostFunc([X(:) ; Theta(:)], Y, R, num_users, num_movies, ...`
`+               num_features, 0);`
`+           `
`+fprintf(['Cost at loaded parameters: %f '...`
`+         '\n(this value should be about 22.22)\n'], J);`
`+`
`+fprintf('\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% ============== Part 3: Collaborative Filtering Gradient ==============`
`+%  Once your cost function matches up with ours, you should now implement `
`+%  the collaborative filtering gradient function. Specifically, you should `
`+%  complete the code in cofiCostFunc.m to return the grad argument.`
`+%  `
`+fprintf('\nChecking Gradients (without regularization) ... \n');`
`+`
`+%  Check gradients by running checkNNGradients`
`+checkCostFunction;`
`+`
`+fprintf('\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% ========= Part 4: Collaborative Filtering Cost Regularization ========`
`+%  Now, you should implement regularization for the cost function for `
`+%  collaborative filtering. You can implement it by adding the cost of`
`+%  regularization to the original cost computation.`
`+%  `
`+`
`+%  Evaluate cost function`
`+J = cofiCostFunc([X(:) ; Theta(:)], Y, R, num_users, num_movies, ...`
`+               num_features, 1.5);`
`+           `
`+fprintf(['Cost at loaded parameters (lambda = 1.5): %f '...`
`+         '\n(this value should be about 31.34)\n'], J);`
`+`
`+fprintf('\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% ======= Part 5: Collaborative Filtering Gradient Regularization ======`
`+%  Once your cost matches up with ours, you should proceed to implement `
`+%  regularization for the gradient. `
`+%`
`+`
`+%  `
`+fprintf('\nChecking Gradients (with regularization) ... \n');`
`+`
`+%  Check gradients by running checkNNGradients`
`+checkCostFunction(1.5);`
`+`
`+fprintf('\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% ============== Part 6: Entering ratings for a new user ===============`
`+%  Before we will train the collaborative filtering model, we will first`
`+%  add ratings that correspond to a new user that we just observed. This`
`+%  part of the code will also allow you to put in your own ratings for the`
`+%  movies in our dataset!`
`+%`
`+movieList = loadMovieList();`
`+`
`+%  Initialize my ratings`
`+my_ratings = zeros(1682, 1);`
`+`
`+% Check the file movie_idx.txt for id of each movie in our dataset`
`+% For example, Toy Story (1995) has ID 1, so to rate it "4", you can set`
`+my_ratings(1) = 4;`
`+`
`+% Or suppose did not enjoy Silence of the Lambs (1991), you can set`
`+my_ratings(98) = 2;`
`+`
`+% We have selected a few movies we liked / did not like and the ratings we`
`+% gave are as follows:`
`+my_ratings(7) = 3;`
`+my_ratings(12)= 5;`
`+my_ratings(54) = 4;`
`+my_ratings(64)= 5;`
`+my_ratings(66)= 3;`
`+my_ratings(69) = 5;`
`+my_ratings(183) = 4;`
`+my_ratings(226) = 5;`
`+my_ratings(355)= 5;`
`+`
`+fprintf('\n\nNew user ratings:\n');`
`+for i = 1:length(my_ratings)`
`+    if my_ratings(i) > 0 `
`+        fprintf('Rated %d for %s\n', my_ratings(i), ...`
`+                 movieList{i});`
`+    end`
`+end`
`+`
`+fprintf('\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+`
`+%% ================== Part 7: Learning Movie Ratings ====================`
`+%  Now, you will train the collaborative filtering model on a movie rating `
`+%  dataset of 1682 movies and 943 users`
`+%`
`+`
`+fprintf('\nTraining collaborative filtering...\n');`
`+`
`+%  Load data`
`+load('ex8_movies.mat');`
`+`
`+%  Y is a 1682x943 matrix, containing ratings (1-5) of 1682 movies by `
`+%  943 users`
`+%`
`+%  R is a 1682x943 matrix, where R(i,j) = 1 if and only if user j gave a`
`+%  rating to movie i`
`+`
`+%  Add our own ratings to the data matrix`
`+%  (prepended as a new first column, so the new user is user 1)`
`+Y = [my_ratings Y];`
`+R = [(my_ratings ~= 0) R];`
`+`
`+%  Normalize Ratings: Ynorm has each movie's mean rating removed;`
`+%  Ymean keeps the means so predictions can be un-normalized afterwards`
`+[Ynorm, Ymean] = normalizeRatings(Y, R);`
`+`
`+%  Useful Values`
`+num_users = size(Y, 2);`
`+num_movies = size(Y, 1);`
`+num_features = 10;`
`+`
`+% Set Initial Parameters (Theta, X)`
`+% Random (not zero) initialization breaks symmetry between features`
`+X = randn(num_movies, num_features);`
`+Theta = randn(num_users, num_features);`
`+`
`+% fmincg optimizes a single unrolled parameter vector`
`+initial_parameters = [X(:); Theta(:)];`
`+`
`+% Set options for fmincg`
`+options = optimset('GradObj', 'on', 'MaxIter', 100);`
`+`
`+% Set Regularization`
`+lambda = 10;`
`+% BUGFIX: train on the mean-normalized ratings Ynorm (not the raw Y).`
`+% Part 8 adds Ymean back onto the predictions, which is only correct if`
`+% the model was fit to the mean-subtracted ratings; training on Y would`
`+% double-count the per-movie mean.`
`+theta = fmincg (@(t)(cofiCostFunc(t, Ynorm, R, num_users, num_movies, ...`
`+                                num_features, lambda)), ...`
`+                initial_parameters, options);`
`+`
`+% Unfold the returned theta back into U and W`
`+X = reshape(theta(1:num_movies*num_features), num_movies, num_features);`
`+Theta = reshape(theta(num_movies*num_features+1:end), ...`
`+                num_users, num_features);`
`+`
`+fprintf('Recommender system learning completed.\n');`
`+`
`+fprintf('\nProgram paused. Press enter to continue.\n');`
`+pause;`
`+`
`+%% ================== Part 8: Recommendation for you ====================`
`+%  After training the model, you can now make recommendations by computing`
`+%  the predictions matrix.`
`+%`
`+`
`+% Predicted rating of movie i by user j is X(i,:) * Theta(j,:)'`
`+p = X * Theta';`
`+% Column 1 is the new user added in Part 6; add back the per-movie mean`
`+% Ymean produced by normalizeRatings in Part 7 to un-normalize.`
`+my_predictions = p(:,1) + Ymean;`
`+`
`+movieList = loadMovieList();`
`+`
`+% Sort predictions high-to-low; ix maps rank -> movie index`
`+[r, ix] = sort(my_predictions, 'descend');`
`+fprintf('\nTop recommendations for you:\n');`
`+for i=1:10`
`+    j = ix(i);`
`+    fprintf('Predicting rating %.1f for movie %s\n', my_predictions(j), ...`
`+            movieList{j});`
`+end`
`+`
`+% Re-print the ratings entered in Part 6 for comparison`
`+fprintf('\n\nOriginal ratings provided:\n');`
`+for i = 1:length(my_ratings)`
`+    if my_ratings(i) > 0 `
`+        fprintf('Rated %d for %s\n', my_ratings(i), ...`
`+                 movieList{i});`
`+    end`
`+end`

# File ex8/fmincg.m

`+function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)`
`+% Minimize a continuous differentiable multivariate function. Starting point`
`+% is given by "X" (D by 1), and the function named in the string "f", must`
`+% return a function value and a vector of partial derivatives. The Polack-`
`+% Ribiere flavour of conjugate gradients is used to compute search directions,`
`+% and a line search using quadratic and cubic polynomial approximations and the`
`+% Wolfe-Powell stopping criteria is used together with the slope ratio method`
`+% for guessing initial step sizes. Additionally a bunch of checks are made to`
`+% make sure that exploration is taking place and that extrapolation will not`
`+% be unboundedly large. The "length" gives the length of the run: if it is`
`+% positive, it gives the maximum number of line searches, if negative its`
`+% absolute gives the maximum allowed number of function evaluations. You can`
`+% (optionally) give "length" a second component, which will indicate the`
`+% reduction in function value to be expected in the first line-search (defaults`
`+% to 1.0). The function returns when either its length is up, or if no further`
`+% progress can be made (ie, we are at a minimum, or so close that due to`
`+% numerical problems, we cannot get any closer). If the function terminates`
`+% within a few iterations, it could be an indication that the function value`
`+% and derivatives are not consistent (ie, there may be a bug in the`
`+% implementation of your "f" function). The function returns the found`
`+% solution "X", a vector of function values "fX" indicating the progress made`
`+% and "i" the number of iterations (line searches or function evaluations,`
`+% depending on the sign of "length") used.`
`+%`
`+% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)`
`+%`
`+% See also: checkgrad `
`+%`
`+% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13`
`+%`
`+%`
`+% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen`
`+% `
`+% Permission is granted for anyone to copy, use, or modify these`
`+% programs and accompanying documents for purposes of research or`
`+% education, provided this copyright notice is retained, and note is`
`+% made of any changes that have been made.`
`+% `
`+% These programs and documents are distributed without any warranty,`
`+% express or implied.  As the programs were written for research`
`+% purposes only, they have not been tested to the degree that would be`
`+% advisable in any important application.  All use of these programs is`
`+% entirely at the user's own risk.`
`+%`
`+% [ml-class] Changes Made:`
`+% 1) Function name and argument specifications`
`+% 2) Output display`
`+%`
`+`
`+% Read options`
`+if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter')`
`+    length = options.MaxIter;`
`+else`
`+    length = 100;`
`+end`
`+`
`+`
`+RHO = 0.01;                            % a bunch of constants for line searches`
`+SIG = 0.5;       % RHO and SIG are the constants in the Wolfe-Powell conditions`
`+INT = 0.1;    % don't reevaluate within 0.1 of the limit of the current bracket`
`+EXT = 3.0;                    % extrapolate maximum 3 times the current bracket`
`+MAX = 20;                         % max 20 function evaluations per line search`
`+RATIO = 100;                                      % maximum allowed slope ratio`
`+`
`+argstr = ['feval(f, X'];                      % compose string used to call function`
`+for i = 1:(nargin - 3)`
`+  argstr = [argstr, ',P', int2str(i)];`
`+end`
`+argstr = [argstr, ')'];`
`+`
`+if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end`
`+S=['Iteration '];`
`+`
`+i = 0;                                            % zero the run length counter`
`+ls_failed = 0;                             % no previous line search has failed`
`+fX = [];`
`+[f1 df1] = eval(argstr);                      % get function value and gradient`
`+i = i + (length<0);                                            % count epochs?!`
`+s = -df1;                                        % search direction is steepest`
`+d1 = -s'*s;                                                 % this is the slope`
`+z1 = red/(1-d1);                                  % initial step is red/(|s|+1)`
`+`
`+while i < abs(length)                                      % while not finished`
`+  i = i + (length>0);                                      % count iterations?!`
`+`
`+  X0 = X; f0 = f1; df0 = df1;                   % make a copy of current values`
`+  X = X + z1*s;                                             % begin line search`
`+  [f2 df2] = eval(argstr);`
`+  i = i + (length<0);                                          % count epochs?!`