Miki Tebeka committed 2f0307d

ex3 using pybrain

  • Parent commits 49428a4

Files changed (18)

ex2-1.py

-# coding=utf8
-
-import matplotlib.pylab as plt
-import numpy as np
-from sklearn import linear_model
-
-
-def parse_line(line):
-    return map(float, line.split(','))
-
-
-def load():
-    return np.loadtxt('ex2/ex2data1.txt', delimiter=',')
-
-
-def load_features():
-    data = load()
-    xys = data[:,0:2]
-    ones = np.ones((len(xys), 1))
-    return np.append(ones, xys, 1), data[:,2]
-
-
-def sigmoid(z):
-    return 1. / (1 + np.e**(-z))
-
-usig = np.vectorize(sigmoid)
-
-
-def diff(theta, xi, yi):
-    ht = xi * theta.T
-    return -np.log(ht) if yi else np.log(1-ht)
-
-
-def J(theta, xs, ys):
-    d = lambda xi, yi: diff(theta, xi, yi)
-    m = float(len(xs))
-    return sum(d(xi, yi) for (xi, yi) in zip(xs, ys)) / m
-
-
-def learn_p():
-    xs, ys = load_features()
-    clf = linear_model.LogisticRegression()
-    clf.fit(xs, ys)
-
-    count = 0
-    for x, y in zip(xs, ys):
-        v = clf.predict(x)
-        if v >= 0.5 and y:
-            count += 1
-        elif v < 0.5 and not y:
-            count += 1
-
-    return float(count) / len(xs) * 100
-
-
-def plot(data):
-    passed = data[data[:,2]==1]
-    failed = data[data[:,2]==0]
-
-
-    fig = plt.figure()
-    ax = fig.add_subplot(111)
-    ax.scatter(passed[:,0], passed[:,1], color='black', marker='+',
-               label='Admitted')
-    ax.scatter(failed[:,0], failed[:,1], color='red', marker='o',
-              label='Not admitted')
-    ax.set_xlabel('Exam 1 score')
-    ax.set_ylabel('Exam 2 score')
-    plt.legend()
-    plt.show()
-
-
-if __name__ == '__main__':
-    print(learn_p())
+# coding=utf8
+
+import matplotlib.pylab as plt
+import numpy as np
+from sklearn import linear_model
+
+
+def parse_line(line):
+    return map(float, line.split(','))
+
+
+def load():
+    return np.loadtxt('ex2/ex2data1.txt', delimiter=',')
+
+
+def load_features():
+    data = load()
+    xys = data[:,0:2]
+    ones = np.ones((len(xys), 1))
+    return np.append(ones, xys, 1), data[:,2]
+
+
+def sigmoid(z):
+    return 1. / (1 + np.e**(-z))
+
+usig = np.vectorize(sigmoid)
+
+
+def diff(theta, xi, yi):
+    ht = xi * theta.T
+    return -np.log(ht) if yi else np.log(1-ht)
+
+
+def J(theta, xs, ys):
+    d = lambda xi, yi: diff(theta, xi, yi)
+    m = float(len(xs))
+    return sum(d(xi, yi) for (xi, yi) in zip(xs, ys)) / m
+
+
+def learn_p():
+    xs, ys = load_features()
+    clf = linear_model.LogisticRegression()
+    clf.fit(xs, ys)
+
+    count = 0
+    for x, y in zip(xs, ys):
+        v = clf.predict(x)
+        if v >= 0.5 and y:
+            count += 1
+        elif v < 0.5 and not y:
+            count += 1
+
+    return float(count) / len(xs) * 100
+
+
+def plot(data, xlabel='Exam 1 score', ylabel='Exam 2 score', legend=True):
+    passed = data[data[:,2]==1]
+    failed = data[data[:,2]==0]
+
+
+    fig = plt.figure()
+    ax = fig.add_subplot(111)
+    ax.scatter(passed[:,0], passed[:,1], color='black', marker='+',
+               label='Admitted')
+    ax.scatter(failed[:,0], failed[:,1], color='red', marker='o',
+              label='Not admitted')
+    ax.set_xlabel(xlabel)
+    ax.set_ylabel(ylabel)
+    if legend:
+        plt.legend()
+    plt.show()
+
+
+if __name__ == '__main__':
+    print(learn_p())
+#!/usr/bin/env python
+
+import numpy as np
+from ex2_1 import plot
+
+def load():
+    data = np.loadtxt('ex2/ex2data2.txt', delimiter=',')
+    return data[:,0:2], data[:,2]
+
+def map_feature(x1, x2):
+    '''
+    Maps the two input features to polynomial features up to degree 6.
+
+    Returns a new feature array with more features, comprising
+    X1, X2, X1**2, X2**2, X1*X2, X1*X2**2, etc.
+
+    Inputs X1, X2 must be the same size
+    '''
+    x1.shape = (x1.size, 1)
+    x2.shape = (x2.size, 1)
+    degree = 6
+    out = np.ones(shape=(x1[:, 0].size, 1))
+
+    for i in range(1, degree + 1):
+        for j in range(i + 1):
+            r = (x1 ** (i - j)) * (x2 ** j)
+            out = np.append(out, r, axis=1)
+
+    return out
+
+
+
+if __name__ == '__main__':
+    data = load()
+    plot(data, 't1', 't2')
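
As a quick sanity check on the degree-6 mapping, the feature count should come out to 28 columns (the ones column plus 2 + 3 + ... + 7 polynomial terms). A minimal sketch, assuming the functions above are importable, and using copies because map_feature reshapes its arguments in place:

    X, y = load()                                        # X: (m, 2) test scores, y: (m,) labels
    out = map_feature(X[:, 0].copy(), X[:, 1].copy())    # copies: map_feature sets .shape in place
    print(out.shape)                                     # expected: (m, 28)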

ex3.pdf

Binary file added.
+#!/usr/bin/env python
+
+from pybrain.datasets import SupervisedDataSet
+from scipy.io import loadmat
+
+data = loadmat('ex3/ex3data1.mat')
+X, y = data['X'], data['y']
+ds = SupervisedDataSet(X.shape[1], y.shape[1])
+
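
The dataset here is constructed but not yet filled or trained. A possible continuation with PyBrain, purely as a sketch -- the hidden-layer size, epoch count, and the choice to keep the raw 1..10 label as a single target (rather than a one-of-many ClassificationDataSet) are assumptions, not part of this commit:

    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.supervised.trainers import BackpropTrainer

    for xi, yi in zip(X, y):
        ds.addSample(xi, yi)                         # 400 pixel inputs, one target value per example

    net = buildNetwork(X.shape[1], 25, y.shape[1])   # 400-25-1 feedforward network (sizes assumed)
    trainer = BackpropTrainer(net, ds)
    for epoch in range(5):
        print(trainer.train())                       # one backprop pass over the dataset; prints the error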

ex3/displayData.m

+function [h, display_array] = displayData(X, example_width)
+%DISPLAYDATA Display 2D data in a nice grid
+%   [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
+%   stored in X in a nice grid. It returns the figure handle h and the 
+%   displayed array if requested.
+
+% Set example_width automatically if not passed in
+if ~exist('example_width', 'var') || isempty(example_width) 
+	example_width = round(sqrt(size(X, 2)));
+end
+
+% Gray Image
+colormap(gray);
+
+% Compute rows, cols
+[m n] = size(X);
+example_height = (n / example_width);
+
+% Compute number of items to display
+display_rows = floor(sqrt(m));
+display_cols = ceil(m / display_rows);
+
+% Between images padding
+pad = 1;
+
+% Setup blank display
+display_array = - ones(pad + display_rows * (example_height + pad), ...
+                       pad + display_cols * (example_width + pad));
+
+% Copy each example into a patch on the display array
+curr_ex = 1;
+for j = 1:display_rows
+	for i = 1:display_cols
+		if curr_ex > m, 
+			break; 
+		end
+		% Copy the patch
+		
+		% Get the max value of the patch
+		max_val = max(abs(X(curr_ex, :)));
+		display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
+		              pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
+						reshape(X(curr_ex, :), example_height, example_width) / max_val;
+		curr_ex = curr_ex + 1;
+	end
+	if curr_ex > m, 
+		break; 
+	end
+end
+
+% Display Image
+h = imagesc(display_array, [-1 1]);
+
+% Do not show axis
+axis image off
+
+drawnow;
+
+end
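
displayData reshapes each 400-element row of X back into a 20x20 patch; since the .mat file stores those patches column-major, a Python-side equivalent needs a Fortran-order reshape. A small sketch for viewing one example (assumes X from the loadmat snippet above; the row index is arbitrary):

    import matplotlib.pyplot as plt

    img = X[1500].reshape(20, 20, order='F')   # column-major, matching MATLAB's reshape
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.show()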

ex3/ex3.m

+%% Machine Learning Online Class - Exercise 3 | Part 1: One-vs-all
+
+%  Instructions
+%  ------------
+% 
+%  This file contains code that helps you get started on the
+%  linear exercise. You will need to complete the following functions 
+%  in this exercise:
+%
+%     lrCostFunction.m (logistic regression cost function)
+%     oneVsAll.m
+%     predictOneVsAll.m
+%     predict.m
+%
+%  For this exercise, you will not need to change any code in this file,
+%  or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% Setup the parameters you will use for this part of the exercise
+input_layer_size  = 400;  % 20x20 Input Images of Digits
+num_labels = 10;          % 10 labels, from 1 to 10   
+                          % (note that we have mapped "0" to label 10)
+
+%% =========== Part 1: Loading and Visualizing Data =============
+%  We start the exercise by first loading and visualizing the dataset. 
+%  You will be working with a dataset that contains handwritten digits.
+%
+
+% Load Training Data
+fprintf('Loading and Visualizing Data ...\n')
+
+load('ex3data1.mat'); % training data stored in arrays X, y
+m = size(X, 1);
+
+% Randomly select 100 data points to display
+rand_indices = randperm(m);
+sel = X(rand_indices(1:100), :);
+
+displayData(sel);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% ============ Part 2: Vectorize Logistic Regression ============
+%  In this part of the exercise, you will reuse your logistic regression
+%  code from the last exercise. Your task here is to make sure that your
+%  regularized logistic regression implementation is vectorized. After
+%  that, you will implement one-vs-all classification for the handwritten
+%  digit dataset.
+%
+
+fprintf('\nTraining One-vs-All Logistic Regression...\n')
+
+lambda = 0.1;
+[all_theta] = oneVsAll(X, y, num_labels, lambda);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 3: Predict for One-Vs-All ================
+%  After ...
+pred = predictOneVsAll(all_theta, X);
+
+fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
+

ex3/ex3_nn.m

+%% Machine Learning Online Class - Exercise 3 | Part 2: Neural Networks
+
+%  Instructions
+%  ------------
+% 
+%  This file contains code that helps you get started on the
+%  linear exercise. You will need to complete the following functions 
+%  in this exercise:
+%
+%     lrCostFunction.m (logistic regression cost function)
+%     oneVsAll.m
+%     predictOneVsAll.m
+%     predict.m
+%
+%  For this exercise, you will not need to change any code in this file,
+%  or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% Setup the parameters you will use for this exercise
+input_layer_size  = 400;  % 20x20 Input Images of Digits
+hidden_layer_size = 25;   % 25 hidden units
+num_labels = 10;          % 10 labels, from 1 to 10   
+                          % (note that we have mapped "0" to label 10)
+
+%% =========== Part 1: Loading and Visualizing Data =============
+%  We start the exercise by first loading and visualizing the dataset. 
+%  You will be working with a dataset that contains handwritten digits.
+%
+
+% Load Training Data
+fprintf('Loading and Visualizing Data ...\n')
+
+load('ex3data1.mat');
+m = size(X, 1);
+
+% Randomly select 100 data points to display
+sel = randperm(size(X, 1));
+sel = sel(1:100);
+
+displayData(X(sel, :));
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% ================ Part 2: Loading Parameters ================
+% In this part of the exercise, we load some pre-initialized 
+% neural network parameters.
+
+fprintf('\nLoading Saved Neural Network Parameters ...\n')
+
+% Load the weights into variables Theta1 and Theta2
+load('ex3weights.mat');
+
+%% ================= Part 3: Implement Predict =================
+%  After training the neural network, we would like to use it to predict
+%  the labels. You will now implement the "predict" function to use the
+%  neural network to predict the labels of the training set. This lets
+%  you compute the training set accuracy.
+
+pred = predict(Theta1, Theta2, X);
+
+fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%  To give you an idea of the network's output, you can also run
+%  through the examples one at a time to see what it is predicting.
+
+%  Randomly permute examples
+rp = randperm(m);
+
+for i = 1:m
+    % Display 
+    fprintf('\nDisplaying Example Image\n');
+    displayData(X(rp(i), :));
+
+    pred = predict(Theta1, Theta2, X(rp(i),:));
+    fprintf('\nNeural Network Prediction: %d (digit %d)\n', pred, mod(pred, 10));
+    
+    % Pause
+    fprintf('Program paused. Press enter to continue.\n');
+    pause;
+end
+

ex3/ex3data1.mat

Binary file added.

ex3/ex3weights.mat

Binary file added.

ex3/fmincg.m

+function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
+% Minimize a continuous differentiable multivariate function. Starting point
+% is given by "X" (D by 1), and the function named in the string "f", must
+% return a function value and a vector of partial derivatives. The Polack-
+% Ribiere flavour of conjugate gradients is used to compute search directions,
+% and a line search using quadratic and cubic polynomial approximations and the
+% Wolfe-Powell stopping criteria is used together with the slope ratio method
+% for guessing initial step sizes. Additionally a bunch of checks are made to
+% make sure that exploration is taking place and that extrapolation will not
+% be unboundedly large. The "length" gives the length of the run: if it is
+% positive, it gives the maximum number of line searches, if negative its
+% absolute gives the maximum allowed number of function evaluations. You can
+% (optionally) give "length" a second component, which will indicate the
+% reduction in function value to be expected in the first line-search (defaults
+% to 1.0). The function returns when either its length is up, or if no further
+% progress can be made (ie, we are at a minimum, or so close that due to
+% numerical problems, we cannot get any closer). If the function terminates
+% within a few iterations, it could be an indication that the function value
+% and derivatives are not consistent (ie, there may be a bug in the
+% implementation of your "f" function). The function returns the found
+% solution "X", a vector of function values "fX" indicating the progress made
+% and "i" the number of iterations (line searches or function evaluations,
+% depending on the sign of "length") used.
+%
+% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
+%
+% See also: checkgrad 
+%
+% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13
+%
+%
+% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen
+% 
+% Permission is granted for anyone to copy, use, or modify these
+% programs and accompanying documents for purposes of research or
+% education, provided this copyright notice is retained, and note is
+% made of any changes that have been made.
+% 
+% These programs and documents are distributed without any warranty,
+% express or implied.  As the programs were written for research
+% purposes only, they have not been tested to the degree that would be
+% advisable in any important application.  All use of these programs is
+% entirely at the user's own risk.
+%
+% [ml-class] Changes Made:
+% 1) Function name and argument specifications
+% 2) Output display
+%
+
+% Read options
+if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter')
+    length = options.MaxIter;
+else
+    length = 100;
+end
+
+
+RHO = 0.01;                            % a bunch of constants for line searches
+SIG = 0.5;       % RHO and SIG are the constants in the Wolfe-Powell conditions
+INT = 0.1;    % don't reevaluate within 0.1 of the limit of the current bracket
+EXT = 3.0;                    % extrapolate maximum 3 times the current bracket
+MAX = 20;                         % max 20 function evaluations per line search
+RATIO = 100;                                      % maximum allowed slope ratio
+
+argstr = ['feval(f, X'];                      % compose string used to call function
+for i = 1:(nargin - 3)
+  argstr = [argstr, ',P', int2str(i)];
+end
+argstr = [argstr, ')'];
+
+if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end
+S=['Iteration '];
+
+i = 0;                                            % zero the run length counter
+ls_failed = 0;                             % no previous line search has failed
+fX = [];
+[f1 df1] = eval(argstr);                      % get function value and gradient
+i = i + (length<0);                                            % count epochs?!
+s = -df1;                                        % search direction is steepest
+d1 = -s'*s;                                                 % this is the slope
+z1 = red/(1-d1);                                  % initial step is red/(|s|+1)
+
+while i < abs(length)                                      % while not finished
+  i = i + (length>0);                                      % count iterations?!
+
+  X0 = X; f0 = f1; df0 = df1;                   % make a copy of current values
+  X = X + z1*s;                                             % begin line search
+  [f2 df2] = eval(argstr);
+  i = i + (length<0);                                          % count epochs?!
+  d2 = df2'*s;
+  f3 = f1; d3 = d1; z3 = -z1;             % initialize point 3 equal to point 1
+  if length>0, M = MAX; else M = min(MAX, -length-i); end
+  success = 0; limit = -1;                     % initialize quantities
+  while 1
+    while ((f2 > f1+z1*RHO*d1) | (d2 > -SIG*d1)) & (M > 0) 
+      limit = z1;                                         % tighten the bracket
+      if f2 > f1
+        z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3);                 % quadratic fit
+      else
+        A = 6*(f2-f3)/z3+3*(d2+d3);                                 % cubic fit
+        B = 3*(f3-f2)-z3*(d3+2*d2);
+        z2 = (sqrt(B*B-A*d2*z3*z3)-B)/A;       % numerical error possible - ok!
+      end
+      if isnan(z2) | isinf(z2)
+        z2 = z3/2;                  % if we had a numerical problem then bisect
+      end
+      z2 = max(min(z2, INT*z3),(1-INT)*z3);  % don't accept too close to limits
+      z1 = z1 + z2;                                           % update the step
+      X = X + z2*s;
+      [f2 df2] = eval(argstr);
+      M = M - 1; i = i + (length<0);                           % count epochs?!
+      d2 = df2'*s;
+      z3 = z3-z2;                    % z3 is now relative to the location of z2
+    end
+    if f2 > f1+z1*RHO*d1 | d2 > -SIG*d1
+      break;                                                % this is a failure
+    elseif d2 > SIG*d1
+      success = 1; break;                                             % success
+    elseif M == 0
+      break;                                                          % failure
+    end
+    A = 6*(f2-f3)/z3+3*(d2+d3);                      % make cubic extrapolation
+    B = 3*(f3-f2)-z3*(d3+2*d2);
+    z2 = -d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3));        % num. error possible - ok!
+    if ~isreal(z2) | isnan(z2) | isinf(z2) | z2 < 0   % num prob or wrong sign?
+      if limit < -0.5                               % if we have no upper limit
+        z2 = z1 * (EXT-1);                 % extrapolate the maximum amount
+      else
+        z2 = (limit-z1)/2;                                   % otherwise bisect
+      end
+    elseif (limit > -0.5) & (z2+z1 > limit)          % extrapolation beyond max?
+      z2 = (limit-z1)/2;                                               % bisect
+    elseif (limit < -0.5) & (z2+z1 > z1*EXT)       % extrapolation beyond limit
+      z2 = z1*(EXT-1.0);                           % set to extrapolation limit
+    elseif z2 < -z3*INT
+      z2 = -z3*INT;
+    elseif (limit > -0.5) & (z2 < (limit-z1)*(1.0-INT))   % too close to limit?
+      z2 = (limit-z1)*(1.0-INT);
+    end
+    f3 = f2; d3 = d2; z3 = -z2;                  % set point 3 equal to point 2
+    z1 = z1 + z2; X = X + z2*s;                      % update current estimates
+    [f2 df2] = eval(argstr);
+    M = M - 1; i = i + (length<0);                             % count epochs?!
+    d2 = df2'*s;
+  end                                                      % end of line search
+
+  if success                                         % if line search succeeded
+    f1 = f2; fX = [fX' f1]';
+    fprintf('%s %4i | Cost: %4.6e\r', S, i, f1);
+    s = (df2'*df2-df1'*df2)/(df1'*df1)*s - df2;      % Polack-Ribiere direction
+    tmp = df1; df1 = df2; df2 = tmp;                         % swap derivatives
+    d2 = df1'*s;
+    if d2 > 0                                      % new slope must be negative
+      s = -df1;                              % otherwise use steepest direction
+      d2 = -s'*s;    
+    end
+    z1 = z1 * min(RATIO, d1/(d2-realmin));          % slope ratio but max RATIO
+    d1 = d2;
+    ls_failed = 0;                              % this line search did not fail
+  else
+    X = X0; f1 = f0; df1 = df0;  % restore point from before failed line search
+    if ls_failed | i > abs(length)          % line search failed twice in a row
+      break;                             % or we ran out of time, so we give up
+    end
+    tmp = df1; df1 = df2; df2 = tmp;                         % swap derivatives
+    s = -df1;                                                    % try steepest
+    d1 = -s'*s;
+    z1 = 1/(1-d1);                     
+    ls_failed = 1;                                    % this line search failed
+  end
+  if exist('OCTAVE_VERSION')
+    fflush(stdout);
+  end
+end
+fprintf('\n');

ex3/lrCostFunction.m

+function [J, grad] = lrCostFunction(theta, X, y, lambda)
+%LRCOSTFUNCTION Compute cost and gradient for logistic regression with 
+%regularization
+%   J = LRCOSTFUNCTION(theta, X, y, lambda) computes the cost of using
+%   theta as the parameter for regularized logistic regression and the
+%   gradient of the cost w.r.t. the parameters. 
+
+% Initialize some useful values
+m = length(y); % number of training examples
+
+% You need to return the following variables correctly 
+J = 0;
+grad = zeros(size(theta));
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Compute the cost of a particular choice of theta.
+%               You should set J to the cost.
+%               Compute the partial derivatives and set grad to the partial
+%               derivatives of the cost w.r.t. each parameter in theta
+%
+% Hint: The computation of the cost function and gradients can be
+%       efficiently vectorized. For example, consider the computation
+%
+%           sigmoid(X * theta)
+%
+%       Each row of the resulting matrix will contain the value of the
+%       prediction for that example. You can make use of this to vectorize
+%       the cost function and gradient computations. 
+%
+% Hint: When computing the gradient of the regularized cost function, 
+%       there're many possible vectorized solutions, but one solution
+%       looks like:
+%           grad = (unregularized gradient for logistic regression)
+%           temp = theta; 
+%           temp(1) = 0;   % because we don't add anything for j = 0  
+%           grad = grad + YOUR_CODE_HERE (using the temp variable)
+%
+
+
+
+
+
+
+
+
+
+
+% =============================================================
+
+grad = grad(:);
+
+end
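
For reference, the vectorized cost and gradient that the hints describe fit in a few NumPy lines. This is only an illustrative sketch (the names and the NumPy setting are assumptions, not the Octave solution):

    import numpy as np

    def lr_cost(theta, X, y, lam):
        m = len(y)
        h = 1.0 / (1.0 + np.exp(-X.dot(theta)))      # sigmoid(X * theta), one prediction per row
        reg = theta.copy()
        reg[0] = 0                                   # don't regularize the bias term (j = 0)
        J = (-y.dot(np.log(h)) - (1 - y).dot(np.log(1 - h))) / m + lam / (2.0 * m) * reg.dot(reg)
        grad = X.T.dot(h - y) / m + (lam / m) * reg
        return J, grad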

ex3/oneVsAll.m

+function [all_theta] = oneVsAll(X, y, num_labels, lambda)
+%ONEVSALL trains multiple logistic regression classifiers and returns all
+%the classifiers in a matrix all_theta, where the i-th row of all_theta 
+%corresponds to the classifier for label i
+%   [all_theta] = ONEVSALL(X, y, num_labels, lambda) trains num_labels
+%   logistic regression classifiers and returns each of these classifiers
+%   in a matrix all_theta, where the i-th row of all_theta corresponds 
+%   to the classifier for label i
+
+% Some useful variables
+m = size(X, 1);
+n = size(X, 2);
+
+% You need to return the following variables correctly 
+all_theta = zeros(num_labels, n + 1);
+
+% Add ones to the X data matrix
+X = [ones(m, 1) X];
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: You should complete the following code to train num_labels
+%               logistic regression classifiers with regularization
+%               parameter lambda. 
+%
+% Hint: theta(:) will return a column vector.
+%
+% Hint: You can use y == c to obtain a vector of 1's and 0's that tell you
+%       whether the ground truth is true/false for this class.
+%
+% Note: For this assignment, we recommend using fmincg to optimize the cost
+%       function. It is okay to use a for-loop (for c = 1:num_labels) to
+%       loop over the different classes.
+%
+%       fmincg works similarly to fminunc, but is more efficient when we
+%       are dealing with a large number of parameters.
+%
+% Example Code for fmincg:
+%
+%     % Set Initial theta
+%     initial_theta = zeros(n + 1, 1);
+%     
+%     % Set options for fminunc
+%     options = optimset('GradObj', 'on', 'MaxIter', 50);
+% 
+%     % Run fmincg to obtain the optimal theta
+%     % This function will return theta and the cost 
+%     [theta] = ...
+%         fmincg (@(t)(lrCostFunction(t, X, (y == c), lambda)), ...
+%                 initial_theta, options);
+%
+
+
+
+
+
+
+
+
+
+
+
+
+% =========================================================================
+
+
+end
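
The same one-vs-all loop can be sketched in Python with SciPy's minimizer standing in for fmincg. Illustrative only: lr_cost is the sketch above, y is assumed to be a flat vector of labels 1..num_labels, and the L-BFGS-B solver is an arbitrary stand-in:

    import numpy as np
    from scipy.optimize import minimize

    def one_vs_all(X, y, num_labels, lam):
        m, n = X.shape
        Xb = np.hstack([np.ones((m, 1)), X])          # add the column of ones
        all_theta = np.zeros((num_labels, n + 1))
        for c in range(1, num_labels + 1):
            res = minimize(lr_cost, np.zeros(n + 1),
                           args=(Xb, (y == c).astype(float), lam),
                           jac=True, method='L-BFGS-B', options={'maxiter': 50})
            all_theta[c - 1] = res.x                  # one trained theta per class
        return all_theta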

ex3/predict.m

+function p = predict(Theta1, Theta2, X)
+%PREDICT Predict the label of an input given a trained neural network
+%   p = PREDICT(Theta1, Theta2, X) outputs the predicted label of X given the
+%   trained weights of a neural network (Theta1, Theta2)
+
+% Useful values
+m = size(X, 1);
+num_labels = size(Theta2, 1);
+
+% You need to return the following variables correctly 
+p = zeros(size(X, 1), 1);
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Complete the following code to make predictions using
+%               your learned neural network. You should set p to a 
+%               vector containing labels between 1 to num_labels.
+%
+% Hint: The max function might come in useful. In particular, the max
+%       function can also return the index of the max element, for more
+%       information see 'help max'. If your examples are in rows, then, you
+%       can use max(A, [], 2) to obtain the max for each row.
+%
+
+
+
+
+
+
+
+
+
+% =========================================================================
+
+
+end
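
The feedforward pass this stub asks for is two sigmoid layers followed by a row-wise argmax; a NumPy sketch under the same Theta1/Theta2 shapes (illustrative, not the Octave solution):

    import numpy as np

    def nn_predict(Theta1, Theta2, X):
        m = X.shape[0]
        a1 = np.hstack([np.ones((m, 1)), X])              # input layer plus bias unit
        a2 = 1.0 / (1.0 + np.exp(-a1.dot(Theta1.T)))      # hidden layer activations
        a2 = np.hstack([np.ones((m, 1)), a2])
        a3 = 1.0 / (1.0 + np.exp(-a2.dot(Theta2.T)))      # output layer, one column per label
        return np.argmax(a3, axis=1) + 1                  # labels are 1-based ("0" is label 10)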

ex3/predictOneVsAll.m

+function p = predictOneVsAll(all_theta, X)
+%PREDICT Predict the label for a trained one-vs-all classifier. The labels 
+%are in the range 1..K, where K = size(all_theta, 1). 
+%  p = PREDICTONEVSALL(all_theta, X) will return a vector of predictions
+%  for each example in the matrix X. Note that X contains the examples in
+%  rows. all_theta is a matrix where the i-th row is a trained logistic
+%  regression theta vector for the i-th class. You should set p to a vector
+%  of values from 1..K (e.g., p = [1; 3; 1; 2] predicts classes 1, 3, 1, 2
+%  for 4 examples) 
+
+m = size(X, 1);
+num_labels = size(all_theta, 1);
+
+% You need to return the following variables correctly 
+p = zeros(size(X, 1), 1);
+
+% Add ones to the X data matrix
+X = [ones(m, 1) X];
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Complete the following code to make predictions using
+%               your learned logistic regression parameters (one-vs-all).
+%               You should set p to a vector of predictions (from 1 to
+%               num_labels).
+%
+% Hint: This code can be done all vectorized using the max function.
+%       In particular, the max function can also return the index of the 
+%       max element, for more information see 'help max'. If your examples 
+%       are in rows, then you can use max(A, [], 2) to obtain the max 
+%       for each row.
+%       
+
+
+
+
+
+
+
+% =========================================================================
+
+
+end
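
The one-vs-all prediction is just a row-wise argmax over the class scores (the sigmoid is monotonic, so X*theta ranks the classes the same way sigmoid(X*theta) does). A NumPy sketch under the same conventions, illustrative only:

    import numpy as np

    def predict_one_vs_all(all_theta, X):
        m = X.shape[0]
        Xb = np.hstack([np.ones((m, 1)), X])      # add ones, as in the Octave version
        scores = Xb.dot(all_theta.T)              # one column of scores per class
        return np.argmax(scores, axis=1) + 1      # back to 1-based labels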

ex3/sigmoid.m

+function g = sigmoid(z)
+%SIGMOID Compute sigmoid function
+%   g = SIGMOID(z) computes the sigmoid of z.
+
+g = 1.0 ./ (1.0 + exp(-z));
+end

ex3/submit.m

+function submit(partId, webSubmit)
+%SUBMIT Submit your code and output to the ml-class servers
+%   SUBMIT() will connect to the ml-class server and submit your solution
+
+  fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ...
+          homework_id());
+  if ~exist('partId', 'var') || isempty(partId)
+    partId = promptPart();
+  end
+
+  if ~exist('webSubmit', 'var') || isempty(webSubmit)
+    webSubmit = 0; % submit directly by default 
+  end
+
+  % Check valid partId
+  partNames = validParts();
+  if ~isValidPartId(partId)
+    fprintf('!! Invalid homework part selected.\n');
+    fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames) + 1);
+    fprintf('!! Submission Cancelled\n');
+    return
+  end
+
+  if ~exist('ml_login_data.mat','file')
+    [login password] = loginPrompt();
+    save('ml_login_data.mat','login','password');
+  else  
+    load('ml_login_data.mat');
+    [login password] = quickLogin(login, password);
+    save('ml_login_data.mat','login','password');
+  end
+
+  if isempty(login)
+    fprintf('!! Submission Cancelled\n');
+    return
+  end
+
+  fprintf('\n== Connecting to ml-class ... '); 
+  if exist('OCTAVE_VERSION') 
+    fflush(stdout);
+  end
+
+  % Setup submit list
+  if partId == numel(partNames) + 1
+    submitParts = 1:numel(partNames);
+  else
+    submitParts = [partId];
+  end
+
+  for s = 1:numel(submitParts)
+    thisPartId = submitParts(s);
+    if (~webSubmit) % submit directly to server
+      [login, ch, signature, auxstring] = getChallenge(login, thisPartId);
+      if isempty(login) || isempty(ch) || isempty(signature)
+        % Some error occurred, error string in first return element.
+        fprintf('\n!! Error: %s\n\n', login);
+        return
+      end
+
+      % Attempt Submission with Challenge
+      ch_resp = challengeResponse(login, password, ch);
+
+      [result, str] = submitSolution(login, ch_resp, thisPartId, ...
+             output(thisPartId, auxstring), source(thisPartId), signature);
+
+      partName = partNames{thisPartId};
+
+      fprintf('\n== [ml-class] Submitted Assignment %s - Part %d - %s\n', ...
+        homework_id(), thisPartId, partName);
+      fprintf('== %s\n', strtrim(str));
+
+      if exist('OCTAVE_VERSION')
+        fflush(stdout);
+      end
+    else
+      [result] = submitSolutionWeb(login, thisPartId, output(thisPartId), ...
+                            source(thisPartId));
+      result = base64encode(result);
+
+      fprintf('\nSave as submission file [submit_ex%s_part%d.txt (enter to accept default)]:', ...
+        homework_id(), thisPartId);
+      saveAsFile = input('', 's');
+      if (isempty(saveAsFile))
+        saveAsFile = sprintf('submit_ex%s_part%d.txt', homework_id(), thisPartId);
+      end
+
+      fid = fopen(saveAsFile, 'w');
+      if (fid)
+        fwrite(fid, result);
+        fclose(fid);
+        fprintf('\nSaved your solutions to %s.\n\n', saveAsFile);
+        fprintf(['You can now submit your solutions through the web \n' ...
+                 'form in the programming exercises. Select the corresponding \n' ...
+                 'programming exercise to access the form.\n']);
+
+      else
+        fprintf('Unable to save to %s\n\n', saveAsFile);
+        fprintf(['You can create a submission file by saving the \n' ...
+                 'following text in a file: (press enter to continue)\n\n']);
+        pause;
+        fprintf(result);
+      end
+    end
+  end
+end
+
+% ================== CONFIGURABLES FOR EACH HOMEWORK ==================
+
+function id = homework_id() 
+  id = '3';
+end
+
+function [partNames] = validParts()
+  partNames = { 'Vectorized Logistic Regression ', ...
+                'One-vs-all classifier training', ...
+                'One-vs-all classifier prediction', ...
+                'Neural network prediction function' ...
+                };
+end
+
+function srcs = sources()
+  % Separated by part
+  srcs = { { 'lrCostFunction.m' }, ...
+           { 'oneVsAll.m' }, ...
+           { 'predictOneVsAll.m' }, ...
+           { 'predict.m' } };
+end
+
+function out = output(partId, auxdata)
+  % Random Test Cases
+  X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))'];
+  y = sin(X(:,1) + X(:,2)) > 0;
+  Xm = [ -1 -1 ; -1 -2 ; -2 -1 ; -2 -2 ; ...
+          1 1 ;  1 2 ;  2 1 ; 2 2 ; ...
+         -1 1 ;  -1 2 ;  -2 1 ; -2 2 ; ...
+          1 -1 ; 1 -2 ;  -2 -1 ; -2 -2 ];
+  ym = [ 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 ]';
+  t1 = sin(reshape(1:2:24, 4, 3));
+  t2 = cos(reshape(1:2:40, 4, 5));
+
+  if partId == 1
+    [J, grad] = lrCostFunction([0.25 0.5 -0.5]', X, y, 0.1);
+    out = sprintf('%0.5f ', J);
+    out = [out sprintf('%0.5f ', grad)];
+  elseif partId == 2
+    out = sprintf('%0.5f ', oneVsAll(Xm, ym, 4, 0.1));
+  elseif partId == 3
+    out = sprintf('%0.5f ', predictOneVsAll(t1, Xm));
+  elseif partId == 4
+    out = sprintf('%0.5f ', predict(t1, t2, Xm));
+  end 
+end
+
+
+% ====================== SERVER CONFIGURATION ===========================
+
+% ***************** REMOVE -staging WHEN YOU DEPLOY *********************
+function url = site_url()
+  url = 'http://www.coursera.org/ml';
+end
+
+function url = challenge_url()
+  url = [site_url() '/assignment/challenge'];
+end
+
+function url = submit_url()
+  url = [site_url() '/assignment/submit'];
+end
+
+% ========================= CHALLENGE HELPERS =========================
+
+function src = source(partId)
+  src = '';
+  src_files = sources();
+  if partId <= numel(src_files)
+      flist = src_files{partId};
+      for i = 1:numel(flist)
+          fid = fopen(flist{i});
+          if (fid == -1) 
+            error('Error opening %s (is it missing?)', flist{i});
+          end
+          line = fgets(fid);
+          while ischar(line)
+            src = [src line];            
+            line = fgets(fid);
+          end
+          fclose(fid);
+          src = [src '||||||||'];
+      end
+  end
+end
+
+function ret = isValidPartId(partId)
+  partNames = validParts();
+  ret = (~isempty(partId)) && (partId >= 1) && (partId <= numel(partNames) + 1);
+end
+
+function partId = promptPart()
+  fprintf('== Select which part(s) to submit:\n');
+  partNames = validParts();
+  srcFiles = sources();
+  for i = 1:numel(partNames)
+    fprintf('==   %d) %s [', i, partNames{i});
+    fprintf(' %s ', srcFiles{i}{:});
+    fprintf(']\n');
+  end
+  fprintf('==   %d) All of the above \n==\nEnter your choice [1-%d]: ', ...
+          numel(partNames) + 1, numel(partNames) + 1);
+  selPart = input('', 's');
+  partId = str2num(selPart);
+  if ~isValidPartId(partId)
+    partId = -1;
+  end
+end
+
+function [email,ch,signature,auxstring] = getChallenge(email, part)
+  str = urlread(challenge_url(), 'post', {'email_address', email, 'assignment_part_sid', [homework_id() '-' num2str(part)], 'response_encoding', 'delim'});
+
+  str = strtrim(str);
+  r = struct;
+  while(numel(str) > 0)
+    [f, str] = strtok (str, '|');
+    [v, str] = strtok (str, '|');
+    r = setfield(r, f, v);
+  end
+
+  email = getfield(r, 'email_address');
+  ch = getfield(r, 'challenge_key');
+  signature = getfield(r, 'state');
+  auxstring = getfield(r, 'challenge_aux_data');
+end
+
+function [result, str] = submitSolutionWeb(email, part, output, source)
+
+  result = ['{"assignment_part_sid":"' base64encode([homework_id() '-' num2str(part)], '') '",' ...
+            '"email_address":"' base64encode(email, '') '",' ...
+            '"submission":"' base64encode(output, '') '",' ...
+            '"submission_aux":"' base64encode(source, '') '"' ...
+            '}'];
+  str = 'Web-submission';
+end
+
+function [result, str] = submitSolution(email, ch_resp, part, output, ...
+                                        source, signature)
+
+  params = {'assignment_part_sid', [homework_id() '-' num2str(part)], ...
+            'email_address', email, ...
+            'submission', base64encode(output, ''), ...
+            'submission_aux', base64encode(source, ''), ...
+            'challenge_response', ch_resp, ...
+            'state', signature};
+
+  str = urlread(submit_url(), 'post', params);
+
+  % Parse str to read for success / failure
+  result = 0;
+
+end
+
+% =========================== LOGIN HELPERS ===========================
+
+function [login password] = loginPrompt()
+  % Prompt for password
+  [login password] = basicPrompt();
+  
+  if isempty(login) || isempty(password)
+    login = []; password = [];
+  end
+end
+
+
+function [login password] = basicPrompt()
+  login = input('Login (Email address): ', 's');
+  password = input('Password: ', 's');
+end
+
+function [login password] = quickLogin(login,password)
+  disp(['You are currently logged in as ' login '.']);
+  cont_token = input('Is this you? (y/n - type n to reenter password)','s');
+  if(isempty(cont_token) || cont_token(1)=='Y'||cont_token(1)=='y')
+    return;
+  else
+    [login password] = loginPrompt();
+  end
+end
+
+function [str] = challengeResponse(email, passwd, challenge)
+  str = sha1([challenge passwd]);
+end
+
+% =============================== SHA-1 ================================
+
+function hash = sha1(str)
+  
+  % Initialize variables
+  h0 = uint32(1732584193);
+  h1 = uint32(4023233417);
+  h2 = uint32(2562383102);
+  h3 = uint32(271733878);
+  h4 = uint32(3285377520);
+  
+  % Convert to word array
+  strlen = numel(str);
+
+  % Break string into chars and append the bit 1 to the message
+  mC = [double(str) 128];
+  mC = [mC zeros(1, 4-mod(numel(mC), 4), 'uint8')];
+  
+  numB = strlen * 8;
+  if exist('idivide')
+    numC = idivide(uint32(numB + 65), 512, 'ceil');
+  else
+    numC = ceil(double(numB + 65)/512);
+  end
+  numW = numC * 16;
+  mW = zeros(numW, 1, 'uint32');
+  
+  idx = 1;
+  for i = 1:4:strlen + 1
+    mW(idx) = bitor(bitor(bitor( ...
+                  bitshift(uint32(mC(i)), 24), ...
+                  bitshift(uint32(mC(i+1)), 16)), ...
+                  bitshift(uint32(mC(i+2)), 8)), ...
+                  uint32(mC(i+3)));
+    idx = idx + 1;
+  end
+  
+  % Append length of message
+  mW(numW - 1) = uint32(bitshift(uint64(numB), -32));
+  mW(numW) = uint32(bitshift(bitshift(uint64(numB), 32), -32));
+
+  % Process the message in successive 512-bit chunks
+  for cId = 1 : double(numC)
+    cSt = (cId - 1) * 16 + 1;
+    cEnd = cId * 16;
+    ch = mW(cSt : cEnd);
+    
+    % Extend the sixteen 32-bit words into eighty 32-bit words
+    for j = 17 : 80
+      ch(j) = ch(j - 3);
+      ch(j) = bitxor(ch(j), ch(j - 8));
+      ch(j) = bitxor(ch(j), ch(j - 14));
+      ch(j) = bitxor(ch(j), ch(j - 16));
+      ch(j) = bitrotate(ch(j), 1);
+    end
+  
+    % Initialize hash value for this ch
+    a = h0;
+    b = h1;
+    c = h2;
+    d = h3;
+    e = h4;
+    
+    % Main loop
+    for i = 1 : 80
+      if(i >= 1 && i <= 20)
+        f = bitor(bitand(b, c), bitand(bitcmp(b), d));
+        k = uint32(1518500249);
+      elseif(i >= 21 && i <= 40)
+        f = bitxor(bitxor(b, c), d);
+        k = uint32(1859775393);
+      elseif(i >= 41 && i <= 60)
+        f = bitor(bitor(bitand(b, c), bitand(b, d)), bitand(c, d));
+        k = uint32(2400959708);
+      elseif(i >= 61 && i <= 80)
+        f = bitxor(bitxor(b, c), d);
+        k = uint32(3395469782);
+      end
+      
+      t = bitrotate(a, 5);
+      t = bitadd(t, f);
+      t = bitadd(t, e);
+      t = bitadd(t, k);
+      t = bitadd(t, ch(i));
+      e = d;
+      d = c;
+      c = bitrotate(b, 30);
+      b = a;
+      a = t;
+      
+    end
+    h0 = bitadd(h0, a);
+    h1 = bitadd(h1, b);
+    h2 = bitadd(h2, c);
+    h3 = bitadd(h3, d);
+    h4 = bitadd(h4, e);
+
+  end
+
+  hash = reshape(dec2hex(double([h0 h1 h2 h3 h4]), 8)', [1 40]);
+  
+  hash = lower(hash);
+
+end
+
+function ret = bitadd(iA, iB)
+  ret = double(iA) + double(iB);
+  ret = bitset(ret, 33, 0);
+  ret = uint32(ret);
+end
+
+function ret = bitrotate(iA, places)
+  t = bitshift(iA, places - 32);
+  ret = bitshift(iA, places);
+  ret = bitor(ret, t);
+end
+
+% =========================== Base64 Encoder ============================
+% Thanks to Peter John Acklam
+%
+
+function y = base64encode(x, eol)
+%BASE64ENCODE Perform base64 encoding on a string.
+%
+%   BASE64ENCODE(STR, EOL) encode the given string STR.  EOL is the line ending
+%   sequence to use; it is optional and defaults to '\n' (ASCII decimal 10).
+%   The returned encoded string is broken into lines of no more than 76
+%   characters each, and each line will end with EOL unless it is empty.  Let
+%   EOL be empty if you do not want the encoded string broken into lines.
+%
+%   STR and EOL don't have to be strings (i.e., char arrays).  The only
+%   requirement is that they are vectors containing values in the range 0-255.
+%
+%   This function may be used to encode strings into the Base64 encoding
+%   specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions).  The
+%   Base64 encoding is designed to represent arbitrary sequences of octets in a
+%   form that need not be humanly readable.  A 65-character subset
+%   ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per
+%   printable character.
+%
+%   Examples
+%   --------
+%
+%   If you want to encode a large file, you should encode it in chunks that are
+%   a multiple of 57 bytes.  This ensures that the base64 lines line up and
+%   that you do not end up with padding in the middle.  57 bytes of data fills
+%   one complete base64 line (76 == 57*4/3):
+%
+%   If ifid and ofid are two file identifiers opened for reading and writing,
+%   respectively, then you can base64 encode the data with
+%
+%      while ~feof(ifid)
+%         fwrite(ofid, base64encode(fread(ifid, 60*57)));
+%      end
+%
+%   or, if you have enough memory,
+%
+%      fwrite(ofid, base64encode(fread(ifid)));
+%
+%   See also BASE64DECODE.
+
+%   Author:      Peter John Acklam
+%   Time-stamp:  2004-02-03 21:36:56 +0100
+%   E-mail:      pjacklam@online.no
+%   URL:         http://home.online.no/~pjacklam
+
+   if isnumeric(x)
+      x = num2str(x);
+   end
+
+   % make sure we have the EOL value
+   if nargin < 2
+      eol = sprintf('\n');
+   else
+      if sum(size(eol) > 1) > 1
+         error('EOL must be a vector.');
+      end
+      if any(eol(:) > 255)
+         error('EOL can not contain values larger than 255.');
+      end
+   end
+
+   if sum(size(x) > 1) > 1
+      error('STR must be a vector.');
+   end
+
+   x   = uint8(x);
+   eol = uint8(eol);
+
+   ndbytes = length(x);                 % number of decoded bytes
+   nchunks = ceil(ndbytes / 3);         % number of chunks/groups
+   nebytes = 4 * nchunks;               % number of encoded bytes
+
+   % add padding if necessary, to make the length of x a multiple of 3
+   if rem(ndbytes, 3)
+      x(end+1 : 3*nchunks) = 0;
+   end
+
+   x = reshape(x, [3, nchunks]);        % reshape the data
+   y = repmat(uint8(0), 4, nchunks);    % for the encoded data
+
+   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   % Split up every 3 bytes into 4 pieces
+   %
+   %    aaaaaabb bbbbcccc ccdddddd
+   %
+   % to form
+   %
+   %    00aaaaaa 00bbbbbb 00cccccc 00dddddd
+   %
+   y(1,:) = bitshift(x(1,:), -2);                  % 6 highest bits of x(1,:)
+
+   y(2,:) = bitshift(bitand(x(1,:), 3), 4);        % 2 lowest bits of x(1,:)
+   y(2,:) = bitor(y(2,:), bitshift(x(2,:), -4));   % 4 highest bits of x(2,:)
+
+   y(3,:) = bitshift(bitand(x(2,:), 15), 2);       % 4 lowest bits of x(2,:)
+   y(3,:) = bitor(y(3,:), bitshift(x(3,:), -6));   % 2 highest bits of x(3,:)
+
+   y(4,:) = bitand(x(3,:), 63);                    % 6 lowest bits of x(3,:)
+
+   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   % Now perform the following mapping
+   %
+   %   0  - 25  ->  A-Z
+   %   26 - 51  ->  a-z
+   %   52 - 61  ->  0-9
+   %   62       ->  +
+   %   63       ->  /
+   %
+   % We could use a mapping vector like
+   %
+   %   ['A':'Z', 'a':'z', '0':'9', '+/']
+   %
+   % but that would require an index vector of class double.
+   %
+   z = repmat(uint8(0), size(y));
+   i =           y <= 25;  z(i) = 'A'      + double(y(i));
+   i = 26 <= y & y <= 51;  z(i) = 'a' - 26 + double(y(i));
+   i = 52 <= y & y <= 61;  z(i) = '0' - 52 + double(y(i));
+   i =           y == 62;  z(i) = '+';
+   i =           y == 63;  z(i) = '/';
+   y = z;
+
+   %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   % Add padding if necessary.
+   %
+   npbytes = 3 * nchunks - ndbytes;     % number of padding bytes
+   if npbytes
+      y(end-npbytes+1 : end) = '=';     % '=' is used for padding
+   end
+
+   if isempty(eol)
+
+      % reshape to a row vector
+      y = reshape(y, [1, nebytes]);
+
+   else
+
+      nlines = ceil(nebytes / 76);      % number of lines
+      neolbytes = length(eol);          % number of bytes in eol string
+
+      % pad data so it becomes a multiple of 76 elements
+      y = [y(:) ; zeros(76 * nlines - numel(y), 1)];
+      y(nebytes + 1 : 76 * nlines) = 0;
+      y = reshape(y, 76, nlines);
+
+      % insert eol strings
+      eol = eol(:);
+      y(end + 1 : end + neolbytes, :) = eol(:, ones(1, nlines));
+
+      % remove padding, but keep the last eol string
+      m = nebytes + neolbytes * (nlines - 1);
+      n = (76+neolbytes)*nlines - neolbytes;
+      y(m+1 : n) = '';
+
+      % extract and reshape to row vector
+      y = reshape(y, 1, m+neolbytes);
+
+   end
+
+   % output is a character array
+   y = char(y);
+
+end

ex3/submitWeb.m

+% submitWeb Creates files from your code and output for web submission.
+%
+%   If the submit function does not work for you, use the web-submission mechanism.
+%   Call this function to produce a file for the part you wish to submit. Then,
+%   submit the file to the class servers using the "Web Submission" button on the 
+%   Programming Exercises page on the course website.
+%
+%   You should call this function without arguments (submitWeb), to receive
+%   an interactive prompt for submission; optionally you can call it with the partID
+%   if you so wish. Make sure your working directory is set to the directory 
+%   containing the submitWeb.m file and your assignment files.
+
+function submitWeb(partId)
+  if ~exist('partId', 'var') || isempty(partId)
+    partId = [];
+  end
+  
+  submit(partId, 1);
+end
+