Commits

Jiangge Zhang committed c3c0259

extract the calculate method of the network.

Comments (0)

Files changed (4)

bp.py

-#!/usr/bin/env python
-#-*- coding:utf-8 -*-
-
-from collections import namedtuple
-
-import numpy as np
-import matplotlib.pyplot as plt
-
-
-NeuronNumber = namedtuple("NeuronNumber", ["input", "hidden", "output"])
-LearningRate = namedtuple("LearningRate", ["alpha", "beta", "gamma"])
-sigmoid = lambda x: 1 / (1 + np.exp(-x))
-
-
-class BackPropagationNetwork(object):
-    """Back Propagation Neuron Network"""
-
-    def __init__(self, num, learning_rate):
-        """Initialize a neuron network.
-        :param num: named tuple `bpnn.NeuronNumber`, the numbers of neuron
-                    nodes in three layer.
-        :param learning_rate: named tuple `bpnn.LearningRate`, three rate
-                              argument alpha, beta and gamma.
-        """
-        self.num = num
-        self.learning_rate = learning_rate
-
-        # initialize weights matrix
-        self.input_to_hidden = np.random.rand(num.input, num.hidden) * 2 - 1
-        self.hidden_to_output = np.random.rand(num.hidden, num.output) * 2 - 1
-
-        # initialize thresholds
-        self.hidden_thresholds = np.random.rand(1, num.hidden) * 2 - 1
-        self.output_thresholds = np.random.rand(1, num.output) * 2 - 1
-
-        # initialize weights' error
-        self.input_to_hidden_error = np.zeros(self.input_to_hidden.shape)
-        self.hidden_to_output_error = np.zeros(self.hidden_to_output.shape)
-
-        # thresholds' error
-        self.hidden_thresholds_error = np.zeros(self.hidden_thresholds.shape)
-        self.output_thresholds_error = np.zeros(self.output_thresholds.shape)
-
-    def train(self, data, desired_result):
-        """Train the network with sample data.
-        :param data: sample data array, it should have a shape (sp_a, sp_b),
-                     where sp_a is not limited and sp_b should equal the
-                     number of input layer nodes.
-        :param desired_result: the desired training result for the sample
-                               data, it should have a shape (sp_a, sp_c),
-                               where sp_a should equal the number of sample
-                               items and sp_c the number of output layer
-                               nodes.
-        """
-        # validate input np.array
-        num_of_records, num_of_input_nodes = data.shape
-        num_of_result_records, num_of_output_nodes = desired_result.shape
-        assert num_of_input_nodes == self.num.input
-        assert num_of_output_nodes == self.num.output
-        assert num_of_records == num_of_result_records
-
-        # normalize data to the interval [-1, 1]
-        max_values = data.max(axis=0)
-        min_values = data.min(axis=0)
-        data = ((data - min_values) / (max_values - min_values) - 0.5) * 2
-
-        # iterate training network
-        alpha, beta, gamma = self.learning_rate  
-        evaluate_error = 0
-        for index, item in enumerate(data):
-            # desired result of current item
-            desired_result_item = desired_result[index]
-
-            # forward propagation
-            item.shape = 1, -1
-            hidden_activation = sigmoid(item.dot(self.input_to_hidden) +
-                                        self.hidden_thresholds)
-            output_activation = sigmoid(hidden_activation.dot(
-                                        self.hidden_to_output) +
-                                        self.output_thresholds)
-
-            # back propagation and calculate the errors
-            output_error = (output_activation * (1 - output_activation) *
-                            (desired_result_item - output_activation))
-            hidden_error = (hidden_activation * (1 - hidden_activation) *
-                            output_error.dot(self.hidden_to_output.T))
-
-            # adjust weights from hidden layer to output layer
-            self.hidden_to_output_error = (alpha * hidden_activation.T *
-                                           output_error + gamma *
-                                           self.hidden_to_output_error)
-            self.hidden_to_output += self.hidden_to_output_error
-
-            # adjust weights from input layer to hidden layer
-            self.input_to_hidden_error = beta * item.T.dot(hidden_error)
-            self.input_to_hidden += self.input_to_hidden_error
-
-            # adjust thresholds
-            self.hidden_thresholds_error = (beta * hidden_error + gamma *
-                                            self.hidden_thresholds_error)
-            self.hidden_thresholds += self.hidden_thresholds_error
-
-            self.output_thresholds_error = (alpha * output_error + gamma *
-                                            self.output_thresholds_error)
-            self.output_thresholds += self.output_thresholds_error
-
-            # evaluate output layer error
-            evaluate_error += (output_error ** 2).sum() * 0.5
-
-            yield evaluate_error / len(data)
-
-    def train_until(self, error_less_than, *args, **kwargs):
-        """Train the network repeatedly until error less than a threshold.
-        This is a wrap of `BackPropagationNetwork.train` method, only add a
-        argument `error_less_than`.
-        """
-        error = np.inf
-        while error > error_less_than:
-            training = self.train(*args, **kwargs)
-            for error in training:
-                yield error
-
-# -------------
-# Training Case
-# -------------
-
-def load_and_preprocess_data(filepath):
-    """Load data from a text format file."""
-    flower = namedtuple("flower", ["typeid", "a", "b", "c", "d"])
-    with open(filepath, "r") as datafile:
-        dataset = datafile.readlines()
-    for record in dataset:
-        record = record.strip().split(" ")[1:]
-        record = (int(item) for item in record)
-        yield flower(*record)
-
-
-def main():
-    # create a neuron net
-    bpnet = BackPropagationNetwork(NeuronNumber(input=4, hidden=3, output=3),
-                                   LearningRate(0.1, 0.1, 0.85))
-
-    # load and preprocess train data
-    raw_data = load_and_preprocess_data("./data.dat")
-    raw_data = np.array(list(raw_data), dtype=np.double)
-    train_data = raw_data[:, 1:]
-    desired_result = np.array([[(1 if i == typeid else 0) for i in range(3)]
-                               for typeid in raw_data[:, 0]], dtype=np.double)
-
-    # train
-    training = bpnet.train_until(data=train_data[:115, :],
-                                 desired_result=desired_result[:115, :],
-                                 error_less_than=0.0005)
-    training_result = list(training)
-    
-    x = np.arange(len(training_result), step=10)
-    y = np.array([training_result[ix] for ix in x])
-    plt.figure(figsize=(8, 4))
-    plt.plot(x, y, label="error (x)", color="blue")
-    plt.xlabel("training times")
-    plt.ylabel("error")
-    plt.title("BP Demo")
-    plt.legend()
-    plt.show()
-
-
-if __name__ == "__main__":
-    main()
+#!/usr/bin/env python
+#-*- coding:utf-8 -*-
+
+from collections import namedtuple
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+NeuronNumber = namedtuple("NeuronNumber", ["input", "hidden", "output"])
+LearningRate = namedtuple("LearningRate", ["alpha", "beta", "gamma"])
+sigmoid = lambda x: 1 / (1 + np.exp(-x))
+
+
+class BackPropagationNetwork(object):
+    """Back Propagation Neuron Network"""
+
+    def __init__(self, num, learning_rate):
+        """Initialize a neuron network.
+        :param num: named tuple `bpann.NeuronNumber`, the numbers of neuron
+                    nodes in three layer.
+        :param learning_rate: named tuple `bpann.LearningRate`, three rate
+                              argument alpha, beta and gamma.
+        """
+        self.num = num
+        self.learning_rate = learning_rate
+
+        # initialize weights matrix
+        self.input_to_hidden = np.random.rand(num.input, num.hidden) * 2 - 1
+        self.hidden_to_output = np.random.rand(num.hidden, num.output) * 2 - 1
+
+        # initialize thresholds
+        self.hidden_thresholds = np.random.rand(1, num.hidden) * 2 - 1
+        self.output_thresholds = np.random.rand(1, num.output) * 2 - 1
+
+        # initialize weights' error
+        self.input_to_hidden_error = np.zeros(self.input_to_hidden.shape)
+        self.hidden_to_output_error = np.zeros(self.hidden_to_output.shape)
+
+        # thresholds' error
+        self.hidden_thresholds_error = np.zeros(self.hidden_thresholds.shape)
+        self.output_thresholds_error = np.zeros(self.output_thresholds.shape)
+
+    def train(self, data, desired_result):
+        """Train the network with sample data.
+        :param data: sample data array, it should have a shape (sp_a, sp_b),
+                     where sp_a is not limited and sp_b should equal the
+                     number of input layer nodes.
+        :param desired_result: the desired training result for the sample
+                               data, it should have a shape (sp_a, sp_c),
+                               where sp_a should equal the number of sample
+                               items and sp_c the number of output layer
+                               nodes.
+        """
+        # validate input np.array
+        num_of_records, num_of_input_nodes = data.shape
+        num_of_result_records, num_of_output_nodes = desired_result.shape
+        assert num_of_input_nodes == self.num.input
+        assert num_of_output_nodes == self.num.output
+        assert num_of_records == num_of_result_records
+
+        # normalize data to the interval [-1, 1]
+        max_values = data.max(axis=0)
+        min_values = data.min(axis=0)
+        data = ((data - min_values) / (max_values - min_values) - 0.5) * 2
+
+        # iterate training network
+        alpha, beta, gamma = self.learning_rate  
+        evaluate_error = 0
+        for index, item in enumerate(data):
+            # desired result of current item
+            desired_result_item = desired_result[index]
+
+            # forward propagation
+            hidden_activation, output_activation = self.calculate(item)
+
+            # back propagation and calculate the errors
+            output_error = (output_activation * (1 - output_activation) *
+                            (desired_result_item - output_activation))
+            hidden_error = (hidden_activation * (1 - hidden_activation) *
+                            output_error.dot(self.hidden_to_output.T))
+
+            # adjust weights from hidden layer to output layer
+            self.hidden_to_output_error = (alpha * hidden_activation.T *
+                                           output_error + gamma *
+                                           self.hidden_to_output_error)
+            self.hidden_to_output += self.hidden_to_output_error
+
+            # adjust weights from input layer to hidden layer
+            self.input_to_hidden_error = beta * item.T.dot(hidden_error)
+            self.input_to_hidden += self.input_to_hidden_error
+
+            # adjust thresholds
+            self.hidden_thresholds_error = (beta * hidden_error + gamma *
+                                            self.hidden_thresholds_error)
+            self.hidden_thresholds += self.hidden_thresholds_error
+
+            self.output_thresholds_error = (alpha * output_error + gamma *
+                                            self.output_thresholds_error)
+            self.output_thresholds += self.output_thresholds_error
+
+            # evaluate output layer error
+            evaluate_error += (output_error ** 2).sum() * 0.5
+
+            yield evaluate_error / len(data)
+
+    def train_until(self, error_less_than, *args, **kwargs):
+        """Train the network repeatedly until error less than a threshold.
+        This is a wrap of `BackPropagationNetwork.train` method, only add a
+        argument `error_less_than`.
+        """
+        error = np.inf
+        while error > error_less_than:
+            training = self.train(*args, **kwargs)
+            for error in training:
+                yield error
+
+    def calculate(self, item):
+        """Calculate with this network.
+        :param item: data item, it should have a shape (1, sp_n), the sp_n
+                     should equal to the number of input layer nodes.
+        :return: tuple (hidden_activation, output_activation)
+        """
+        item.shape = 1, -1
+        hidden_activation = sigmoid(item.dot(self.input_to_hidden) +
+                                    self.hidden_thresholds)
+        output_activation = sigmoid(hidden_activation.dot(
+                                    self.hidden_to_output) +
+                                    self.output_thresholds)
+        return hidden_activation, output_activation
+
+
+# -------------
+# Training Case
+# -------------
+
+def load_and_preprocess_data(filepath):
+    """Load data from a text format file."""
+    flower = namedtuple("flower", ["typeid", "a", "b", "c", "d"])
+    with open(filepath, "r") as datafile:
+        dataset = datafile.readlines()
+    for record in dataset:
+        record = record.strip().split(" ")[1:]
+        record = (int(item) for item in record)
+        yield flower(*record)
+
+
+def main():
+    # create a neuron net
+    bpnet = BackPropagationNetwork(NeuronNumber(input=4, hidden=3, output=3),
+                                   LearningRate(0.1, 0.1, 0.85))
+
+    # load and preprocess train data
+    raw_data = load_and_preprocess_data("./test-data.dat")
+    raw_data = np.array(list(raw_data), dtype=np.double)
+    train_data = raw_data[:, 1:]
+    desired_result = np.array([[(1 if i == typeid else 0) for i in range(3)]
+                               for typeid in raw_data[:, 0]], dtype=np.double)
+
+    # train
+    training = bpnet.train_until(data=train_data[:115, :],
+                                 desired_result=desired_result[:115, :],
+                                 error_less_than=0.0005)
+    training_result = list(training)
+    
+    x = np.arange(len(training_result), step=10)
+    y = np.array([training_result[ix] for ix in x])
+    plt.figure(figsize=(8, 4))
+    plt.plot(x, y, label="error (x)", color="blue")
+    plt.xlabel("training times")
+    plt.ylabel("error")
+    plt.title("BP Demo")
+    plt.legend()
+    plt.show()
+
+
+if __name__ == "__main__":
+    main()
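
The extracted `calculate` method can now also be used on its own for prediction once the network has been trained. A minimal usage sketch (not part of this commit), assuming `bpnet` and `train_data` as built in `main()`; note that `calculate` does not normalize its input, so a new sample must be scaled with the same per-column min/max as the training data (`train` computes these internally but does not store them, so they are recomputed here):

    # hypothetical prediction step; `bpnet` and `train_data` come from main()
    min_values = train_data[:115, :].min(axis=0)
    max_values = train_data[:115, :].max(axis=0)

    sample = np.array([2.0, 14.0, 33.0, 50.0])  # hypothetical flower measurements (a, b, c, d)
    sample = ((sample - min_values) / (max_values - min_values) - 0.5) * 2  # same scaling as train()
    hidden_activation, output_activation = bpnet.calculate(sample)
    predicted_typeid = int(output_activation.argmax())  # index of the strongest output node

Because `desired_result` one-hot encodes the typeid column, the argmax index of the output activations corresponds directly to the predicted flower type id.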

data.dat

-1 0 2 14 33 50
-2 1 24 56 31 67
-3 1 23 51 31 69
-4 0 2 10 36 46
-5 1 20 52 30 65
-6 1 19 51 27 58
-7 2 13 45 28 57
-8 2 16 47 33 63
-9 1 17 45 25 49
-10 2 14 47 32 70
-11 0 2 16 31 48
-12 1 19 50 25 63
-13 0 1 14 36 49
-14 0 2 13 32 44
-15 2 12 40 26 58
-16 1 18 49 27 63
-17 2 10 33 23 50
-18 0 2 16 38 51
-19 0 2 16 30 50
-20 1 21 56 28 64
-21 0 4 19 38 51
-22 0 2 14 30 49
-23 2 10 41 27 58
-24 2 15 45 29 60
-25 0 2 14 36 50
-26 1 19 51 27 58
-27 0 4 15 34 54
-28 1 18 55 31 64
-29 2 10 33 24 49
-30 0 2 14 42 55
-31 1 15 50 22 60
-32 2 14 39 27 52
-33 0 2 14 29 44
-34 2 12 39 27 58
-35 1 23 57 32 69
-36 2 15 42 30 59
-37 1 20 49 28 56
-38 1 18 58 25 67
-39 2 13 44 23 63
-40 2 15 49 25 63
-41 2 11 30 25 51
-42 1 21 54 31 69
-43 1 25 61 36 72
-44 2 13 36 29 56
-45 1 21 55 30 68
-46 0 1 14 30 48
-47 0 3 17 38 57
-48 2 14 44 30 66
-49 0 4 15 37 51
-50 2 17 50 30 67
-51 1 22 56 28 64
-52 1 15 51 28 63
-53 2 15 45 22 62
-54 2 14 46 30 61
-55 2 11 39 25 56
-56 1 23 59 32 68
-57 1 23 54 34 62
-58 1 25 57 33 67
-59 0 2 13 35 55
-60 2 15 45 32 64
-61 1 18 51 30 59
-62 1 23 53 32 64
-63 2 15 45 30 54
-64 1 21 57 33 67
-65 0 2 13 30 44
-66 0 2 16 32 47
-67 1 18 60 32 72
-68 1 18 49 30 61
-69 0 2 12 32 50
-70 0 1 11 30 43
-71 2 14 44 31 67
-72 0 2 14 35 51
-73 0 4 16 34 50
-74 2 10 35 26 57
-75 1 23 61 30 77
-76 2 13 42 26 57
-77 0 1 15 41 52
-78 1 18 48 30 60
-79 2 13 42 27 56
-80 0 2 15 31 49
-81 0 4 17 39 54
-82 2 16 45 34 60
-83 2 10 35 20 50
-84 0 2 13 32 47
-85 2 13 54 29 62
-86 0 2 15 34 51
-87 2 10 50 22 60
-88 0 1 15 31 49
-89 0 2 15 37 54
-90 2 12 47 28 61
-91 2 13 41 28 57
-92 0 4 13 39 54
-93 1 20 51 32 65
-94 2 15 49 31 69
-95 2 13 40 25 55
-96 0 3 13 23 45
-97 0 3 15 38 51
-98 2 14 48 28 68
-99 0 2 15 35 52
-100 1 25 60 33 63
-101 2 15 46 28 65
-102 0 3 14 34 46
-103 2 18 48 32 59
-104 2 16 51 27 60
-105 1 18 55 30 65
-106 0 5 17 33 51
-107 1 22 67 38 77
-108 1 21 66 30 76
-109 1 13 52 30 67
-110 2 13 40 28 61
-111 2 11 38 24 55
-112 0 2 14 34 52
-113 1 20 64 38 79
-114 0 6 16 35 50
-115 1 20 67 28 77
-116 2 12 44 26 55
-117 0 3 14 30 48
-118 0 2 19 34 48
-119 1 14 56 26 61
-120 0 2 12 40 58
-121 1 18 48 28 62
-122 2 15 45 30 56
-123 0 2 14 32 46
-124 0 4 15 44 57
-125 1 24 56 34 63
-126 1 16 58 30 72
-127 1 21 59 30 71
-128 1 18 56 29 63
-129 2 12 42 30 57
-130 1 23 69 26 77
-131 2 13 56 29 66
-132 0 2 15 34 52
-133 2 10 37 24 55
-134 0 2 15 31 46
-135 1 19 61 28 74
-136 0 3 13 35 50
-137 1 18 63 29 73
-138 2 15 47 31 67
-139 2 13 41 30 56
-140 2 13 43 29 64
-141 1 22 58 30 65
-142 0 3 14 35 51
-143 2 14 47 29 61
-144 1 19 53 27 64
-145 0 2 16 34 48
-146 1 20 50 25 57
-147 2 13 40 23 55
-148 0 2 17 34 54
-149 1 24 51 28 58
-150 0 2 15 37 53
+1 0 2 14 33 50
+2 1 24 56 31 67
+3 1 23 51 31 69
+4 0 2 10 36 46
+5 1 20 52 30 65
+6 1 19 51 27 58
+7 2 13 45 28 57
+8 2 16 47 33 63
+9 1 17 45 25 49
+10 2 14 47 32 70
+11 0 2 16 31 48
+12 1 19 50 25 63
+13 0 1 14 36 49
+14 0 2 13 32 44
+15 2 12 40 26 58
+16 1 18 49 27 63
+17 2 10 33 23 50
+18 0 2 16 38 51
+19 0 2 16 30 50
+20 1 21 56 28 64
+21 0 4 19 38 51
+22 0 2 14 30 49
+23 2 10 41 27 58
+24 2 15 45 29 60
+25 0 2 14 36 50
+26 1 19 51 27 58
+27 0 4 15 34 54
+28 1 18 55 31 64
+29 2 10 33 24 49
+30 0 2 14 42 55
+31 1 15 50 22 60
+32 2 14 39 27 52
+33 0 2 14 29 44
+34 2 12 39 27 58
+35 1 23 57 32 69
+36 2 15 42 30 59
+37 1 20 49 28 56
+38 1 18 58 25 67
+39 2 13 44 23 63
+40 2 15 49 25 63
+41 2 11 30 25 51
+42 1 21 54 31 69
+43 1 25 61 36 72
+44 2 13 36 29 56
+45 1 21 55 30 68
+46 0 1 14 30 48
+47 0 3 17 38 57
+48 2 14 44 30 66
+49 0 4 15 37 51
+50 2 17 50 30 67
+51 1 22 56 28 64
+52 1 15 51 28 63
+53 2 15 45 22 62
+54 2 14 46 30 61
+55 2 11 39 25 56
+56 1 23 59 32 68
+57 1 23 54 34 62
+58 1 25 57 33 67
+59 0 2 13 35 55
+60 2 15 45 32 64
+61 1 18 51 30 59
+62 1 23 53 32 64
+63 2 15 45 30 54
+64 1 21 57 33 67
+65 0 2 13 30 44
+66 0 2 16 32 47
+67 1 18 60 32 72
+68 1 18 49 30 61
+69 0 2 12 32 50
+70 0 1 11 30 43
+71 2 14 44 31 67
+72 0 2 14 35 51
+73 0 4 16 34 50
+74 2 10 35 26 57
+75 1 23 61 30 77
+76 2 13 42 26 57
+77 0 1 15 41 52
+78 1 18 48 30 60
+79 2 13 42 27 56
+80 0 2 15 31 49
+81 0 4 17 39 54
+82 2 16 45 34 60
+83 2 10 35 20 50
+84 0 2 13 32 47
+85 2 13 54 29 62
+86 0 2 15 34 51
+87 2 10 50 22 60
+88 0 1 15 31 49
+89 0 2 15 37 54
+90 2 12 47 28 61
+91 2 13 41 28 57
+92 0 4 13 39 54
+93 1 20 51 32 65
+94 2 15 49 31 69
+95 2 13 40 25 55
+96 0 3 13 23 45
+97 0 3 15 38 51
+98 2 14 48 28 68
+99 0 2 15 35 52
+100 1 25 60 33 63
+101 2 15 46 28 65
+102 0 3 14 34 46
+103 2 18 48 32 59
+104 2 16 51 27 60
+105 1 18 55 30 65
+106 0 5 17 33 51
+107 1 22 67 38 77
+108 1 21 66 30 76
+109 1 13 52 30 67
+110 2 13 40 28 61
+111 2 11 38 24 55
+112 0 2 14 34 52
+113 1 20 64 38 79
+114 0 6 16 35 50
+115 1 20 67 28 77
+116 2 12 44 26 55
+117 0 3 14 30 48
+118 0 2 19 34 48
+119 1 14 56 26 61
+120 0 2 12 40 58
+121 1 18 48 28 62
+122 2 15 45 30 56
+123 0 2 14 32 46
+124 0 4 15 44 57
+125 1 24 56 34 63
+126 1 16 58 30 72
+127 1 21 59 30 71
+128 1 18 56 29 63
+129 2 12 42 30 57
+130 1 23 69 26 77
+131 2 13 56 29 66
+132 0 2 15 34 52
+133 2 10 37 24 55
+134 0 2 15 31 46
+135 1 19 61 28 74
+136 0 3 13 35 50
+137 1 18 63 29 73
+138 2 15 47 31 67
+139 2 13 41 30 56
+140 2 13 43 29 64
+141 1 22 58 30 65
+142 0 3 14 35 51
+143 2 14 47 29 61
+144 1 19 53 27 64
+145 0 2 16 34 48
+146 1 20 50 25 57
+147 2 13 40 23 55
+148 0 2 17 34 54
+149 1 24 51 28 58
+150 0 2 15 37 53