| 1 | #!/usr/bin/env python3 |
| 2 | |
| 3 | # -*- coding: utf-8 -*- |
| 4 | import numpy as np |
| 5 | from numpy.random import rand |
| 6 | import pylab as pl |
| 7 | |
| 8 | |
def generateData(n):
    """
    Build a linearly separable 2D dataset of 2n samples.

    Each sample is [x, y, label]: the n label -1 points cluster around
    (-0.6, +0.6) and the n label +1 points around (+0.6, -0.6), each
    inside a side-1 square, so a line separates the two classes.
    """
    offset = 0.6
    blue_x = (rand(n) * 2 - 1) / 2 - offset
    blue_y = (rand(n) * 2 - 1) / 2 + offset
    red_x = (rand(n) * 2 - 1) / 2 + offset
    red_y = (rand(n) * 2 - 1) / 2 - offset
    samples = []
    # interleave one blue (-1) and one red (+1) sample per iteration
    for bx, by, rx, ry in zip(blue_x, blue_y, red_x, red_y):
        samples.append([bx, by, -1])
        samples.append([rx, ry, 1])
    return samples
| 24 | |
| 25 | |
def generateData2(n):
    """
    Build a linearly separable 2D dataset of 2n samples.

    Each sample is [x, y, label]: the label -1 cluster is centred near
    (-0.5, 0) and the label +1 cluster near (+1.5, -0.5), leaving a
    wider horizontal gap than generateData.
    """
    xb = (rand(n) * 2 - 1) / 2 - 0.5
    yb = (rand(n) * 2 - 1) / 2
    xr = (rand(n) * 2 - 1) / 2 + 1.5
    yr = (rand(n) * 2 - 1) / 2 - 0.5
    # one -1 sample followed by one +1 sample, n times
    return [point
            for i in range(n)
            for point in ([xb[i], yb[i], -1], [xr[i], yr[i], 1])]
| 40 | |
| 41 | |
def generateData3(n):
    """
    Build a NON-linearly separable 2D dataset of about 2n samples.

    The n label -1 points fill the side-1 square centred at the origin.
    4n candidate +1 points are drawn from the side-3 square centred at
    the origin, and only those outside the side-2 square are kept, so
    the +1 class forms a square annulus surrounding the -1 class.
    Each sample is [x, y, label].
    """
    # (xb, yb): inside the side-1 square centred at the origin
    xb = (rand(n) * 2 - 1) / 2
    yb = (rand(n) * 2 - 1) / 2
    # (xr, yr): inside the side-3 square centred at the origin
    xr = 3 * (rand(4 * n) * 2 - 1) / 2
    yr = 3 * (rand(4 * n) * 2 - 1) / 2
    samples = [[x, y, -1] for x, y in zip(xb, yb)]
    # keep only the candidates OUTSIDE the side-2 square
    samples.extend([x, y, 1]
                   for x, y in zip(xr, yr)
                   if abs(x) >= 1 or abs(y) >= 1)
    return samples
| 62 | |
| 63 | |
# Build the training set: about 2 * training_set_size samples drawn from
# the ring-shaped (non linearly separable) distribution of generateData3.
training_set_size = 150
training_set = generateData3(training_set_size)
data = np.array(training_set)
X = data[:, 0:2]  # 2D coordinates
Y = data[:, -1]   # labels in {-1, +1}
| 69 | |
| 70 | |
def perceptron_nobias(X, Y):
    """
    Train a perceptron with no bias term on samples X with labels Y.

    Sweeps the whole dataset repeatedly, adding y * x to the weight
    vector for every misclassified sample (a sample exactly on the
    hyperplane counts as misclassified), until a full sweep makes no
    mistake.  Assumes the data is linearly separable, otherwise the
    loop never terminates.  Returns the learned weight vector w.
    """
    w = np.zeros(len(X[0]))
    mistakes = 1  # force at least one sweep
    while mistakes:
        mistakes = 0
        for x, y in zip(X, Y):
            if y * np.dot(w, x) <= 0:
                mistakes += 1
                w = w + y * x
    return w
| 82 | |
| 83 | |
def complete(sample):
    """
    Append a constant bias feature (value 1) to every row of sample.

    Returns a new 2D array with one extra trailing column of ones.
    """
    bias_position = len(sample[0])  # insert after the last column
    completed = np.insert(sample, bias_position, [1], axis=1)
    return np.array(completed)
| 87 | |
| 88 | |
def plongement_phi(sample_element):
    """
    Degree-2 polynomial feature map (French: "plongement" = embedding).

    Maps a 2D point (x, y) to [1, x, y, x^2, x*y, y^2] — the explicit
    embedding whose dot product matches the polynomial kernel k1.
    """
    x, y = sample_element[0], sample_element[1]
    return [1, x, y, x * x, x * y, y * y]
| 91 | |
| 92 | |
def apply_plongement(sample, p):
    """
    Apply the feature map p to every row of sample.

    Returns a 2D numpy array whose i-th row is p(sample[i]).
    """
    return np.array([p(row) for row in sample])
| 99 | |
| 100 | |
def f_from_k(coeffs, support_set, k, x):
    """
    Evaluate the kernel classifier at point x:

        f(x) = sum_i coeffs[i] * y_i * k(x_i, x)

    where support_set holds [y_i, x_i] pairs.  Returns 0 when the
    support set is empty.
    """
    return sum(c * y * k(sv, x)
               for c, (y, sv) in zip(coeffs, support_set))
| 106 | |
| 107 | |
def k1(X1, X2):
    """
    Degree-2 polynomial kernel on 2D points.

    Equals the dot product <phi(X1), phi(X2)> of the explicit embedding
    plongement_phi: 1 + linear terms + all quadratic cross terms.
    """
    u1, u2 = X1[0], X1[1]
    v1, v2 = X2[0], X2[1]
    return (1
            + u1 * v1 + u2 * v2
            + u1 * u1 * v1 * v1
            + u1 * u2 * v1 * v2
            + u2 * u2 * v2 * v2)
| 110 | |
| 111 | |
def kg(x, y, sigma=10):
    """
    Gaussian (RBF) kernel on 2D points: exp(-||x - y||^2 / sigma^2).

    NOTE: the denominator is sigma**2, not the more common 2*sigma**2;
    kept as-is to match the rest of the script.
    """
    dx = x[0] - y[0]
    dy = x[1] - y[1]
    return np.exp(-(dx * dx + dy * dy) / sigma ** 2)
| 114 | |
| 115 | |
def perceptron_k(X, Y, k):
    """
    Train a kernelized perceptron on samples X with labels Y in {-1,+1}.

    On a misclassified sample (f(x) * y <= 0, so the very first sample
    is always a "mistake" since f starts at 0) the sample is appended
    to the support set with coefficient 1; on a correctly classified
    sample the coefficient of the MOST RECENT support vector is
    incremented instead (a survival-count variant).  Sweeps until a
    full pass over the data makes no mistake — assumes the data is
    separable in the kernel's feature space, otherwise this never
    terminates.

    Returns (coeffs, support_set) where support_set holds [label,
    sample] pairs; evaluate the classifier with f_from_k.
    """
    coeffs = []
    support_set = []
    mistakes = 1  # force at least one sweep
    while mistakes:
        mistakes = 0
        for i in range(X.shape[0]):
            if Y[i] * f_from_k(coeffs, support_set, k, X[i]) <= 0:
                # mistake: grow the support set
                mistakes += 1
                support_set.append([Y[i], X[i]])
                coeffs.append(1)
            else:
                # correct: reward the latest support vector
                coeffs[-1] += 1
    return coeffs, support_set
| 131 | |
| 132 | |
# Train the kernel perceptron with the degree-2 polynomial kernel and
# print its (coeffs, support_set) pair.
print(perceptron_k(X, Y, k1))
# print(perceptron_k(X, Y, kg))  # alternative: Gaussian kernel

# Train an ordinary (bias-free) perceptron on the explicit degree-2
# embedding — the same hypothesis space as the k1 kernel machine.
# NOTE: X is replaced by its embedded version from here on.
X = apply_plongement(X, plongement_phi)
w = perceptron_nobias(X, Y)
print(w)

# NOTE(review): after the embedding, X[:, 0] is the constant-1 feature
# and X[:, 1] the original x coordinate, so this scatter plots a
# horizontal band rather than the original point cloud — confirm this
# is intended.
pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
pl.title(u"Perceptron - hyperplan")
pl.show()