Commit | Line | Data |
---|---|---|
6f4ffbd7 JB |
1 | #!/usr/bin/env python3 |
2 | ||
3 | # -*- coding: utf-8 -*- | |
4 | import numpy as np | |
5 | from numpy.random import rand | |
6 | import pylab as pl | |
7 | ||
8 | ||
def generateData(n, linear_offset=0.6):
    """Generate a 2D linearly separable dataset with 2n samples.

    The two clusters are shifted away from each other along the
    anti-diagonal by ``linear_offset`` (previously a hard-coded 0.6,
    now a parameter with the same default for backward compatibility).

    Args:
        n: number of samples per class (2n samples total).
        linear_offset: separation between the two class centres.

    Returns:
        A list of 2n samples ``[x, y, label]`` with label -1 (blue
        cluster) and +1 (red cluster) alternating.
    """
    xb = (rand(n) * 2 - 1) / 2 - linear_offset
    yb = (rand(n) * 2 - 1) / 2 + linear_offset
    xr = (rand(n) * 2 - 1) / 2 + linear_offset
    yr = (rand(n) * 2 - 1) / 2 - linear_offset
    inputs = []
    for i in range(n):
        inputs.append([xb[i], yb[i], -1])
        inputs.append([xr[i], yr[i], 1])
    return inputs
24 | ||
25 | ||
def generateData2(n):
    """Generate a 2D linearly separable dataset with 2n samples.

    Each sample is ``[x, y, label]``: the blue cluster (label -1) is
    centred near (-0.5, 0), the red cluster (label +1) near (1.5, -0.5),
    so the separating line does not pass through the origin.
    """
    blue_x = (rand(n) * 2 - 1) / 2 - 0.5
    blue_y = (rand(n) * 2 - 1) / 2
    red_x = (rand(n) * 2 - 1) / 2 + 1.5
    red_y = (rand(n) * 2 - 1) / 2 - 0.5
    samples = []
    for bx, by, rx, ry in zip(blue_x, blue_y, red_x, red_y):
        samples.append([bx, by, -1])
        samples.append([rx, ry, 1])
    return samples
40 | ||
41 | ||
# Build the training data: 2 * training_set_size labelled points.
training_set_size = 150
training_set = generateData2(training_set_size)
data = np.array(training_set)
# First two columns are the coordinates, third column is the label.
X = data[:, :2]
Y = data[:, 2]
47 | ||
48 | ||
def perceptron_nobias(X, Y, max_epochs=None):
    """Train a perceptron without an explicit bias term.

    The separating hyperplane passes through the origin unless the
    inputs were augmented with a constant coordinate (see ``complete``).

    Args:
        X: array of shape (n_samples, n_features).
        Y: array of labels in {-1, +1}, one per sample.
        max_epochs: optional cap on the number of passes over the data.
            None (the default, original behavior) iterates until
            convergence — which terminates only if the data is linearly
            separable.

    Returns:
        The learned weight vector w, orthogonal to the hyperplane.
    """
    w = np.zeros(len(X[0]))
    misclassified = 1  # enter the loop at least once
    epoch = 0
    while misclassified != 0:
        if max_epochs is not None and epoch >= max_epochs:
            break  # guard against non-separable data looping forever
        misclassified = 0
        for x, y in zip(X, Y):
            # Sample on the wrong side of (or exactly on) the hyperplane:
            # apply the classic perceptron update w <- w + y * x.
            if y * np.dot(w, x) <= 0:
                misclassified += 1
                w = w + y * x
        epoch += 1
    return w
60 | ||
61 | ||
def complete(sample):
    """Return *sample* with a constant column of ones appended.

    The extra coordinate lets a no-bias perceptron learn an affine
    (offset) separating hyperplane: the weight learned for the ones
    column acts as the intercept.
    """
    arr = np.asarray(sample)
    ones_column = np.ones((arr.shape[0], 1), dtype=arr.dtype)
    return np.hstack((arr, ones_column))
6f4ffbd7 JB |
65 | |
66 | ||
# Append a bias column so the no-bias perceptron can learn an affine
# hyperplane (its third weight plays the role of the intercept).
X = complete(X)
w = perceptron_nobias(X, Y)

# w is orthogonal to the hyperplane: w[0]*x + w[1]*y + w[2] = 0,
# hence y = -(w[0]*x + w[2]) / w[1].  This fixes the previous FIXME,
# where the plotted line ignored the learned bias w[2].
x_bounds = [-1.0, 2.0]
pl.plot(x_bounds, [-(w[0] * x + w[2]) / w[1] for x in x_bounds])
pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
pl.title(u"Perceptron - hyperplan")
pl.show()