# Exported from TP_AA.git / TP3 / exo2 / tp3_exo2.py
# (commit c59b671479c474d3b2800ae39e4bbc0c3aa559da)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from numpy.random import rand
import pylab as pl  # pylab wraps matplotlib.pyplot (used for the final scatter plot)
7
8
def generateData(n):
    """Build a linearly separable 2-D dataset of 2n labelled samples.

    Each sample is [x, y, label]: the label -1 class is drawn around
    (-0.6, +0.6) and the label +1 class around (+0.6, -0.6), each inside
    a square of side 1, so the two clouds never overlap.
    """
    offset = 0.6
    xb = (rand(n) * 2 - 1) / 2 - offset
    yb = (rand(n) * 2 - 1) / 2 + offset
    xr = (rand(n) * 2 - 1) / 2 + offset
    yr = (rand(n) * 2 - 1) / 2 - offset
    samples = []
    # Interleave one point of each class per iteration, as before.
    for bx, by, rx, ry in zip(xb, yb, xr, yr):
        samples.append([bx, by, -1])
        samples.append([rx, ry, 1])
    return samples
24
25
def generateData2(n):
    """Build a linearly separable 2-D dataset of 2n labelled samples.

    Each sample is [x, y, label]: class -1 is centred near (-0.5, 0.0)
    and class +1 near (+1.5, -0.5), with a wider horizontal gap than
    generateData.
    """
    xb = (rand(n) * 2 - 1) / 2 - 0.5
    yb = (rand(n) * 2 - 1) / 2
    xr = (rand(n) * 2 - 1) / 2 + 1.5
    yr = (rand(n) * 2 - 1) / 2 - 0.5
    samples = []
    for i in range(n):
        # One point of each class per step keeps the classes interleaved.
        samples += [[xb[i], yb[i], -1], [xr[i], yr[i], 1]]
    return samples
40
41
def generateData3(n):
    """Build a NON-linearly separable 2-D dataset (ring shape).

    Class -1: n points inside the square of side 1 centred at the origin.
    Class +1: candidates drawn in the square of side 3 centred at the
    origin, kept only when they fall outside the square of side 2 —
    so the positive class forms a ring around the negative one.
    Returns a list of [x, y, label] samples (length varies: rejected
    candidates are dropped).
    """
    xb = (rand(n) * 2 - 1) / 2
    yb = (rand(n) * 2 - 1) / 2
    xr = 3 * (rand(4 * n) * 2 - 1) / 2
    yr = 3 * (rand(4 * n) * 2 - 1) / 2
    samples = [[x, y, -1] for x, y in zip(xb, yb)]
    # Keep only the candidates outside the side-2 square.
    samples.extend([x, y, 1]
                   for x, y in zip(xr, yr)
                   if abs(x) >= 1 or abs(y) >= 1)
    return samples
62
63
# Build the training set once at import time: 2 * training_set_size
# samples from the widely-separated dataset, then split into the 2-D
# inputs X and the +/-1 labels Y.
training_set_size = 150
training_set = generateData2(training_set_size)
data = np.array(training_set)
X = data[:, 0:2]
Y = data[:, -1]
69
70
def perceptron_nobias(X, Y):
    """Train a perceptron without bias term and return its weight vector.

    Sweeps the whole sample repeatedly; on each misclassified point
    (Y[i] * <w, X[i]> <= 0) the weights are corrected by Y[i] * X[i].
    Stops after the first full pass with no mistake.  Only terminates
    when the data are linearly separable through the origin.
    """
    w = np.zeros([len(X[0])])
    mistakes = 1  # force at least one pass over the data
    while mistakes != 0:
        mistakes = 0
        for x, y in zip(X, Y):
            if y * np.dot(w, x) <= 0:
                mistakes += 1
                w = w + y * x
    return w
82
83
def complete(sample):
    """Append a constant bias column of 1 to every row of the 2-D sample.

    Accepts any 2-D array-like and returns a new ndarray with one extra
    trailing column.  (np.insert already returns an ndarray, so the
    original's extra np.array() wrapper was a redundant copy — removed.)
    """
    return np.insert(sample, len(sample[0]), [1], axis=1)
87
88
def plongement(sample_element):
    """Embed a 2-D point (x, y) into the degree-2 polynomial feature
    space [1, x, y, x^2, x*y, y^2]."""
    x, y = sample_element[0], sample_element[1]
    return [1, x, y, x * x, x * y, y * y]
91
92
def apply_plongement(sample):
    """Embed every row (x, y) of `sample` with the degree-2 polynomial
    map [1, x, y, x^2, x*y, y^2] and return the result as an ndarray.

    The per-point embedding is inlined here; it is the same map as
    `plongement`.
    """
    return np.array([[1, r[0], r[1], r[0] * r[0], r[0] * r[1], r[1] * r[1]]
                     for r in sample])
99
100
def f_from_k(coeffs, support_set, k, x):
    """Evaluate the kernel expansion f(x) = sum_i c_i * y_i * k(x_i, x).

    `support_set` holds (label, point) pairs; `coeffs` the matching
    coefficients.  Returns 0 when the support set is empty.
    """
    return sum(c * label * k(point, x)
               for c, (label, point) in zip(coeffs, support_set))
106
107
def k1(X1, X2):
    """Degree-2 polynomial kernel matching the `plongement` embedding:
    k1(a, b) == dot(plongement(a), plongement(b)).

    Bug fixed: the original cross term ended in X1[1] where X2[1]
    belonged, and the y^2 term was missing an X1[1] factor, so the
    kernel did not equal the inner product in the embedded space.
    """
    return (1
            + X1[0] * X2[0]
            + X1[1] * X2[1]
            + X1[0] * X1[0] * X2[0] * X2[0]
            + X1[0] * X1[1] * X2[0] * X2[1]
            + X1[1] * X1[1] * X2[1] * X2[1])
110
111
def perceptron_k(X, Y, k):
    """Kernel perceptron: learn a decision function in the feature
    space induced by kernel `k` without computing the embedding.

    On every mistake (Y[i] * f(X[i]) <= 0, evaluated via f_from_k) the
    misclassified point is appended to the support set with coefficient
    1.  Stops after a full pass with no mistake and returns
    (coeffs, support_set).

    Bug fixed: the original also incremented the *last* coefficient on
    every correctly classified sample; the kernel perceptron must only
    update on mistakes, so that branch corrupted the classifier and
    could prevent convergence.
    """
    coeffs = []
    support_set = []
    classification_error = 1  # force at least one full pass
    while classification_error != 0:
        classification_error = 0
        for i in range(X.shape[0]):
            if Y[i] * f_from_k(coeffs, support_set, k, X[i]) <= 0:
                classification_error += 1
                support_set.append([Y[i], X[i]])
                coeffs.append(1)
    return coeffs, support_set
127
128
# Train the kernel perceptron on the raw 2-D data and print
# (coeffs, support_set).
print(perceptron_k(X, Y, k1))

# Replace X by its explicit degree-2 embedding; from here on the
# columns are [1, x, y, x^2, x*y, y^2].  Train the linear perceptron
# in that space and print its weight vector.
X = apply_plongement(X)
w = perceptron_nobias(X, Y)
print(w)

# NOTE(review): after the embedding, X[:, 0] is the constant 1 and
# X[:, 1] is the original x coordinate, so this scatters every point at
# abscissa 1 — presumably the original (x, y) columns 1 and 2 were
# intended; confirm.  `s=training_set_size` sets the marker size, and
# despite the title no hyperplane is actually drawn.
pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
pl.title(u"Perceptron - hyperplan")
pl.show()