3 # -*- coding: utf-8 -*-
5 from numpy
.random
import rand
def generateData(n, linear_offset):
    """Generate a 2D linearly separable dataset with 2n samples.

    The third element of each sample is the label (-1 or 1).
    Blue points (-1) are drawn in a unit square shifted by
    (-linear_offset, +linear_offset); red points (1) in a unit square
    shifted by (+linear_offset, -linear_offset).

    NOTE(review): the original `def` header was lost in extraction; the
    name and signature are inferred from the sibling `generateData3` and
    from the use of `n` and `linear_offset` in the body — confirm
    against the full file.
    """
    # (rand(n) * 2 - 1) / 2 is uniform in [-0.5, 0.5); the offset
    # separates the two classes along the line y = x.
    xb = (rand(n) * 2 - 1) / 2 - linear_offset
    yb = (rand(n) * 2 - 1) / 2 + linear_offset
    xr = (rand(n) * 2 - 1) / 2 + linear_offset
    yr = (rand(n) * 2 - 1) / 2 - linear_offset
    inputs = []
    for i in range(n):
        inputs.append([xb[i], yb[i], -1])
        inputs.append([xr[i], yr[i], 1])
    return inputs
def generateData2(n):
    """Generate a 2D linearly separable dataset with 2n samples.

    The third element of each sample is the label (-1 or 1).
    Blue points (-1) lie in [0, 1] x [-0.5, 0.5]; red points (1) in
    [1, 2] x [-1, 0], so the classes are separable but NOT through the
    origin (a bias term is needed).

    NOTE(review): the original `def` header was lost in extraction; the
    name is inferred from the `generateData3` naming scheme — confirm
    against the full file.
    """
    # (rand(n) * 2 - 1) / 2 is uniform in [-0.5, 0.5).
    xb = (rand(n) * 2 - 1) / 2 + 0.5
    yb = (rand(n) * 2 - 1) / 2
    xr = (rand(n) * 2 - 1) / 2 + 1.5
    yr = (rand(n) * 2 - 1) / 2 - 0.5
    inputs = []
    for i in range(n):
        inputs.append([xb[i], yb[i], -1])
        inputs.append([xr[i], yr[i], 1])
    return inputs
def generateData3(n):
    """Generate a 2D dataset with about 2n samples that is NOT linearly
    separable: a square of blue points surrounded by a ring of red points.

    The third element of each sample is the label (-1 or 1).
    Returns n blue points plus a variable number of red points (4n
    candidates are drawn, only those outside the inner square are kept),
    hence "about" 2n samples.
    """
    # (xb, yb) lies in the square of side 1 centred at the origin.
    xb = (rand(n) * 2 - 1) / 2
    yb = (rand(n) * 2 - 1) / 2
    # (xr, yr) lies in the square of side 3 centred at the origin.
    xr = 3 * (rand(4 * n) * 2 - 1) / 2
    yr = 3 * (rand(4 * n) * 2 - 1) / 2
    inputs = []
    for i in range(n):
        inputs.append([xb[i], yb[i], -1])
    for i in range(4 * n):
        # Keep only the points outside the unit-side square centred at
        # the origin, so red points form a ring around the blue ones.
        if abs(xr[i]) >= 1 or abs(yr[i]) >= 1:
            inputs.append([xr[i], yr[i], 1])
    return inputs
# Script setup: draw the training sample once at module load.
# generateData3 yields roughly 2 * training_set_size labelled points
# (n blue points plus the red candidates that fall outside the inner square).
training_set_size = 150
training_set = generateData3(training_set_size)
# Matrix form: one row per sample, columns = [x, y, label].
data = np.array(training_set)
def perceptron_nobias(X, Y):
    """Train a perceptron with no bias term on samples X with labels Y.

    X: iterable of feature vectors (all the same length); Y: iterable of
    labels in {-1, 1}. Returns the learned weight vector w such that
    sign(w . x) classifies the training data. Loops until a full pass
    makes zero mistakes, so it terminates only if the data are linearly
    separable through the origin. Prints the number of mistakes per pass.
    """
    w = np.zeros([len(X[0])])
    # Go in the loop at least one time
    classification_error = 1
    while not classification_error == 0:
        classification_error = 0
        for x, y in zip(X, Y):
            if y * np.dot(w, x) <= 0:
                # Perceptron update on a mistake: move w toward y * x.
                # NOTE(review): this update line was missing from the
                # extracted source (the loop could never terminate);
                # restored per the standard perceptron rule.
                w = w + y * np.array(x)
                classification_error += 1
        print(classification_error)
    return w
# NOTE(review): fragment of a helper whose `def` header is not visible in
# this extract, so its name and signature cannot be stated here — confirm
# against the full file. The visible body appends a constant column of 1s
# (a bias feature) after the existing columns of the 2-D array `sample`
# via np.insert(..., axis=1) and returns the result as an ndarray.
86 new_sample
= np
.insert(sample
, len(sample
[0]), [1], axis
=1)
87 return np
.array(new_sample
)
def plongement_phi(sample_element):
    """Degree-2 polynomial embedding of a 2D point.

    Maps (x, y) to the feature vector [1, x, y, x^2, x*y, y^2], in which
    a quadratic decision boundary becomes a linear one.
    """
    x, y = sample_element[0], sample_element[1]
    return [1, x, y, x ** 2, x * y, y ** 2]
def apply_plongement(sample, p):
    """Apply the embedding p to every row of a 2-D array.

    sample: 2-D ndarray (one sample per row); p: callable mapping one
    row to a feature list. Returns an ndarray with one embedded row per
    input row.
    """
    # NOTE(review): the `output = []` initialization was missing from
    # the extracted source (NameError as written); restored.
    output = []
    for i in range(sample.shape[0]):
        current = p(sample[i])
        output.append(current)
    return np.array(output)
def f_from_k(coeffs, support_set, k, x):
    """Evaluate the kernel expansion f(x) = sum_i c_i * y_i * k(s_i, x).

    coeffs: per-support-vector multiplicities c_i; support_set: iterable
    of (point, label) pairs; k: kernel function k(a, b); x: query point.
    Returns the (signed) decision value; 0 for an empty expansion.
    """
    # NOTE(review): the accumulator initialization and the return
    # statement were missing from the extracted source; restored.
    output = 0
    for c, s in zip(coeffs, support_set):
        output += c * s[1] * k(s[0], x)
    return output
def k(X1, X2):
    """Explicit degree-2 polynomial kernel on 2D points.

    Computes phi(X1) . phi(X2) for the embedding
    phi(x, y) = [1, x, y, x^2, x*y, y^2], i.e. the kernel that matches
    `plongement_phi` without materializing the feature vectors.

    NOTE(review): the original `def` header was lost in extraction; the
    name `k` is inferred from the call `perceptron_k(X, Y, k)` later in
    the file (where a Gaussian `k` defined below may shadow this one) —
    confirm against the full file.
    """
    return (1 + X1[0] * X2[0] + X1[1] * X2[1] + X1[0]**2 * X2[0]**2
            + X1[0] * X1[1] * X2[0] * X2[1] + X1[1]**2 * X2[1]**2)
def k(x, y):
    """Gaussian (RBF) kernel exp(-||x - y||^2 / sigma^2) on 2D points.

    NOTE(review): the original `def` header and the active `sigma`
    assignment were lost in extraction; the name `k` is inferred from
    the call `perceptron_k(X, Y, k)` later in the file, and sigma = 1 is
    assumed as the value between the commented experiments below —
    confirm both against the full file.
    """
    # sigma = 20 # do not converge
    # sigma = 10 # do not converge
    sigma = 1
    # sigma = 0.5 # overfitting
    # sigma = 0.2 # overfitting
    return np.exp(-((x[0] - y[0])**2 + (x[1] - y[1])**2) / sigma**2)
def perceptron_k(X, Y, k):
    """Kernel (dual) perceptron.

    X: iterable of sample points; Y: labels in {-1, 1}; k: kernel
    function k(a, b). On each mistake the misclassified sample is added
    to the support set (first time) and its multiplicity coefficient is
    incremented. Loops until a full pass makes zero mistakes, printing
    the mistake count per pass, then returns
    (np.array(coeffs), np.array(support_set)).
    """
    # NOTE(review): these two initializations and the coeffs.append(0)
    # below were missing from the extracted source (NameError /
    # IndexError as written); restored.
    coeffs = []
    support_set = []
    # Go in the loop at least one time
    classification_error = 1
    while not classification_error == 0:
        classification_error = 0
        for x, y in zip(X, Y):
            if y * f_from_k(coeffs, support_set, k, x) <= 0:
                # NOTE(review): this compares the point x against the
                # stored (x, y) PAIRS, so it is always True and every
                # mistake appends a (duplicate) entry; only the first
                # occurrence's coefficient is ever incremented, which
                # keeps f correct but grows the lists. Preserved as in
                # the original — confirm before changing to
                # `(x, y) not in support_set` (ndarray equality makes
                # that membership test unreliable).
                if x not in support_set:
                    support_set.append((x, y))
                    coeffs.append(0)
                coeffs[support_set.index((x, y))] += 1
                classification_error += 1
        print(classification_error)
    return np.array(coeffs), np.array(support_set)
def f(w, x, y):
    """Evaluate the quadratic decision surface at point (x, y).

    w is the 6-vector of weights for the features
    [1, x, y, x^2, x*y, y^2] (the `plongement_phi` embedding); returns
    the scalar decision value.

    NOTE(review): the original `def` header was lost in extraction; the
    signature is inferred from the commented call
    `f(w, -3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res)` at the bottom of
    the file — confirm against the full file.
    """
    return (w[0] + w[1] * x + w[2] * y + w[3] * x**2
            + w[4] * x * y + w[5] * y**2)
# NOTE(review): driver/plotting section. The definitions of X and Y (the
# feature matrix and label vector, presumably sliced from `data`) and the
# `for x in range(res): for y in range(res):` headers around the grid scan
# below are not visible in this extract — confirm against the full file.
# Scatter the training points, coloured by their label.
147 pl
.scatter(X
[:, 0], X
[:, 1], c
=Y
, s
=training_set_size
)
148 pl
.title(u
"Perceptron - prolontaged hyperplan")
151 # coeffs, support_set = perceptron_k(X, Y, k)
# Train the kernel perceptron on the training data.
153 coeffs
, support_set
= perceptron_k(X
, Y
, k
)
154 res
= training_set_size
# Grid scan over [-1.5, 1.5]^2: mark cells where the kernel decision
# function is close to zero, i.e. the approximate decision boundary.
157 if abs(f_from_k(coeffs
, support_set
, k
, [-3 / 2 + 3 * x
/ res
, -3 / 2 + 3 * y
/ res
])) < 0.01:
158 pl
.plot(-3 / 2 + 3 * x
/ res
, -3 / 2 + 3 * y
/ res
, 'xr')
# Alternative (disabled): explicit quadratic embedding + linear perceptron.
160 # X = apply_plongement(X, plongement_phi)
161 # w = perceptron_nobias(X, Y)
162 # for x in range(res):
163 # for y in range(res):
164 # if abs(f(w, -3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res)) < 0.01:
165 # pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xb')