Generates a 2D linearly separable dataset with 2n samples.
The third element of the sample is the label
"""
- xb = (rand(n) * 2 - 1) / 2 - 0.5
+ xb = (rand(n) * 2 - 1) / 2 + 0.5
yb = (rand(n) * 2 - 1) / 2
xr = (rand(n) * 2 - 1) / 2 + 1.5
yr = (rand(n) * 2 - 1) / 2 - 0.5
def perceptron_nobias(X, Y):
    """Train a bias-free perceptron on (X, Y) and return the weight vector.

    Sweeps the training set repeatedly; each sample misclassified by the
    current weights (y * <w, x> <= 0) triggers the classic update
    w <- w + y * x.  Stops after the first sweep with zero mistakes, so the
    data must be linearly separable for this to terminate.

    NOTE(review): the original header and the initialisation of ``w`` were
    lost in the paste; a zero vector of the sample dimension is assumed —
    confirm against the original file.

    Parameters
    ----------
    X : 2-D array of samples (one row per sample)
    Y : labels in {-1, +1}

    Returns
    -------
    Weight vector ``w`` separating the data (no bias term).
    """
    w = np.zeros(X.shape[1])
    classification_error = 1
    while classification_error != 0:
        classification_error = 0
        for x, y in zip(X, Y):
            if y * np.dot(w, x) <= 0:
                classification_error += 1
                w = w + y * x
        # progress trace: mistakes made in this sweep (0 terminates the loop)
        print(classification_error)
    return w
def f_from_k(coeffs, support_set, k, x):
    """Evaluate the kernel expansion f(x) = sum_i c_i * y_i * k(x_i, x).

    Parameters
    ----------
    coeffs : iterable of per-support-vector coefficients c_i
    support_set : iterable of (x_i, y_i) pairs — sample first, label second
    k : kernel function k(a, b)
    x : point at which to evaluate the decision function

    Returns
    -------
    The (signed) decision value; 0 when the support set is empty.
    """
    # sum() over a generator replaces the manual accumulator loop; the
    # term order (c * y_i * k(x_i, x)) matches the (sample, label) layout
    # of support_set entries.
    return sum(c * s[1] * k(s[0], x) for c, s in zip(coeffs, support_set))
def perceptron_k(X, Y, k):
    """Kernel perceptron.

    Sweeps the training set until a full pass makes no mistakes.  Each
    misclassified sample joins the support set with coefficient 1; if it is
    already a support vector its coefficient is incremented instead.

    NOTE(review): the original header and the empty initialisations of
    ``coeffs``/``support_set`` were lost in the paste; they are
    reconstructed from the call sites — confirm against the original file.

    Parameters
    ----------
    X : iterable of samples
    Y : labels in {-1, +1}
    k : kernel function k(a, b)

    Returns
    -------
    (coeffs, support_set) as numpy arrays; support_set rows are
    (sample, label) pairs.
    """
    coeffs = []
    support_set = []
    classification_error = 1
    while classification_error != 0:
        classification_error = 0
        for x, y in zip(X, Y):
            if y * f_from_k(coeffs, support_set, k, x) <= 0:
                classification_error += 1
                # Bug fix: the original tested ``x not in support_set``,
                # comparing a bare sample against (sample, label) tuples —
                # that never matches (so coefficients never merged) and
                # raises "truth value is ambiguous" on numpy rows.  Scan
                # explicitly for an existing support vector instead.
                for idx, (sx, sy) in enumerate(support_set):
                    if sy == y and np.array_equal(sx, x):
                        coeffs[idx] += 1
                        break
                else:
                    support_set.append((x, y))
                    coeffs.append(1)
        # progress trace: mistakes made in this sweep (0 terminates the loop)
        print(classification_error)
    return np.array(coeffs), np.array(support_set)
def f(w, x, y):
    """Evaluate the degree-2 polynomial w . [1, x, y, x^2, x*y, y^2]."""
    monomials = (1, x, y, x * x, x * y, y * y)
    return sum(wi * m for wi, m in zip(w, monomials))
# Scatter the training points coloured by label, then overlay the decision
# boundary found by the kernel perceptron.
pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
pl.title(u"Perceptron - hyperplan")

coeffs, support_set = perceptron_k(X, Y, k1)
# coeffs, support_set = perceptron_k(X, Y, kg)

# Sample a res x res grid over [-1.5, 1.5]^2 and mark the points where the
# kernel decision function is numerically zero, i.e. the separating curve.
res = training_set_size
for i in range(res):
    for j in range(res):
        # hoisted: each grid coordinate was recomputed four times originally
        gx = -3 / 2 + 3 * i / res
        gy = -3 / 2 + 3 * j / res
        if abs(f_from_k(coeffs, support_set, k1, [gx, gy])) < 0.01:
            pl.plot(gx, gy, 'xr')

# Alternative: explicit feature-map embedding + linear perceptron.
# X = apply_plongement(X, plongement_phi)
# w = perceptron_nobias(X, Y)
# for x in range(res):
#     for y in range(res):
#         if abs(f(w, -3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res)) < 0.01:
#             pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xb')

pl.show()