Generates a 2D linearly separable dataset with 2n samples.
The third element of the sample is the label
"""
- xb = (rand(n) * 2 - 1) / 2 - 0.5
+ xb = (rand(n) * 2 - 1) / 2 + 0.5
yb = (rand(n) * 2 - 1) / 2
xr = (rand(n) * 2 - 1) / 2 + 1.5
yr = (rand(n) * 2 - 1) / 2 - 0.5
w = perceptron_nobias(X, Y)
# w is orthogonal to the hyperplane
# with generateData
+# plot arguments format is pl.plot([x1,x2],[y1,y2])
+# w[0]x + w[1]y = 0, so y = -(w[0] / w[1]) * x
# pl.plot([-1, 1], [w[0] / w[1], -w[0] / w[1]])
# with generateData2 and complete
-# FIXME: the hyperplan equation is not correct
-pl.plot([0, -1 / w[1]], [w[0] / w[1] - 1 / w[1], -w[0] / w[1] - 1 / w[1]])
+# w[0]x + w[1]y + w[2] = 0, so y = -(w[0]x + w[2]) / w[1]
+x_start1 = -0.5
+x_start2 = 2.5
+pl.plot([x_start1, x_start2], [-(w[0] * x_start1 + w[2]) /
+ w[1], -(w[0] * x_start2 + w[2]) / w[1]])
pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
pl.title(u"Perceptron - hyperplan")
pl.show()
Generates a 2D linearly separable dataset with 2n samples.
The third element of the sample is the label
"""
- xb = (rand(n) * 2 - 1) / 2 - 0.5
+ xb = (rand(n) * 2 - 1) / 2 + 0.5
yb = (rand(n) * 2 - 1) / 2
xr = (rand(n) * 2 - 1) / 2 + 1.5
yr = (rand(n) * 2 - 1) / 2 - 0.5
return coeffs, support_set
-print(perceptron_k(X, Y, k1))
-# print(perceptron_k(X, Y, kg))
+def f(x, y, w):
+ return
+
+
+coeffs, support_set = perceptron_k(X, Y, k1)
+# coeffs, support_set = perceptron_k(X, Y, kg)
+print(coeffs)
+print(support_set)
X = apply_plongement(X, plongement_phi)
w = perceptron_nobias(X, Y)