Add more of the TP3 exo2 implementation (kernel perceptron).
[TP_AA.git] / TP3 / exo2 / tp3_exo2.py
index c92d590e7013db5771abd8c644a62c94418baa32..c59b671479c474d3b2800ae39e4bbc0c3aa559da 100755 (executable)
@@ -98,8 +98,40 @@ def apply_plongement(sample):
     return np.array(output)
 
 
+def f_from_k(coeffs, support_set, k, x):
+    # Dual-form decision function: sum_i coeffs[i] * y_i * k(x_i, x)
+    output = 0
+    for c, s in zip(coeffs, support_set):
+        # s = [y_i, x_i] for one support vector
+        output += c * s[0] * k(s[1], x)
+    return output
+
+
+def k1(X1, X2):
+    # Kernel of the quadratic plongement (1, x1, x2, x1^2, x1*x2, x2^2)
+    return (1 + X1[0] * X2[0] + X1[1] * X2[1] + X1[0] * X1[0] * X2[0] * X2[0]
+            + X1[0] * X1[1] * X2[0] * X2[1] + X1[1] * X1[1] * X2[1] * X2[1])
+
+
+def perceptron_k(X, Y, k):
+    coeffs = []
+    support_set = []
+    # Enter the loop at least once
+    classification_error = 1
+    while classification_error != 0:
+        classification_error = 0
+        for i in range(X.shape[0]):
+            if Y[i] * f_from_k(coeffs, support_set, k, X[i]) <= 0:
+                # Misclassified: add the sample to the support set with coefficient 1
+                classification_error += 1
+                support_set.append([Y[i], X[i]])
+                coeffs.append(1)
+            else:
+                # Correctly classified: reinforce the most recently added support vector
+                coeffs[-1] += 1
+    return coeffs, support_set
+
+
+# Kernel perceptron on the raw samples: k1 performs the plongement implicitly
+print(perceptron_k(X, Y, k1))
+
 X = apply_plongement(X)
 w = perceptron_nobias(X, Y)
+# Weights learned on the explicitly embedded data, for comparison
+print(w)
+
 pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
 pl.title(u"Perceptron - hyperplan")
 pl.show()
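
As a quick sanity check of the kernel version, the returned (coeffs, support_set) pair can be fed back into f_from_k to classify the training samples. The snippet below is a hypothetical sketch, not part of the commit; it assumes it is placed before the X = apply_plongement(X) line, so that X and Y still hold the raw training samples, and it reuses f_from_k, k1 and numpy (already imported as np in the file).

# Hypothetical sketch: count how many training samples the learned
# decision function still misclassifies.
coeffs, support_set = perceptron_k(X, Y, k1)
predictions = np.array([np.sign(f_from_k(coeffs, support_set, k1, x)) for x in X])
print("kernel perceptron training errors:", np.sum(predictions != Y))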