Fix TP3 exo2.
author     Jérôme Benoit <jerome.benoit@piment-noir.org>
           Wed, 14 Nov 2018 12:45:35 +0000 (13:45 +0100)
committer  Jérôme Benoit <jerome.benoit@piment-noir.org>
           Wed, 14 Nov 2018 12:45:35 +0000 (13:45 +0100)
Signed-off-by: Jérôme Benoit <jerome.benoit@piment-noir.org>
TP3/exo1/tp3_exo1.py
TP3/exo2/tp3_exo2.py

diff --git a/TP3/exo1/tp3_exo1.py b/TP3/exo1/tp3_exo1.py
index 02416ba46171c462dde7934537888c37661bdd3f..9efcb98968963696643b9dc30801c0a0f3d9b1a5 100755 (executable)
@@ -52,10 +52,10 @@ def perceptron_nobias(X, Y):
     classification_error = 1
     while not classification_error == 0:
         classification_error = 0
-        for i in range(X.shape[0]):
-            if Y[i] * np.dot(w, X[i]) <= 0:
+        for x, y in zip(X, Y):
+            if y * np.dot(w, x) <= 0:
                 classification_error += 1
-                w = w + Y[i] * X[i]
+                w = w + y * x
     return w
 
 
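Note on the hunk above: iterating with zip(X, Y) walks the (sample, label) pairs directly instead of indexing rows, while keeping the same update rule w <- w + y * x on every misclassified point. A minimal standalone sketch of the rewritten function, assuming X is an (n, d) NumPy array, Y holds +/-1 labels, and w starts from the zero vector (its initialisation sits outside this hunk):

    import numpy as np

    def perceptron_nobias(X, Y):
        # w starts from zero here; the real initialisation is outside the hunk.
        w = np.zeros(X.shape[1])
        classification_error = 1               # enter the loop at least once
        while classification_error != 0:
            classification_error = 0
            for x, y in zip(X, Y):             # one (sample, label) pair per step
                if y * np.dot(w, x) <= 0:      # misclassified or on the hyperplane
                    classification_error += 1
                    w = w + y * x              # standard perceptron update
        return w

    # Toy usage on two linearly separable points:
    X = np.array([[1.0, 2.0], [-1.0, -2.0]])
    Y = np.array([1, -1])
    print(perceptron_nobias(X, Y))             # e.g. [1. 2.]
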
diff --git a/TP3/exo2/tp3_exo2.py b/TP3/exo2/tp3_exo2.py
index 4ca092ddd0015126efe66d2a3b02fceb8b444433..63179871840e06d636fa96e0d08ff9db7dee6bff 100755 (executable)
@@ -74,10 +74,10 @@ def perceptron_nobias(X, Y):
     classification_error = 1
     while not classification_error == 0:
         classification_error = 0
-        for i in range(X.shape[0]):
-            if Y[i] * np.dot(w, X[i]) <= 0:
+        for x, y in zip(X, Y):
+            if y * np.dot(w, x) <= 0:
                 classification_error += 1
-                w = w + Y[i] * X[i]
+                w = w + y * x
     return w
 
 
@@ -102,7 +102,7 @@ def apply_plongement(sample, p):
 def f_from_k(coeffs, support_set, k, x):
     output = 0
     for c, s in zip(coeffs, support_set):
-        output += c * s[0] * k(s[1], x)
+        output += c * s[1] * k(s[0], x)
     return output
 
 
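Note on the hunk above: each support_set entry is a (point, label) pair, so the kernel has to be evaluated on s[0] and the stored label is s[1]; the swap makes f_from_k compute the usual kernel decision function f(x) = sum_i c_i * y_i * k(x_i, x). A short sketch, assuming k1 is the plain linear kernel np.dot (the actual k1 in the file is not shown in this diff):

    import numpy as np

    def k1(x, y):
        # Assumed linear kernel; the real k1 in the file is not shown in this diff.
        return np.dot(x, y)

    def f_from_k(coeffs, support_set, k, x):
        # Kernel decision function f(x) = sum_i c_i * y_i * k(x_i, x),
        # where support_set[i] == (x_i, y_i).
        output = 0
        for c, s in zip(coeffs, support_set):
            output += c * s[1] * k(s[0], x)
        return output

    # Single support point (x_1, y_1) = ([1, 0], +1) with coefficient c_1 = 2:
    print(f_from_k([2], [(np.array([1.0, 0.0]), 1)], k1, np.array([3.0, 1.0])))  # -> 6.0
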
@@ -117,19 +117,21 @@ def kg(x, y, sigma=10):
 
 def perceptron_k(X, Y, k):
     coeffs = []
-    support_set = np.array([])
+    support_set = []
     # Go in the loop at least one time
     classification_error = 1
     while not classification_error == 0:
         classification_error = 0
-        for i in range(X.shape[0]):
-            if Y[i] * f_from_k(coeffs, support_set, k, X[i]) <= 0:
+        for x, y in zip(X, Y):
+            if y * f_from_k(coeffs, support_set, k, x) <= 0:
+                if x not in support_set:
+                    support_set.append((x, y))
+                    coeffs.append(1)
+                else:
+                    coeffs[support_set.index((x, y))] += 1
                 classification_error += 1
-                np.append(support_set, X[i])
-                coeffs.append(1)
-            else:
-                coeffs[len(coeffs) - 1] = coeffs[len(coeffs) - 1] + 1
-    return np.array(coeffs), support_set
+        print(classification_error)
+    return np.array(coeffs), np.array(support_set)
 
 
 def f(w, x, y):
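Note on the hunk above: support_set is now a plain Python list of (sample, label) tuples grown with append, which fixes the earlier np.append call whose return value was discarded. One caveat: `x not in support_set` compares a NumPy array against those tuples, which does not give a reliable element-wise lookup; below is a sketch of the same loop with an explicit np.array_equal search (an assumed variant, not the committed code, reusing f_from_k from the diff above):

    import numpy as np

    def perceptron_k(X, Y, k):
        coeffs, support_set = [], []
        classification_error = 1              # enter the loop at least once
        while classification_error != 0:
            classification_error = 0
            for x, y in zip(X, Y):
                if y * f_from_k(coeffs, support_set, k, x) <= 0:
                    classification_error += 1
                    # Explicit lookup: `x in support_set` on NumPy arrays does not
                    # perform a reliable element-wise membership test.
                    idx = next((i for i, (sx, _) in enumerate(support_set)
                                if np.array_equal(sx, x)), None)
                    if idx is None:
                        support_set.append((x, y))
                        coeffs.append(1)
                    else:
                        coeffs[idx] += 1
        return np.array(coeffs), support_set
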
@@ -139,17 +141,19 @@ def f(w, x, y):
 pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
 pl.title(u"Perceptron - hyperplan")
 
-coeffs, support_set = perceptron_k(X, Y, k1)
+coeffs, support_set = perceptron_k(X, Y, k1)
 # coeffs, support_set = perceptron_k(X, Y, kg)
 res = training_set_size
-# for c, X in zip(coeffs, support_set):
-#     pl.plot(X[0], 'xr')
-
-X = apply_plongement(X, plongement_phi)
-w = perceptron_nobias(X, Y)
 for x in range(res):
     for y in range(res):
-        if abs(f(w, -3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res)) < 0.01:
-            pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xb')
+        if abs(f_from_k(coeffs, support_set, k1, [-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res])) < 0.01:
+            pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xr')
+
+# X = apply_plongement(X, plongement_phi)
+# w = perceptron_nobias(X, Y)
+# for x in range(res):
+#     for y in range(res):
+#         if abs(f(w, -3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res)) < 0.01:
+#             pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xb')
 
 pl.show()
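
Note on the final hunk: the rewritten plotting loop scans a res x res grid over [-3/2, 3/2]^2 and marks the points where |f_from_k| < 0.01, i.e. an approximation of the zero level set of the kernel decision function. The same boundary can also be drawn as a zero-level contour; a sketch, assuming pl is matplotlib.pyplot and that coeffs, support_set and k1 come from the code above:

    import numpy as np

    grid = np.linspace(-3 / 2, 3 / 2, 100)
    xx, yy = np.meshgrid(grid, grid)
    zz = np.array([[f_from_k(coeffs, support_set, k1, [a, b]) for a in grid]
                   for b in grid])
    pl.contour(xx, yy, zz, levels=[0], colors='r')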