Code cleanup (PEP8)
[TP_AA.git] / TP3 / exo2 / tp3_exo2.py
index c92d590e7013db5771abd8c644a62c94418baa32..2a831877d1afa81b9794af3a9cc89541789eb242 100755 (executable)
@@ -41,7 +41,7 @@ def generateData2(n):
 
 def generateData3(n):
     """
-    Generates a 2D linearly separable dataset with 2n samples.
+    Generates a 2D linearly separable dataset with about 2n samples.
     The third element of the sample is the label
     """
     # (xb, yb) is in the square of side 1 centered at the origin
@@ -62,7 +62,7 @@ def generateData3(n):
 
 
 training_set_size = 150
-training_set = generateData2(training_set_size)
+training_set = generateData3(training_set_size)
 data = np.array(training_set)
 X = data[:, 0:2]
 Y = data[:, -1]
@@ -86,20 +86,59 @@ def complete(sample):
     return np.array(new_sample)
 
 
-def plongement(sample_element):
-    return [1, sample_element[0], sample_element[1], sample_element[0] * sample_element[0], sample_element[0] * sample_element[1], sample_element[1] * sample_element[1]]
+def plongement_phi(sample_element):
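+    """Degree-2 polynomial embedding:
+    phi(x) = (1, x1, x2, x1^2, x1*x2, x2^2)."""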
+    return [1, sample_element[0], sample_element[1], sample_element[0]**2,
+            sample_element[0] * sample_element[1], sample_element[1]**2]
 
 
-def apply_plongement(sample):
+def apply_plongement(sample, p):
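+    """Apply the embedding p row by row and return the embedded array."""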
     output = []
     for i in range(sample.shape[0]):
-        current = plongement(sample[i])
+        current = p(sample[i])
         output.append(current)
     return np.array(output)
 
 
-X = apply_plongement(X)
+def f_from_k(coeffs, support_set, k, x):
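+    """Decision function of the kernelised perceptron:
+    f(x) = sum_i coeffs[i] * y_i * k(x_i, x) over the support set."""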
+    output = 0
+    for c, s in zip(coeffs, support_set):
+        output += c * s[0] * k(s[1], x)
+    return output
+
+
+def k1(X1, X2):
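+    """Polynomial kernel: the inner product <phi(X1), phi(X2)> of the
+    plongement_phi embeddings, computed without building them."""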
+    return (1 + X1[0] * X2[0] + X1[1] * X2[1] + X1[0]**2 * X2[0]**2
+            + X1[0] * X1[1] * X2[0] * X2[1] + X1[1]**2 * X2[1]**2)
+
+
+def kg(x, y, sigma=10):
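+    """Gaussian (RBF) kernel: exp(-||x - y||^2 / sigma^2)."""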
+    return np.exp(-((x[0] - y[0])**2 + (x[1] - y[1])**2) / sigma**2)
+
+
+def perceptron_k(X, Y, k):
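+    """Kernelised perceptron training.
+
+    Each misclassified sample is appended to the support set as
+    [label, sample] with coefficient 1; the loop runs until a full pass
+    over X makes no mistake (this assumes the data is separable in the
+    kernel's feature space). Returns (coeffs, support_set) for f_from_k.
+    """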
+    coeffs = []
+    support_set = []
+    # Start at a non-zero value to force at least one pass over the data
+    classification_error = 1
+    while classification_error != 0:
+        classification_error = 0
+        for i in range(X.shape[0]):
+            if Y[i] * f_from_k(coeffs, support_set, k, X[i]) <= 0:
+                classification_error += 1
+                support_set.append([Y[i], X[i]])
+                coeffs.append(1)
+            else:
+                # Correct prediction: reinforce the latest support vector
+                coeffs[-1] += 1
+    return coeffs, support_set
+
+
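+# Train the kernel perceptron with the polynomial kernel k1
+# (uncomment the second call to try the Gaussian kernel kg instead)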
+print(perceptron_k(X, Y, k1))
+# print(perceptron_k(X, Y, kg))
+
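+# Explicit embedding: map the samples through plongement_phi and run
+# perceptron_nobias in that feature space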
+X = apply_plongement(X, plongement_phi)
 w = perceptron_nobias(X, Y)
+print(w)
+
 pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
 pl.title(u"Perceptron - hyperplan")
 pl.show()