X-Git-Url: https://git.piment-noir.org/?a=blobdiff_plain;f=TP3%2Fexo2%2Ftp3_exo2.py;h=4ca092ddd0015126efe66d2a3b02fceb8b444433;hb=c60d868ed5752a8c5adef46881c6a8d792351370;hp=c92d590e7013db5771abd8c644a62c94418baa32;hpb=ce56d6abaff4ef1fd9598f542d521764278ca5bb;p=TP_AA.git

diff --git a/TP3/exo2/tp3_exo2.py b/TP3/exo2/tp3_exo2.py
index c92d590..4ca092d 100755
--- a/TP3/exo2/tp3_exo2.py
+++ b/TP3/exo2/tp3_exo2.py
@@ -28,7 +28,7 @@ def generateData2(n):
     Generates a 2D linearly separable dataset with 2n samples.
     The third element of the sample is the label
     """
-    xb = (rand(n) * 2 - 1) / 2 - 0.5
+    xb = (rand(n) * 2 - 1) / 2 + 0.5
     yb = (rand(n) * 2 - 1) / 2
     xr = (rand(n) * 2 - 1) / 2 + 1.5
     yr = (rand(n) * 2 - 1) / 2 - 0.5
@@ -41,7 +41,7 @@ def generateData2(n):
 
 def generateData3(n):
     """
-    Generates a 2D linearly separable dataset with 2n samples.
+    Generates a 2D linearly separable dataset with about 2n samples.
     The third element of the sample is the label
     """
     # (xb, yb) est dans le carré centré à l’origine de côté 1
@@ -62,7 +62,7 @@ def generateData3(n):
 
 
 training_set_size = 150
-training_set = generateData2(training_set_size)
+training_set = generateData3(training_set_size)
 data = np.array(training_set)
 X = data[:, 0:2]
 Y = data[:, -1]
@@ -86,20 +86,70 @@ def complete(sample):
     return np.array(new_sample)
 
 
-def plongement(sample_element):
-    return [1, sample_element[0], sample_element[1], sample_element[0] * sample_element[0], sample_element[0] * sample_element[1], sample_element[1] * sample_element[1]]
+def plongement_phi(sample_element):
+    return [1, sample_element[0], sample_element[1], sample_element[0]**2,
+            sample_element[0] * sample_element[1], sample_element[1]**2]
 
 
-def apply_plongement(sample):
+def apply_plongement(sample, p):
     output = []
     for i in range(sample.shape[0]):
-        current = plongement(sample[i])
+        current = p(sample[i])
         output.append(current)
     return np.array(output)
 
 
-X = apply_plongement(X)
-w = perceptron_nobias(X, Y)
+def f_from_k(coeffs, support_set, k, x):
+    output = 0
+    for c, s in zip(coeffs, support_set):
+        output += c * s[0] * k(s[1], x)
+    return output
+
+
+def k1(X1, X2):
+    return 1 + X1[0] * X2[0] + X1[1] * X2[1] + X1[0]**2 * X2[0]**2 \
+        + X1[0] * X1[1] * X2[0] * X2[1] + X1[1]**2 * X2[1]**2
+
+
+def kg(x, y, sigma=10):
+    return np.exp(-((x[0] - y[0])**2 + (x[1] - y[1])**2) / sigma**2)
+
+
+def perceptron_k(X, Y, k):
+    coeffs = []
+    support_set = np.array([])
+    # Go in the loop at least one time
+    classification_error = 1
+    while not classification_error == 0:
+        classification_error = 0
+        for i in range(X.shape[0]):
+            if Y[i] * f_from_k(coeffs, support_set, k, X[i]) <= 0:
+                classification_error += 1
+                np.append(support_set, X[i])
+                coeffs.append(1)
+            else:
+                coeffs[len(coeffs) - 1] = coeffs[len(coeffs) - 1] + 1
+    return np.array(coeffs), support_set
+
+
+def f(w, x, y):
+    return w[0] + w[1] * x + w[2] * y + w[3] * x**2 + w[4] * x * y + w[5] * y**2
+
+
 pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
 pl.title(u"Perceptron - hyperplan")
+
+# coeffs, support_set = perceptron_k(X, Y, k1)
+# coeffs, support_set = perceptron_k(X, Y, kg)
+res = training_set_size
+# for c, X in zip(coeffs, support_set):
+#     pl.plot(X[0], 'xr')
+
+X = apply_plongement(X, plongement_phi)
+w = perceptron_nobias(X, Y)
+for x in range(res):
+    for y in range(res):
+        if abs(f(w, -3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res)) < 0.01:
+            pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xb')
+
 pl.show()