diff --git a/TP3/exo2/tp3_exo2.py b/TP3/exo2/tp3_exo2.py
index c92d590..6317987 100755
--- a/TP3/exo2/tp3_exo2.py
+++ b/TP3/exo2/tp3_exo2.py
@@ -28,7 +28,7 @@ def generateData2(n):
     Generates a 2D linearly separable dataset with 2n samples.
     The third element of the sample is the label
     """
-    xb = (rand(n) * 2 - 1) / 2 - 0.5
+    xb = (rand(n) * 2 - 1) / 2 + 0.5
     yb = (rand(n) * 2 - 1) / 2
     xr = (rand(n) * 2 - 1) / 2 + 1.5
     yr = (rand(n) * 2 - 1) / 2 - 0.5
@@ -41,7 +41,7 @@ def generateData2(n):
 
 def generateData3(n):
     """
-    Generates a 2D linearly separable dataset with 2n samples.
+    Generates a 2D linearly separable dataset with about 2n samples.
     The third element of the sample is the label
     """
     # (xb, yb) lies in the square of side 1 centered at the origin
@@ -62,7 +62,7 @@ def generateData3(n):
 
 
 training_set_size = 150
-training_set = generateData2(training_set_size)
+training_set = generateData3(training_set_size)
 data = np.array(training_set)
 X = data[:, 0:2]
 Y = data[:, -1]
@@ -74,10 +74,10 @@ def perceptron_nobias(X, Y):
     classification_error = 1
     while not classification_error == 0:
         classification_error = 0
-        for i in range(X.shape[0]):
-            if Y[i] * np.dot(w, X[i]) <= 0:
+        for x, y in zip(X, Y):
+            if y * np.dot(w, x) <= 0:
                 classification_error += 1
-                w = w + Y[i] * X[i]
+                w = w + y * x
     return w
 
 
@@ -86,20 +86,74 @@ def complete(sample):
     return np.array(new_sample)
 
 
-def plongement(sample_element):
-    return [1, sample_element[0], sample_element[1], sample_element[0] * sample_element[0], sample_element[0] * sample_element[1], sample_element[1] * sample_element[1]]
+def plongement_phi(sample_element):
+    return [1, sample_element[0], sample_element[1], sample_element[0]**2,
+            sample_element[0] * sample_element[1], sample_element[1]**2]
 
 
-def apply_plongement(sample):
+def apply_plongement(sample, p):
     output = []
     for i in range(sample.shape[0]):
-        current = plongement(sample[i])
+        current = p(sample[i])
         output.append(current)
     return np.array(output)
 
 
+def f_from_k(coeffs, support_set, k, x):
+    # Kernel perceptron decision function: sum_i c_i * y_i * k(x_i, x)
+    output = 0
+    for c, s in zip(coeffs, support_set):
+        output += c * s[1] * k(s[0], x)
+    return output
+
+
+def k1(X1, X2):
+    # Degree-2 polynomial kernel: the inner product induced by plongement_phi
+    return 1 + X1[0] * X2[0] + X1[1] * X2[1] + X1[0]**2 * X2[0]**2 \
+        + X1[0] * X1[1] * X2[0] * X2[1] + X1[1]**2 * X2[1]**2
+
+
+def kg(x, y, sigma=10):
+    # Gaussian (RBF) kernel
+    return np.exp(-((x[0] - y[0])**2 + (x[1] - y[1])**2) / sigma**2)
+
+
+def perceptron_k(X, Y, k):
+    coeffs = []
+    support_set = []
+    # Enter the loop at least once
+    classification_error = 1
+    while not classification_error == 0:
+        classification_error = 0
+        for x, y in zip(X, Y):
+            if y * f_from_k(coeffs, support_set, k, x) <= 0:
+                # `in`/`.index()` are ambiguous on (numpy array, label)
+                # tuples, so look the misclassified sample up explicitly
+                idx = next((i for i, s in enumerate(support_set)
+                            if np.array_equal(s[0], x)), None)
+                if idx is None:
+                    support_set.append((x, y))
+                    coeffs.append(1)
+                else:
+                    coeffs[idx] += 1
+                classification_error += 1
+        # Number of mistakes made during this pass over the data
+        print(classification_error)
+    # support_set holds inhomogeneous (array, label) tuples, so keep it a list
+    return np.array(coeffs), support_set
+
+
+def f(w, x, y):
+    return w[0] + w[1] * x + w[2] * y + w[3] * x**2 + w[4] * x * y + w[5] * y**2
+
+
 pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
 pl.title(u"Perceptron - hyperplan")
+
+coeffs, support_set = perceptron_k(X, Y, k1)
+# coeffs, support_set = perceptron_k(X, Y, kg)
+res = training_set_size
+for x in range(res):
+    for y in range(res):
+        if abs(f_from_k(coeffs, support_set, k1, [-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res])) < 0.01:
+            pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xr')
+
+# X = apply_plongement(X, plongement_phi)
+# w = perceptron_nobias(X, Y)
+# for x in range(res):
+#     for y in range(res):
+#         if abs(f(w, -3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res)) < 0.01:
+#             pl.plot(-3 / 2 + 3 * x / res, -3 / 2 + 3 * y / res, 'xb')
+
 pl.show()
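Note on the kernel code added by this patch: k1 is exactly the kernel induced by the explicit embedding plongement_phi, i.e. k1(a, b) = <phi(a), phi(b)>, which is why the kernelized run and the commented-out embedded run should trace the same decision boundary. A minimal sanity check, not part of the patch, with the two functions copied from it under abbreviated parameter names:

    import numpy as np

    def plongement_phi(e):
        # explicit degree-2 feature map, as in the patch
        return [1, e[0], e[1], e[0]**2, e[0] * e[1], e[1]**2]

    def k1(X1, X2):
        # polynomial kernel, as in the patch
        return 1 + X1[0] * X2[0] + X1[1] * X2[1] + X1[0]**2 * X2[0]**2 \
            + X1[0] * X1[1] * X2[0] * X2[1] + X1[1]**2 * X2[1]**2

    a, b = np.array([0.3, -0.7]), np.array([1.2, 0.5])
    # k1(a, b) must equal the dot product of the embedded points
    assert np.isclose(k1(a, b), np.dot(plongement_phi(a), plongement_phi(b)))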
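Once perceptron_k has converged, the sign of f_from_k classifies every training point correctly, since the loop only exits when a full pass produces no mistakes. An illustrative sketch of that check, assuming the names X, Y, perceptron_k, f_from_k and k1 from the patched script are in scope:

    # hypothetical usage, not part of the patch
    coeffs, support_set = perceptron_k(X, Y, k1)
    predictions = np.array([np.sign(f_from_k(coeffs, support_set, k1, x))
                            for x in X])
    print("training accuracy:", np.mean(predictions == Y))  # 1.0 after convergence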