From: Jérôme Benoit
Date: Tue, 13 Nov 2018 16:07:41 +0000 (+0100)
Subject: Add the initial exo2 implementation.
X-Git-Url: https://git.piment-noir.org/?a=commitdiff_plain;h=ce56d6abaff4ef1fd9598f542d521764278ca5bb;p=TP_AA.git

Add the initial exo2 implementation.

Signed-off-by: Jérôme Benoit
---

diff --git a/TP3/exo1/tp3_exo1.py b/TP3/exo1/tp3_exo1.py
index dfd04d8..73dd307 100755
--- a/TP3/exo1/tp3_exo1.py
+++ b/TP3/exo1/tp3_exo1.py
@@ -39,8 +39,8 @@ def generateData2(n):
     return inputs
 
 
-training_set_size = 100
-training_set = generateData(training_set_size)
+training_set_size = 150
+training_set = generateData2(training_set_size)
 data = np.array(training_set)
 X = data[:, 0:2]
 Y = data[:, -1]
@@ -54,17 +54,24 @@ def perceptron_nobias(X, Y):
         classification_error = 0
         for i in range(X.shape[0]):
             if Y[i] * np.dot(w, X[i]) <= 0:
-                classification_error = classification_error + 1
+                classification_error += 1
                 w = w + Y[i] * X[i]
     return w
 
 
 def complete(sample):
-    sample = np.expand_dims(sample, axis=0)
-    return sample
+    new_sample = np.insert(sample, len(sample[0]), [1], axis=1)
+    return np.array(new_sample)
 
 
+X = complete(X)
 w = perceptron_nobias(X, Y)
-pl.plot([-1, 1], [w[0] / w[1], -w[0] / w[1]])
+# w is orthogonal to the hyperplane
+# with generateData
+# pl.plot([-1, 1], [w[0] / w[1], -w[0] / w[1]])
+# with generateData2 and complete
+# FIXME: the hyperplane equation is not correct
+pl.plot([0, -1 / w[1]], [w[0] / w[1] - 1 / w[1], -w[0] / w[1] - 1 / w[1]])
 pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
+pl.title("Perceptron - hyperplane")
 pl.show()

diff --git a/TP3/exo2/tp3_exo2.py b/TP3/exo2/tp3_exo2.py
new file mode 100755
index 0000000..c92d590
--- /dev/null
+++ b/TP3/exo2/tp3_exo2.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import numpy as np
+from numpy.random import rand
+import pylab as pl
+
+
+def generateData(n):
+    """
+    Generates a 2D linearly separable dataset with 2n samples.
+    The third element of the sample is the label
+    """
+    linear_offset = 0.6
+    xb = (rand(n) * 2 - 1) / 2 - linear_offset
+    yb = (rand(n) * 2 - 1) / 2 + linear_offset
+    xr = (rand(n) * 2 - 1) / 2 + linear_offset
+    yr = (rand(n) * 2 - 1) / 2 - linear_offset
+    inputs = []
+    for i in range(n):
+        inputs.append([xb[i], yb[i], -1])
+        inputs.append([xr[i], yr[i], 1])
+    return inputs
+
+
+def generateData2(n):
+    """
+    Generates a 2D linearly separable dataset with 2n samples.
+    The third element of the sample is the label
+    """
+    xb = (rand(n) * 2 - 1) / 2 - 0.5
+    yb = (rand(n) * 2 - 1) / 2
+    xr = (rand(n) * 2 - 1) / 2 + 1.5
+    yr = (rand(n) * 2 - 1) / 2 - 0.5
+    inputs = []
+    for i in range(n):
+        inputs.append([xb[i], yb[i], -1])
+        inputs.append([xr[i], yr[i], 1])
+    return inputs
+
+
+def generateData3(n):
+    """
+    Generates a 2D dataset that is not linearly separable.
+    The third element of the sample is the label
+    """
+    # (xb, yb) lies in the square of side 1 centred at the origin
+    xb = (rand(n) * 2 - 1) / 2
+    yb = (rand(n) * 2 - 1) / 2
+    # (xr, yr) lies in the square of side 3 centred at the origin
+    xr = 3 * (rand(4 * n) * 2 - 1) / 2
+    yr = 3 * (rand(4 * n) * 2 - 1) / 2
+    inputs = []
+    for i in range(n):
+        inputs.append([xb[i], yb[i], -1])
+    for i in range(4 * n):
+        # only keep the points outside the square of side 2 centred
+        # at the origin
+        if abs(xr[i]) >= 1 or abs(yr[i]) >= 1:
+            inputs.append([xr[i], yr[i], 1])
+    return inputs
+
+
+training_set_size = 150
+training_set = generateData2(training_set_size)
+data = np.array(training_set)
+X = data[:, 0:2]
+Y = data[:, -1]
+
+
+def perceptron_nobias(X, Y):
+    w = np.zeros([len(X[0])])
+    # Enter the loop at least once
+    classification_error = 1
+    while classification_error != 0:
+        classification_error = 0
+        for i in range(X.shape[0]):
+            if Y[i] * np.dot(w, X[i]) <= 0:
+                classification_error += 1
+                w = w + Y[i] * X[i]
+    return w
+
+
+def complete(sample):
+    # append a constant 1 feature to every sample (bias trick)
+    new_sample = np.insert(sample, len(sample[0]), [1], axis=1)
+    return np.array(new_sample)
+
+
+def plongement(sample_element):
+    # degree-2 polynomial embedding of a 2D point
+    x, y = sample_element[0], sample_element[1]
+    return [1, x, y, x * x, x * y, y * y]
+
+
+def apply_plongement(sample):
+    output = []
+    for i in range(sample.shape[0]):
+        current = plongement(sample[i])
+        output.append(current)
+    return np.array(output)
+
+
+X = apply_plongement(X)
+w = perceptron_nobias(X, Y)
+# columns 1 and 2 of the embedded samples hold the original coordinates
+pl.scatter(X[:, 1], X[:, 2], c=Y, s=training_set_size)
+pl.title("Perceptron - hyperplane")
+pl.show()
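
A note on the FIXME in tp3_exo1.py: after complete(), every sample is extended to (x, y, 1), so the learned weight vector w = (w[0], w[1], w[2]) separates the classes along the line w[0]*x + w[1]*y + w[2] = 0, i.e. y = -(w[0]*x + w[2]) / w[1] when w[1] != 0. A minimal sketch of a corrected plot call, assuming the np/pl imports and the w returned by perceptron_nobias in that file (the x-range [-1, 2] is an assumption matching the spread of generateData2):

    # hypothetical replacement for the FIXME'd pl.plot() call
    x_vals = np.array([-1.0, 2.0])           # two x positions suffice for a line
    y_vals = -(w[0] * x_vals + w[2]) / w[1]  # from w[0]*x + w[1]*y + w[2] = 0
    pl.plot(x_vals, y_vals)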
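
On the exo2 side, plongement maps a point (x, y) to (1, x, y, x^2, x*y, y^2), so a linear separator w found by perceptron_nobias in that 6-dimensional space corresponds to a conic w . plongement((x, y)) = 0 in the original plane. That is what can make the generateData3 samples (a square inside a ring) separable, e.g. by a circle such as x^2 + y^2 = 0.75. A minimal sketch of drawing that boundary back in 2D, assuming the names from tp3_exo2.py (the grid bounds are an assumption matching generateData3's side-3 square):

    # evaluate w . plongement((x, y)) on a grid and draw its zero level set
    xs, ys = np.meshgrid(np.linspace(-1.5, 1.5, 200), np.linspace(-1.5, 1.5, 200))
    phi = [np.ones_like(xs), xs, ys, xs * xs, xs * ys, ys * ys]
    zs = sum(wk * fk for wk, fk in zip(w, phi))
    pl.contour(xs, ys, zs, levels=[0])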