X-Git-Url: https://git.piment-noir.org/?p=TP_AA.git;a=blobdiff_plain;f=TP3%2Fexo1%2Ftp3_exo1.py;h=73dd307de18e12ea18c25324c03142001c04c12d;hp=dfd04d831840fa1024b5a45c35073325826d4ffa;hb=ce56d6abaff4ef1fd9598f542d521764278ca5bb;hpb=6f4ffbd78d75ea191f5d7708c1fe3a6f5ae734b5

diff --git a/TP3/exo1/tp3_exo1.py b/TP3/exo1/tp3_exo1.py
index dfd04d8..73dd307 100755
--- a/TP3/exo1/tp3_exo1.py
+++ b/TP3/exo1/tp3_exo1.py
@@ -39,8 +39,8 @@ def generateData2(n):
     return inputs
 
 
-training_set_size = 100
-training_set = generateData(training_set_size)
+training_set_size = 150
+training_set = generateData2(training_set_size)
 data = np.array(training_set)
 X = data[:, 0:2]
 Y = data[:, -1]
@@ -54,17 +54,24 @@ def perceptron_nobias(X, Y):
         classification_error = 0
         for i in range(X.shape[0]):
             if Y[i] * np.dot(w, X[i]) <= 0:
-                classification_error = classification_error + 1
+                classification_error += 1
                 w = w + Y[i] * X[i]
     return w
 
 
 def complete(sample):
-    sample = np.expand_dims(sample, axis=0)
-    return sample
+    new_sample = np.insert(sample, len(sample[0]), [1], axis=1)
+    return np.array(new_sample)
 
 
+X = complete(X)
 w = perceptron_nobias(X, Y)
-pl.plot([-1, 1], [w[0] / w[1], -w[0] / w[1]])
+# w is orthogonal to the hyperplane
+# with generateData
+# pl.plot([-1, 1], [w[0] / w[1], -w[0] / w[1]])
+# with generateData2 and complete
+# FIXME: the hyperplane equation is not correct
+pl.plot([0, -1 / w[1]], [w[0] / w[1] - 1 / w[1], -w[0] / w[1] - 1 / w[1]])
 pl.scatter(X[:, 0], X[:, 1], c=Y, s=training_set_size)
+pl.title(u"Perceptron - hyperplane")
 pl.show()
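
The commit's FIXME notes that, once complete() appends a bias column and w gains a third component, the plotted line no longer matches the decision boundary w[0]*x + w[1]*y + w[2] = 0. The following is a minimal, hypothetical sketch (not part of the commit) of one way to plot that boundary; the helper name plot_hyperplane and the sample weight vector are illustrative assumptions only.

    # Hypothetical sketch, not the committed code: plot the separating line for a
    # bias-augmented perceptron whose boundary is w[0]*x + w[1]*y + w[2] = 0.
    import numpy as np
    import pylab as pl

    def plot_hyperplane(w, x_range=(-1.0, 1.0)):
        """Plot the line w[0]*x + w[1]*y + w[2] = 0 over x_range.

        Assumes w[1] != 0; solving for y gives y = -(w[0]*x + w[2]) / w[1].
        """
        xs = np.array(x_range)
        ys = -(w[0] * xs + w[2]) / w[1]
        pl.plot(xs, ys, "k-")

    # Example usage with an arbitrary (made-up) weight vector:
    if __name__ == "__main__":
        w = np.array([1.0, -2.0, 0.5])
        plot_hyperplane(w)
        pl.title(u"Perceptron - hyperplane")
        pl.show()

In the script above, the same helper could replace the FIXME'd pl.plot call, with w taken from perceptron_nobias on the bias-augmented X.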