The k-Nearest Neighbors Method
The algorithm we study in this section is called k-nearest neighbors (KNN). It is a more general form of the method we used earlier to recognize digits in images, and, broadly speaking, it too can be called a "Bayes"-style method, since it judges a new object by consulting an already existing dataset. When we identified the digit in an image with the nearest-neighbor method, we looked at just one closest neighbor; now we extend this and take not one but k neighbors into account. In addition, in this chapter we generalize the method and apply it to regression problems as well.
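Before building the full pipeline below, here is a minimal sketch of the idea for regression (the function and argument names are hypothetical, chosen only for illustration): find the k training points closest to a query point and average their target values.

import numpy as np

def knn_regress(x_query, X_train, y_train, k):
    # Euclidean distance from the query point to every training point
    d = np.sqrt(np.sum((X_train - x_query) ** 2, axis=1))
    # indices of the k closest training points
    nearest = np.argsort(d)[:k]
    # regression prediction: the mean target of those k neighbors
    return np.mean(y_train[nearest])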
[1]:
import numpy as np
[2]:
data = np.loadtxt('/home/tqqt1/AI/teachings/online-courses/ai_intro/datasets/housing.csv',
                  skiprows=1,      # skip the header row
                  delimiter=',')
X = data[:, 1:]   # all columns after the first are features
y = data[:, 0]    # the first column is the target
[3]:
num_train = 300
num_val = 145
num_test = 100
[4]:
indices = np.arange(data.shape[0])
np.random.seed(42)          # fix the seed so the split is reproducible
np.random.shuffle(indices)
# split the shuffled indices into train / validation / test parts
X_tr = X[indices[:num_train]]
y_tr = y[indices[:num_train]]
X_va = X[indices[num_train:num_train+num_val]]
y_va = y[indices[num_train:num_train+num_val]]
X_te = X[indices[num_train+num_val:]]
y_te = y[indices[num_train+num_val:]]
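The same three-way split could also be produced with scikit-learn, assuming it is installed; note that train_test_split shuffles internally, so the resulting partition will not be identical to the manual one above. A sketch (the *_alt names are illustrative):

from sklearn.model_selection import train_test_split

# two calls, because train_test_split yields only two partitions at a time
X_tmp, X_te_alt, y_tmp, y_te_alt = train_test_split(X, y, test_size=num_test, random_state=42)
X_tr_alt, X_va_alt, y_tr_alt, y_va_alt = train_test_split(X_tmp, y_tmp, test_size=num_val, random_state=42)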
[5]:
x_tr_max = X_tr.max(axis=0)   # per-feature maximum over the TRAINING set only
X_tr = X_tr / x_tr_max
X_va = X_va / x_tr_max        # validation and test are scaled with the training
X_te = X_te / x_tr_max        # maxima, so no information leaks from those sets
[23]:
D = np.zeros((num_val, num_train))
# loop over the validation set
i = 0
while i < num_val:
    # loop over the training set
    j = 0
    while j < num_train:
        # Manhattan distance (alternative):
        # D[i, j] = np.sum(np.abs(X_va[i] - X_tr[j]))
        # Euclidean distance
        D[i, j] = np.sqrt(np.sum((X_va[i] - X_tr[j])**2))
        j += 1
    i += 1
D_argsort = np.argsort(D, axis=1)  # per validation point: train indices sorted by distance
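The double loop above is easy to read but slow in pure Python. The same matrix can be computed in one broadcasted NumPy expression (a sketch reusing the arrays above; equivalent up to floating-point rounding):

# (num_val, 1, d) - (1, num_train, d) broadcasts to (num_val, num_train, d)
D_fast = np.sqrt(np.sum((X_va[:, None, :] - X_tr[None, :, :])**2, axis=2))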
[24]:
k = 1
k_best = 1
E_best = 1e10
while k <= 50:
    # targets of the k nearest training neighbors for each validation point
    y_pred = y_tr[D_argsort[:, :k]]
    y_pred = np.mean(y_pred, axis=1)
    # mean absolute error on the validation set
    E = np.mean(np.abs(y_pred - y_va))
    if E < E_best:
        E_best = E
        k_best = k
    k += 1
print(f"k={k_best}, E={E_best:.4f}")
k=8, E=835205.0000
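The key step above is the expression y_tr[D_argsort[:, :k]], which relies on NumPy fancy indexing: indexing a 1-D array with a 2-D integer array returns a 2-D array with the shape of the index. A tiny demonstration with made-up values:

vals = np.array([10., 20., 30., 40.])
idx = np.array([[0, 2],
                [3, 1]])
print(vals[idx])  # [[10. 30.]
                  #  [40. 20.]] -> one row of k neighbor targets per query point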
[25]:
D = np.zeros((num_test, num_train))
# loop over the test set
i = 0
while i < num_test:
    # loop over the training set
    j = 0
    while j < num_train:
        # Manhattan distance (alternative):
        # D[i, j] = np.sum(np.abs(X_te[i] - X_tr[j]))
        # Euclidean distance
        D[i, j] = np.sqrt(np.sum((X_te[i] - X_tr[j])**2))
        j += 1
    i += 1
D_argsort = np.argsort(D, axis=1)
[26]:
y_pred = y_tr[D_argsort[:, :k_best]]
y_pred = np.mean(y_pred, axis=1)
E = np.mean(np.abs(y_pred - y_te))
print(f"k={k_best}, Test error: {E:.4f}")
k=8, Test error: 920443.5625
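As a sanity check, the same unweighted prediction can be reproduced with scikit-learn's KNeighborsRegressor (assuming scikit-learn is installed): with the default uniform weights and Euclidean metric it performs exactly this k-neighbor averaging, so up to distance tie-breaking it should match the manual result. A sketch reusing the arrays above:

from sklearn.neighbors import KNeighborsRegressor

knn = KNeighborsRegressor(n_neighbors=k_best)  # weights='uniform' by default
knn.fit(X_tr, y_tr)
E_sk = np.mean(np.abs(knn.predict(X_te) - y_te))
print(f"sklearn k={k_best}, Test error: {E_sk:.4f}")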
Weighted KNN
In plain KNN all k neighbors contribute equally, even though closer neighbors should arguably matter more. The scheme below normalizes the k nearest distances by their sum, subtracts the result from 1 so that the closest neighbor receives the largest value, and divides by k - 1 so that the weights sum to one; the prediction is then the weighted sum of the neighbors' targets instead of their plain mean.
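A quick worked example with hypothetical distances shows the weighting:

d = np.array([1.0, 2.0, 3.0])          # distances to the k = 3 nearest neighbors
w = (1 - d / d.sum()) / (len(d) - 1)   # closest neighbor gets the largest weight
print(w, w.sum())                      # [0.41666667 0.33333333 0.25] 1.0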
[18]:
D = np.zeros((num_val, num_train))
# loop over the validation set
i = 0
while i < num_val:
    # loop over the training set
    j = 0
    while j < num_train:
        # Manhattan distance (alternative):
        # D[i, j] = np.sum(np.abs(X_va[i] - X_tr[j]))
        # Euclidean distance
        D[i, j] = np.sqrt(np.sum((X_va[i] - X_tr[j])**2))
        j += 1
    i += 1
D_argsort = np.argsort(D, axis=1)
[19]:
k_best = 2
E_best = 1e10
k = 2   # k = 1 would give the single neighbor a weight of zero, so start from 2
while k <= 50:
    # distances to the k nearest neighbors, in sorted order
    D_k = np.take_along_axis(D, D_argsort, axis=1)[:, :k]
    D_k_sum = np.sum(D_k, axis=1, keepdims=True)
    D_k_1 = D_k / D_k_sum    # normalized distances, each row sums to 1
    D_k_2 = 1 - D_k_1        # flipped, so the closest neighbor is largest
    w = D_k_2 / (k - 1)      # rescaled, so the weights sum to 1
    y_pred = y_tr[D_argsort[:, :k]]
    y_pred = y_pred * w
    y_pred = np.sum(y_pred, axis=1)
    E = np.mean(np.abs(y_pred - y_va))
    if E < E_best:
        E_best = E
        k_best = k
    k += 1
print(f"k={k_best}, E={E_best:.4f}")
k=8, E=834687.4107
[21]:
D = np.zeros((num_test, num_train))
# loop over the test set
i = 0
while i < num_test:
    # loop over the training set
    j = 0
    while j < num_train:
        # Manhattan distance (alternative):
        # D[i, j] = np.sum(np.abs(X_te[i] - X_tr[j]))
        # Euclidean distance
        D[i, j] = np.sqrt(np.sum((X_te[i] - X_tr[j])**2))
        j += 1
    i += 1
D_argsort = np.argsort(D, axis=1)
[22]:
D_k = np.take_along_axis(D, D_argsort, axis=1)[:, :k_best]
D_k_sum = np.sum(D_k, axis=1, keepdims=True)
D_k_1 = D_k / D_k_sum
D_k_2 = 1 - D_k_1
w = D_k_2 / (k_best - 1)   # note: k_best, not k, so the weights still sum to 1
y_pred = y_tr[D_argsort[:, :k_best]]
y_pred = y_pred * w
y_pred = np.sum(y_pred, axis=1)
E = np.mean(np.abs(y_pred - y_te))
print(f"k={k_best}, Test error: {E:.4f}")
k=8, Test error: 3916707.8565