Ex_treme's blog.

神经元实现(二分类logistic回归模型实现)(2)

2018/11/13 Share
  • 调整了数据集的测试代码
  • 新增了训练的测试代码

数据集的测试代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
def load_data(filename):
    """Read one pickled CIFAR-10 batch file and return (data, labels).

    Parameters
    ----------
    filename : str
        Path to a pickled CIFAR-10 batch file (keys are bytes: b'data', b'labels').

    Returns
    -------
    tuple
        (data, labels): the flattened image rows and their class indices.
    """
    # NOTE(review): pickle.load on an untrusted file can execute arbitrary
    # code -- only call this on trusted CIFAR-10 batch files.
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
    return data[b'data'], data[b'labels']

class CifarData:
    """In-memory CIFAR-10 subset (classes 0 and 1 only) with batch iteration."""

    def __init__(self, filenames, need_shuffle):
        """Load the given batch files, keep labels 0/1, and scale pixels to [-1, 1].

        Parameters
        ----------
        filenames : list of str
            Pickled CIFAR-10 batch files to load.
        need_shuffle : bool
            If True, reshuffle the examples now and at every epoch boundary.
        """
        kept_images = []
        kept_labels = []
        for fname in filenames:
            images, labels = load_data(fname)
            for image, label in zip(images, labels):
                if label in [0, 1]:
                    kept_images.append(image)
                    kept_labels.append(label)
        self._data = np.vstack(kept_images)
        # Map raw pixel values from [0, 255] onto [-1, 1].
        self._data = self._data / 127.5 - 1
        self._labels = np.hstack(kept_labels)
        print(self._data.shape)
        print(self._labels.shape)

        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        self._indicator = 0  # cursor into the (possibly shuffled) examples
        if self._need_shuffle:
            self._shuffle_data()

    def _shuffle_data(self):
        """Apply one random permutation to data and labels in lockstep."""
        order = np.random.permutation(self._num_examples)
        self._data = self._data[order]
        self._labels = self._labels[order]

    def next_batch(self, batch_size):
        """Return the next batch_size examples as (batch_data, batch_labels).

        Raises
        ------
        Exception
            When an unshuffled dataset is exhausted, or batch_size exceeds
            the total number of examples.
        """
        end = self._indicator + batch_size
        if end > self._num_examples:
            if not self._need_shuffle:
                raise Exception("have no more examples")
            # Epoch boundary: reshuffle and restart from the beginning.
            self._shuffle_data()
            self._indicator = 0
            end = batch_size
        if end > self._num_examples:
            raise Exception("batch size is larger than all examples")
        batch_data = self._data[self._indicator:end]
        batch_labels = self._labels[self._indicator:end]
        self._indicator = end
        return batch_data, batch_labels

# Assemble on-disk paths: five CIFAR-10 training batches plus one test batch.
train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]

# The training set is reshuffled every epoch; the test set keeps file order.
train_data = CifarData(train_filenames, True)
test_data = CifarData(test_filenames, False)

# batch_data, batch_labels = train_data.next_batch(10)
# print(batch_data)
# print(batch_labels)

(10000, 3072)

(10000,)

(2000, 3072)

(2000,)

新增了训练的测试代码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
# Training configuration for the binary (classes 0/1) logistic-regression model.
init = tf.global_variables_initializer()
batch_size = 20
train_steps = 100000
test_steps = 100  # 100 batches of 20 covers the full 2000-example test set

with tf.Session() as sess:
    sess.run(init)
    for i in range(train_steps):
        batch_data, batch_labels = train_data.next_batch(batch_size)
        loss_val, accu_val, _ = sess.run(
            [loss, accuracy, train_op],
            feed_dict={
                x: batch_data,
                y: batch_labels})
        if (i + 1) % 500 == 0:
            print('[Train] Step: %d, loss:%4.5f, acc: %4.5f'
                  % (i + 1, loss_val, accu_val))
        if (i + 1) % 5000 == 0:
            # Rebuild the test set so evaluation always starts at example 0
            # (next_batch raises once an unshuffled dataset is exhausted).
            test_data = CifarData(test_filenames, False)
            all_test_acc_val = []
            for j in range(test_steps):
                test_batch_data, test_batch_labels = test_data.next_batch(batch_size)
                # Fetch the tensor directly (not wrapped in a list) so we
                # accumulate plain scalars instead of one-element lists.
                test_acc_val = sess.run(
                    accuracy,
                    feed_dict={
                        x: test_batch_data,
                        y: test_batch_labels})
                all_test_acc_val.append(test_acc_val)
            test_acc = np.mean(all_test_acc_val)
            print("[Test] Step: %d, acc: %4.5f" % (i + 1, test_acc))

训练结果:

[Train] Step: 500, loss:0.34972, acc: 0.65000
[Train] Step: 1000, loss:0.25081, acc: 0.75000
[Train] Step: 1500, loss:0.32387, acc: 0.65000
[Train] Step: 2000, loss:0.19776, acc: 0.80000
[Train] Step: 2500, loss:0.15003, acc: 0.85000
[Train] Step: 3000, loss:0.13229, acc: 0.85000
[Train] Step: 3500, loss:0.20558, acc: 0.80000
[Train] Step: 4000, loss:0.29615, acc: 0.70000
[Train] Step: 4500, loss:0.08947, acc: 0.90000
[Train] Step: 5000, loss:0.24596, acc: 0.75000
(2000, 3072)
(2000,)
[Test] Step: 5000, acc: 0.79400
[Train] Step: 5500, loss:0.15080, acc: 0.85000
[Train] Step: 6000, loss:0.13784, acc: 0.85000
[Train] Step: 6500, loss:0.32210, acc: 0.65000
[Train] Step: 7000, loss:0.19997, acc: 0.80000
[Train] Step: 7500, loss:0.30646, acc: 0.65000
[Train] Step: 8000, loss:0.29746, acc: 0.70000
[Train] Step: 8500, loss:0.34132, acc: 0.65000
[Train] Step: 9000, loss:0.24997, acc: 0.75000
[Train] Step: 9500, loss:0.01327, acc: 1.00000
[Train] Step: 10000, loss:0.10051, acc: 0.90000
(2000, 3072)
(2000,)
[Test] Step: 10000, acc: 0.80700
[Train] Step: 10500, loss:0.15171, acc: 0.85000
[Train] Step: 11000, loss:0.16315, acc: 0.80000
[Train] Step: 11500, loss:0.03247, acc: 0.95000
[Train] Step: 12000, loss:0.11663, acc: 0.85000
[Train] Step: 12500, loss:0.15000, acc: 0.85000
[Train] Step: 13000, loss:0.15000, acc: 0.85000
[Train] Step: 13500, loss:0.12864, acc: 0.85000
[Train] Step: 14000, loss:0.10034, acc: 0.90000
[Train] Step: 14500, loss:0.15703, acc: 0.85000
[Train] Step: 15000, loss:0.23820, acc: 0.75000
(2000, 3072)
(2000,)
[Test] Step: 15000, acc: 0.81350
[Train] Step: 15500, loss:0.23760, acc: 0.75000
[Train] Step: 16000, loss:0.31062, acc: 0.70000
[Train] Step: 16500, loss:0.21815, acc: 0.75000
[Train] Step: 17000, loss:0.10000, acc: 0.90000
[Train] Step: 17500, loss:0.11240, acc: 0.85000
[Train] Step: 18000, loss:0.19982, acc: 0.80000
[Train] Step: 18500, loss:0.10140, acc: 0.90000
[Train] Step: 19000, loss:0.14327, acc: 0.80000
[Train] Step: 19500, loss:0.34986, acc: 0.65000
[Train] Step: 20000, loss:0.19997, acc: 0.80000
(2000, 3072)
(2000,)
[Test] Step: 20000, acc: 0.81400
[Train] Step: 20500, loss:0.15000, acc: 0.85000
[Train] Step: 21000, loss:0.05008, acc: 0.95000
[Train] Step: 21500, loss:0.10000, acc: 0.90000
[Train] Step: 22000, loss:0.20128, acc: 0.80000
[Train] Step: 22500, loss:0.10240, acc: 0.90000
[Train] Step: 23000, loss:0.06212, acc: 0.95000
[Train] Step: 23500, loss:0.15000, acc: 0.85000
[Train] Step: 24000, loss:0.15001, acc: 0.85000
[Train] Step: 24500, loss:0.09032, acc: 0.90000
[Train] Step: 25000, loss:0.11498, acc: 0.85000
(2000, 3072)
(2000,)
[Test] Step: 25000, acc: 0.81500
[Train] Step: 25500, loss:0.21641, acc: 0.75000
[Train] Step: 26000, loss:0.29773, acc: 0.70000
[Train] Step: 26500, loss:0.19523, acc: 0.80000
[Train] Step: 27000, loss:0.14999, acc: 0.85000
[Train] Step: 27500, loss:0.29387, acc: 0.70000
[Train] Step: 28000, loss:0.09986, acc: 0.90000
[Train] Step: 28500, loss:0.20017, acc: 0.80000
[Train] Step: 29000, loss:0.10000, acc: 0.90000
[Train] Step: 29500, loss:0.06900, acc: 0.90000
[Train] Step: 30000, loss:0.20000, acc: 0.80000
(2000, 3072)
(2000,)
[Test] Step: 30000, acc: 0.81400
[Train] Step: 30500, loss:0.29999, acc: 0.70000
[Train] Step: 31000, loss:0.10000, acc: 0.90000
[Train] Step: 31500, loss:0.28020, acc: 0.70000
[Train] Step: 32000, loss:0.23061, acc: 0.75000
[Train] Step: 32500, loss:0.09479, acc: 0.90000
[Train] Step: 33000, loss:0.15682, acc: 0.80000
[Train] Step: 33500, loss:0.20000, acc: 0.80000
[Train] Step: 34000, loss:0.10102, acc: 0.90000
[Train] Step: 34500, loss:0.10090, acc: 0.90000
[Train] Step: 35000, loss:0.20003, acc: 0.80000
(2000, 3072)
(2000,)
[Test] Step: 35000, acc: 0.81150
[Train] Step: 35500, loss:0.20290, acc: 0.80000
[Train] Step: 36000, loss:0.05014, acc: 0.95000
[Train] Step: 36500, loss:0.20018, acc: 0.80000
[Train] Step: 37000, loss:0.25043, acc: 0.75000
[Train] Step: 37500, loss:0.11776, acc: 0.85000
[Train] Step: 38000, loss:0.20055, acc: 0.80000
[Train] Step: 38500, loss:0.30000, acc: 0.70000
[Train] Step: 39000, loss:0.20100, acc: 0.80000
[Train] Step: 39500, loss:0.05023, acc: 0.95000
[Train] Step: 40000, loss:0.05000, acc: 0.95000
(2000, 3072)
(2000,)
[Test] Step: 40000, acc: 0.81500
[Train] Step: 40500, loss:0.10000, acc: 0.90000
[Train] Step: 41000, loss:0.29134, acc: 0.70000
[Train] Step: 41500, loss:0.10074, acc: 0.90000
[Train] Step: 42000, loss:0.34999, acc: 0.65000
[Train] Step: 42500, loss:0.15017, acc: 0.85000
[Train] Step: 43000, loss:0.10000, acc: 0.90000
[Train] Step: 43500, loss:0.20008, acc: 0.80000
[Train] Step: 44000, loss:0.34917, acc: 0.65000
[Train] Step: 44500, loss:0.05077, acc: 0.95000
[Train] Step: 45000, loss:0.16136, acc: 0.85000
(2000, 3072)
(2000,)
[Test] Step: 45000, acc: 0.81750
[Train] Step: 45500, loss:0.10502, acc: 0.90000
[Train] Step: 46000, loss:0.10036, acc: 0.90000
[Train] Step: 46500, loss:0.25016, acc: 0.75000
[Train] Step: 47000, loss:0.25000, acc: 0.75000
[Train] Step: 47500, loss:0.11138, acc: 0.90000
[Train] Step: 48000, loss:0.15029, acc: 0.85000
[Train] Step: 48500, loss:0.20625, acc: 0.80000
[Train] Step: 49000, loss:0.15002, acc: 0.85000
[Train] Step: 49500, loss:0.13137, acc: 0.85000
[Train] Step: 50000, loss:0.20007, acc: 0.80000
(2000, 3072)
(2000,)
[Test] Step: 50000, acc: 0.81350
[Train] Step: 50500, loss:0.25050, acc: 0.75000
[Train] Step: 51000, loss:0.16586, acc: 0.80000
[Train] Step: 51500, loss:0.05447, acc: 0.95000
[Train] Step: 52000, loss:0.09779, acc: 0.90000
[Train] Step: 52500, loss:0.09993, acc: 0.90000
[Train] Step: 53000, loss:0.10002, acc: 0.90000
[Train] Step: 53500, loss:0.24999, acc: 0.75000
[Train] Step: 54000, loss:0.09413, acc: 0.90000
[Train] Step: 54500, loss:0.05066, acc: 0.95000
[Train] Step: 55000, loss:0.06452, acc: 0.95000
(2000, 3072)
(2000,)
[Test] Step: 55000, acc: 0.81650
[Train] Step: 55500, loss:0.15000, acc: 0.85000
[Train] Step: 56000, loss:0.10002, acc: 0.90000
[Train] Step: 56500, loss:0.16470, acc: 0.80000
[Train] Step: 57000, loss:0.15289, acc: 0.85000
[Train] Step: 57500, loss:0.14925, acc: 0.85000
[Train] Step: 58000, loss:0.00004, acc: 1.00000
[Train] Step: 58500, loss:0.17873, acc: 0.80000
[Train] Step: 59000, loss:0.10021, acc: 0.90000
[Train] Step: 59500, loss:0.15005, acc: 0.85000
[Train] Step: 60000, loss:0.25000, acc: 0.75000
(2000, 3072)
(2000,)
[Test] Step: 60000, acc: 0.81350
[Train] Step: 60500, loss:0.20056, acc: 0.80000
[Train] Step: 61000, loss:0.18156, acc: 0.80000
[Train] Step: 61500, loss:0.05029, acc: 0.95000
[Train] Step: 62000, loss:0.19842, acc: 0.80000
[Train] Step: 62500, loss:0.20000, acc: 0.80000
[Train] Step: 63000, loss:0.15062, acc: 0.85000
[Train] Step: 63500, loss:0.15000, acc: 0.85000
[Train] Step: 64000, loss:0.15163, acc: 0.85000
[Train] Step: 64500, loss:0.15050, acc: 0.85000
[Train] Step: 65000, loss:0.24990, acc: 0.75000
(2000, 3072)
(2000,)
[Test] Step: 65000, acc: 0.81650
[Train] Step: 65500, loss:0.05177, acc: 0.95000
[Train] Step: 66000, loss:0.25148, acc: 0.75000
[Train] Step: 66500, loss:0.25219, acc: 0.75000
[Train] Step: 67000, loss:0.01503, acc: 1.00000
[Train] Step: 67500, loss:0.14373, acc: 0.85000
[Train] Step: 68000, loss:0.10028, acc: 0.90000
[Train] Step: 68500, loss:0.10000, acc: 0.90000
[Train] Step: 69000, loss:0.15000, acc: 0.85000
[Train] Step: 69500, loss:0.10036, acc: 0.90000
[Train] Step: 70000, loss:0.20056, acc: 0.75000
(2000, 3072)
(2000,)
[Test] Step: 70000, acc: 0.81350
[Train] Step: 70500, loss:0.10613, acc: 0.90000
[Train] Step: 71000, loss:0.20001, acc: 0.80000
[Train] Step: 71500, loss:0.00083, acc: 1.00000
[Train] Step: 72000, loss:0.05001, acc: 0.95000
[Train] Step: 72500, loss:0.25088, acc: 0.75000
[Train] Step: 73000, loss:0.05021, acc: 0.95000
[Train] Step: 73500, loss:0.05031, acc: 0.95000
[Train] Step: 74000, loss:0.23307, acc: 0.75000
[Train] Step: 74500, loss:0.20002, acc: 0.80000
[Train] Step: 75000, loss:0.15080, acc: 0.85000
(2000, 3072)
(2000,)
[Test] Step: 75000, acc: 0.81400
[Train] Step: 75500, loss:0.10167, acc: 0.90000
[Train] Step: 76000, loss:0.14995, acc: 0.85000
[Train] Step: 76500, loss:0.15019, acc: 0.85000
[Train] Step: 77000, loss:0.10022, acc: 0.90000
[Train] Step: 77500, loss:0.00061, acc: 1.00000
[Train] Step: 78000, loss:0.20023, acc: 0.80000
[Train] Step: 78500, loss:0.22279, acc: 0.75000
[Train] Step: 79000, loss:0.20009, acc: 0.80000
[Train] Step: 79500, loss:0.10502, acc: 0.90000
[Train] Step: 80000, loss:0.21905, acc: 0.75000
(2000, 3072)
(2000,)
[Test] Step: 80000, acc: 0.81250
[Train] Step: 80500, loss:0.20000, acc: 0.80000
[Train] Step: 81000, loss:0.10098, acc: 0.90000
[Train] Step: 81500, loss:0.15061, acc: 0.85000
[Train] Step: 82000, loss:0.00301, acc: 1.00000
[Train] Step: 82500, loss:0.05492, acc: 0.95000
[Train] Step: 83000, loss:0.10396, acc: 0.90000
[Train] Step: 83500, loss:0.16083, acc: 0.85000
[Train] Step: 84000, loss:0.14995, acc: 0.85000
[Train] Step: 84500, loss:0.10011, acc: 0.90000
[Train] Step: 85000, loss:0.14943, acc: 0.85000
(2000, 3072)
(2000,)
[Test] Step: 85000, acc: 0.80850
[Train] Step: 85500, loss:0.15764, acc: 0.85000
[Train] Step: 86000, loss:0.10340, acc: 0.90000
[Train] Step: 86500, loss:0.09898, acc: 0.90000
[Train] Step: 87000, loss:0.20000, acc: 0.80000
[Train] Step: 87500, loss:0.10002, acc: 0.90000
[Train] Step: 88000, loss:0.05000, acc: 0.95000
[Train] Step: 88500, loss:0.15000, acc: 0.85000
[Train] Step: 89000, loss:0.20583, acc: 0.80000
[Train] Step: 89500, loss:0.15086, acc: 0.85000
[Train] Step: 90000, loss:0.19963, acc: 0.80000
(2000, 3072)
(2000,)
[Test] Step: 90000, acc: 0.80950
[Train] Step: 90500, loss:0.20024, acc: 0.80000
[Train] Step: 91000, loss:0.20210, acc: 0.80000
[Train] Step: 91500, loss:0.10000, acc: 0.90000
[Train] Step: 92000, loss:0.20000, acc: 0.80000
[Train] Step: 92500, loss:0.10005, acc: 0.90000
[Train] Step: 93000, loss:0.34990, acc: 0.65000
[Train] Step: 93500, loss:0.12632, acc: 0.85000
[Train] Step: 94000, loss:0.20098, acc: 0.80000
[Train] Step: 94500, loss:0.15001, acc: 0.85000
[Train] Step: 95000, loss:0.15000, acc: 0.85000
(2000, 3072)
(2000,)
[Test] Step: 95000, acc: 0.81250
[Train] Step: 95500, loss:0.15014, acc: 0.85000
[Train] Step: 96000, loss:0.20001, acc: 0.80000
[Train] Step: 96500, loss:0.20008, acc: 0.80000
[Train] Step: 97000, loss:0.09981, acc: 0.90000
[Train] Step: 97500, loss:0.10011, acc: 0.90000
[Train] Step: 98000, loss:0.10008, acc: 0.90000
[Train] Step: 98500, loss:0.15029, acc: 0.85000
[Train] Step: 99000, loss:0.10099, acc: 0.90000
[Train] Step: 99500, loss:0.05000, acc: 0.95000
[Train] Step: 100000, loss:0.05000, acc: 0.95000
(2000, 3072)
(2000,)
[Test] Step: 100000, acc: 0.81150
CATALOG
  1. 1. 数据集的测试代码
  2. 2. 新增了训练的测试代码