しがないエンジニアのブログ

Rambling technical notes, in lieu of a memo pad

Handwritten digit recognition on MNIST with Chainer

This post is 99% based on the article below, so see it for the full details:
【機械学習】ディープラーニング フレームワークChainerを試しながら解説してみる。 - Qiita

My Python version was different, though, and a few parts didn't work in my environment, so I changed them slightly.
In particular, the part at the end that visualizes the connection weights.
I got seriously stuck not knowing how to access the value of a Chainer Variable...
Turns out you just write .data.
Come to think of it, .data already shows up where the MNIST pixels get normalized...
I'll study harder! (`・ω・´)ゞ
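
For the record, a minimal sketch of the .data access (assuming the same Chainer 1.x API as the script below):

# wrap a numpy array in a Variable, then read it back through .data
import numpy as np
from chainer import Variable

x = Variable(np.array([1.0, 2.0], dtype=np.float32))
print(x.data)  # -> the underlying numpy array: [ 1.  2.]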

The source:

# -*- coding: utf-8 -*-

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_mldata  # note: removed in newer scikit-learn (fetch_openml replaces it)
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
import sys

plt.style.use("ggplot")

batchsize = 100
n_epoch = 20
n_units = 1000  # number of hidden units
pixel_size = 28

# download the MNIST image data
print("fetch MNIST dataset")
mnist = fetch_mldata('MNIST original', data_home=".")
# mnist.data : 70,000 samples of 28x28 = 784-dimensional vectors
mnist.data = mnist.data.astype(np.float32)
mnist.data /= 255  # normalize pixel values to [0, 1]

# mnist.target : ground-truth labels
mnist.target = mnist.target.astype(np.int32)

# use the first N samples for training and the rest for testing
N = 60000
x_train, x_test = np.split(mnist.data,      [N])
y_train, y_test = np.split(mnist.target,    [N])
N_test = y_test.size

# MLP model
# 784 inputs, 10 outputs
model = FunctionSet(l1 = F.Linear(784, n_units),
                    l2 = F.Linear(n_units, n_units),
                    l3 = F.Linear(n_units, 10))

# network structure (forward pass)
def forward(x_data, y_data, train = True):
    x, t = Variable(x_data), Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)), train = train)
    h2 = F.dropout(F.relu(model.l2(h1)), train = train)
    y = model.l3(h2)
    
    # multiclass classification, so use softmax cross-entropy as the loss function
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

# set up the optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)

# train and show results
train_loss = []
train_acc = []
test_loss = []
test_acc = []

l1_W = []
l2_W = []
l3_W = []

# Learning loop
for epoch in range(1, n_epoch + 1):
    print("epoch", epoch)
    
    # training
    # randomly permute the order of the N training samples
    perm = np.random.permutation(N)
    sum_accuracy = 0
    sum_loss = 0
    
    # go through the N samples batchsize at a time
    for i in range(0, N, batchsize):
        x_batch = x_train[perm[i:i+batchsize]]
        y_batch = y_train[perm[i:i+batchsize]]
        
        # reset the gradients
        optimizer.zero_grads()
        # forward pass to compute the loss and accuracy
        loss, acc = forward(x_batch, y_batch)
        # backpropagate to compute the gradients
        loss.backward()
        optimizer.update()
        
        train_loss.append(loss.data)
        train_acc.append(acc.data)
        sum_loss     += float(cuda.to_cpu(loss.data)) * batchsize
        sum_accuracy += float(cuda.to_cpu(acc.data)) * batchsize
        
    # report the mean loss and accuracy over the training data
    print("train mean loss = {0}, accuracy = {1}".format(sum_loss / N, sum_accuracy / N))
    
    # evaluation
    # compute the loss and accuracy on the test data to check generalization
    sum_accuracy = 0
    sum_loss = 0
    for i in range(0, N_test, batchsize):
        x_batch = x_test[i:i+batchsize]
        y_batch = y_test[i:i+batchsize]
        
        # forward pass to compute the loss and accuracy
        loss, acc = forward(x_batch, y_batch, train = False)
        
        test_loss.append(loss.data)
        test_acc.append(acc.data)
        sum_loss     += float(cuda.to_cpu(loss.data)) * batchsize
        sum_accuracy += float(cuda.to_cpu(acc.data)) * batchsize
        
    # report the mean loss and accuracy over the test data
    print("test  mean loss = {0}, accuracy = {1}".format(sum_loss / N_test, sum_accuracy / N_test))
    
    # save the learned parameters
    # model.l1.W : 1000 rows x 784 (28x28) columns (each row holds the incoming weights of one hidden unit)
    # note: this appends a reference, not a copy, so every entry ends up
    # pointing at the final weights; only the last entry is used below
    l1_W.append(model.l1.W)
    l2_W.append(model.l2.W)
    l3_W.append(model.l3.W)

# plot the training/test accuracy
plt.figure(figsize = (8, 6))
plt.plot(range(len(train_acc)), train_acc)
plt.plot(range(len(test_acc)), test_acc)
plt.legend(["train_acc", "test_acc"], loc = 4)
plt.title("Accuracy of MNIST recognition.")
plt.show()

# check the answers against the model's predictions
plt.style.use("fivethirtyeight")

def draw_digit3(data, n, ans, recog):
    plt.subplot(10, 10, n)
    Z = data.reshape(pixel_size, pixel_size)
    Z = Z[::-1]
    plt.xlim(0, pixel_size - 1)
    plt.ylim(0, pixel_size - 1)
    plt.pcolor(Z)
    plt.title("ans = {0}, recog = {1}".format(ans, recog), size = 8)
    plt.gray()
    plt.tick_params(labelbottom = "off")
    plt.tick_params(labelleft = "off")

plt.figure(figsize = (15, 15))

# pick 100 random training samples and check the predictions
cnt = 0
for idx in np.random.permutation(N)[:100]:
    xxx = x_train[idx].astype(np.float32)
    h1 = F.dropout(F.relu(model.l1(Variable(xxx.reshape(1, 784)))), train = False)
    h2 = F.dropout(F.relu(model.l2(h1)), train = False)
    y = model.l3(h2)
    cnt += 1
    draw_digit3(x_train[idx], cnt, y_train[idx], np.argmax(y.data))
plt.show()

# visualize the first layer's weights W
def draw_digit2(data, n, i):
    plt.subplot(10, 10, n)
    Z = data.reshape(pixel_size, pixel_size)
    Z = Z[::-1]
    plt.xlim(0, pixel_size - 1)
    plt.ylim(0, pixel_size - 1)
    plt.imshow(Z)
    plt.title("{0}".format(i), size = 9)
    plt.gray()
    plt.tick_params(labelbottom = "off")
    plt.tick_params(labelleft = "off")

plt.figure(figsize = (10, 10))
cnt = 1
for i in np.random.permutation(1000)[:100]:  # sample 100 of the 1000 hidden units
    draw_digit2(l1_W[len(l1_W)-1][i].data, cnt, i)
    cnt += 1
plt.show()

# visualize the output layer's weights W
def draw_digit(data, n, i):
    size = 32
    plt.subplot(4, 4, n)
    data = np.r_[data, np.zeros(24)]  # pad 1000 -> 1024 values so it reshapes to 32x32
    Z = data.reshape(size, size)
    Z = Z[::-1]
    plt.xlim(0, size - 1)
    plt.ylim(0, size - 1)
    plt.imshow(Z)
    plt.title("{0}".format(i), size = 9)
    plt.gray()
    plt.tick_params(labelbottom = "off")
    plt.tick_params(labelleft = "off")

plt.figure(figsize = (10, 10))
cnt = 1
for i in range(10):
    draw_digit(l3_W[len(l3_W)-1][i].data, cnt, i)
    cnt += 1
plt.show()

The results:

fetch MNIST dataset
epoch 1
train mean loss = 0.2787790427667399, accuracy = 0.9150833353027701
test  mean loss = 0.10822272229357623, accuracy = 0.9668000030517578
epoch 2
train mean loss = 0.1389483412137876, accuracy = 0.957716670135657
test  mean loss = 0.08707395097822883, accuracy = 0.9714000058174134
epoch 3
train mean loss = 0.10811293963342905, accuracy = 0.9656666721900304
test  mean loss = 0.07187424160743831, accuracy = 0.9772000056505203
epoch 4
train mean loss = 0.09616058537581315, accuracy = 0.9697166734933853
test  mean loss = 0.07086936612904537, accuracy = 0.9782000070810318
epoch 5
train mean loss = 0.08106055756487573, accuracy = 0.9747833427786827
test  mean loss = 0.0662240801162261, accuracy = 0.9793000066280365
epoch 6
train mean loss = 0.07567919471378749, accuracy = 0.9763500096400579
test  mean loss = 0.06575668895995478, accuracy = 0.9814000076055527
epoch 7
train mean loss = 0.07104070158597703, accuracy = 0.977166676223278
test  mean loss = 0.0631874037714806, accuracy = 0.9805000078678131
epoch 8
train mean loss = 0.06714391826031109, accuracy = 0.9793666768074035
test  mean loss = 0.06452341404175968, accuracy = 0.9805000078678131
epoch 9
train mean loss = 0.06316371884934294, accuracy = 0.9803000109394392
test  mean loss = 0.0631298817406696, accuracy = 0.9821000075340272
epoch 10
train mean loss = 0.0600296038959641, accuracy = 0.9810166761279107
test  mean loss = 0.0611742366065846, accuracy = 0.9823000073432923
epoch 11
train mean loss = 0.0584367322831531, accuracy = 0.9816666774948438
test  mean loss = 0.06964464468501888, accuracy = 0.9815000051259994
epoch 12
train mean loss = 0.054180695233905375, accuracy = 0.9829833447933197
test  mean loss = 0.06639763172657985, accuracy = 0.9812000066041946
epoch 13
train mean loss = 0.05192086359709113, accuracy = 0.9838833445310593
test  mean loss = 0.06607260807076272, accuracy = 0.9822000050544739
epoch 14
train mean loss = 0.05493649407134702, accuracy = 0.9837666768829028
test  mean loss = 0.05558266793857001, accuracy = 0.9854000079631805
epoch 15
train mean loss = 0.048626399218822676, accuracy = 0.9845833440621694
test  mean loss = 0.07036473294138702, accuracy = 0.9816000062227249
epoch 16
train mean loss = 0.046052305203678166, accuracy = 0.9858000108599663
test  mean loss = 0.06581959919883161, accuracy = 0.9840000051259995
epoch 17
train mean loss = 0.05029411458274505, accuracy = 0.9848000103235245
test  mean loss = 0.06846846006497487, accuracy = 0.9842000043392182
epoch 18
train mean loss = 0.04607011582018458, accuracy = 0.9864500098427137
test  mean loss = 0.05997714452689252, accuracy = 0.9852000069618225
epoch 19
train mean loss = 0.047857422590993036, accuracy = 0.9860500098268191
test  mean loss = 0.06725714415920947, accuracy = 0.9841000056266784
epoch 20
train mean loss = 0.04663874042914055, accuracy = 0.986116677025954
test  mean loss = 0.06504758008612953, accuracy = 0.9850000077486039

(Figures: the accuracy curve, the 100-sample prediction check, the first-layer weight visualization, and the output-layer weight visualization)

How to highlight source code on Hatena Blog -Markdown edition-

In a previous post I wrote about doing this with Hatena notation:
はてなブログでソースコードをハイライトする方法 - しがないプログラマ(仮)のブログ

This time, a note on how to do it with Markdown.
Use backquotes (`).
On a Japanese keyboard you can type one with Shift + @.

 ```python
 print "Hello, world!"
 ```

Write it like that, and it is displayed as:

print "Hello, world!"

The `python` part can be replaced with `cpp` or similar to highlight other languages.
You can also leave the language off entirely.
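
For example, a fence with no language tag still renders as a code block, just without highlighting:

 ```
 print "Hello, world!"
 ```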

The same trick works inline, too: you can write things like `hogehoge` in the middle of a sentence.
Apparently a single backtick on each side also works, as in `fugafuga`.
I don't know how to do that with Hatena notation, but it seems to work in Markdown.
I wonder if you can specify a language for inline code...?
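
In the Markdown source, the inline version is just backticks around the text (a made-up sentence for illustration):

 Write `hogehoge` in the middle of a line.

and the part between the backticks gets the code styling.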


Operator overloading in Python

I found out that Python supports operator overloading too, so I tried making a 2D vector class.
I wrote it for Python 3; it should mostly work on 2 as well (though / and print behave differently there).

# -*- coding: utf-8 -*-

# a simple 2D vector class demonstrating operator overloading
class Vec2D:
	def __init__(self, x, y):
		self.x = x
		self.y = y
	
	# print as a list, e.g. [1, 2]
	def __str__(self):
		return str([self.x, self.y])
		
	def __eq__(self, other):
		return self.x == other.x and self.y == other.y
	def __ne__(self, other):
		return self.x != other.x or self.y != other.y
		
	def __add__(self, other):
		return Vec2D(self.x + other.x, self.y + other.y)
	def __iadd__(self, other):  # in-place +=: mutate and return self
		self.x += other.x
		self.y += other.y
		return self
		
	def __sub__(self, other):
		return Vec2D(self.x - other.x, self.y - other.y)
	def __isub__(self, other):
		self.x -= other.x
		self.y -= other.y
		return self
		
	def __mul__(self, other):  # scalar multiplication (other is a number)
		return Vec2D(self.x * other, self.y * other)
	def __imul__(self, other):
		self.x *= other
		self.y *= other
		return self
		
	# __div__/__idiv__ are only used by Python 2;
	# Python 3 uses __truediv__ (and /= falls back to it)
	def __div__(self, other):
		return Vec2D(self.x / other, self.y / other)
	def __truediv__(self, other):
		return Vec2D(self.x / other, self.y / other)
	def __idiv__(self, other):
		self.x /= other
		self.y /= other
		return self

p1 = Vec2D(1, 2)
p2 = Vec2D(3, 4)
p3 = Vec2D(1, 2)

print("p1 :", p1)
print("p2 :", p2)
print("p3 :", p3)

print("p1 == p2 :", p1 == p2)
print("p1 == p3 :", p1 == p3)
print("p1 is p2 :", p1 is p2)
print("p1 is p3 :", p1 is p3)

print("p1 + p2 :", p1 + p2)
print("p1 - p2 :", p1 - p2)
p1 += p2
print("p1 += p2(p1) :", p1)
p1 -= p2
print("p1 -= p2(p1) :", p1)

print("p1 * 2 :", p1 * 2)
print("p1 / 2 :", p1 / 2)
p1 *= 2
print("p1 *= 2(p1) :", p1)
p1 /= 2
print("p1 /= 2(p1) :", p1)

The bottom half is the test code.
I've only implemented the basics, but it's usable as is.
The results are printed as lists.

For division, I had to define __truediv__() instead of __div__() (a quick check follows below).
I don't know how to highlight code in the middle of a sentence, so bear with the plain text.
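
As a quick check of the __truediv__ point, here's a minimal sketch (assuming Python 3; the class is made up for illustration):

# Python 3's / operator looks up __truediv__ and ignores __div__
class D:
    def __div__(self, other):
        return "div"       # never called on Python 3
    def __truediv__(self, other):
        return "truediv"

print(D() / D())  # -> truediv (with only __div__ defined, / raises TypeError)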

The output of the main script looks like this:

p1 : [1, 2]
p2 : [3, 4]
p3 : [1, 2]
p1 == p2 : False
p1 == p3 : True
p1 is p2 : False
p1 is p3 : False
p1 + p2 : [4, 6]
p1 - p2 : [-2, -2]
p1 += p2(p1) : [4, 6]
p1 -= p2(p1) : [1, 2]
p1 * 2 : [2, 4]
p1 / 2 : [0.5, 1.0]
p1 *= 2(p1) : [2, 4]
p1 /= 2(p1) : [1.0, 2.0]

