{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using Theano backend.\n", "Using gpu device 0: GeForce GT625M (CNMeM is disabled, cuDNN not available)\n" ] } ], "source": [ "import numpy as np\n", "\n", "from keras.models import Sequential\n", "from keras.layers import Dense, Activation" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## sequential" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n", "1000/1000 [==============================] - 0s - loss: 2.9005 - acc: 0.5010 \n", "Epoch 2/10\n", "1000/1000 [==============================] - 0s - loss: 4.4020 - acc: 0.3610 \n", "Epoch 3/10\n", "1000/1000 [==============================] - 0s - loss: 7.2061 - acc: 0.5130 \n", "Epoch 4/10\n", "1000/1000 [==============================] - 0s - loss: 7.7480 - acc: 0.5140 \n", "Epoch 5/10\n", "1000/1000 [==============================] - 0s - loss: 7.7480 - acc: 0.5140 \n", "Epoch 6/10\n", "1000/1000 [==============================] - 0s - loss: 7.7480 - acc: 0.5140 \n", "Epoch 7/10\n", "1000/1000 [==============================] - 0s - loss: 7.7480 - acc: 0.5140 \n", "Epoch 8/10\n", "1000/1000 [==============================] - 0s - loss: 7.7480 - acc: 0.5140 \n", "Epoch 9/10\n", "1000/1000 [==============================] - 0s - loss: 7.7480 - acc: 0.5140 \n", "Epoch 10/10\n", "1000/1000 [==============================] - 0s - loss: 7.7480 - acc: 0.5140 \n", "CPU times: user 1.7 s, sys: 416 ms, total: 2.12 s\n", "Wall time: 1min 14s\n" ] } ], "source": [ "%%time\n", "\n", "model = Sequential()\n", "model.add(Dense(1,input_dim=784, activation='tanh'))\n", "model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])\n", "\n", "data = np.random.random((1000,784))\n", "labels = np.random.randint(2,size=(1000,1))\n", "\n", "model.fit(data,labels,nb_epoch=10,batch_size=32)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n", "1000/1000 [==============================] - 0s - loss: 4.2552 - acc: 0.4020 \n", "Epoch 2/10\n", "1000/1000 [==============================] - 0s - loss: 5.8849 - acc: 0.3460 \n", "Epoch 3/10\n", "1000/1000 [==============================] - 0s - loss: 3.8482 - acc: 0.4740 \n", "Epoch 4/10\n", "1000/1000 [==============================] - 0s - loss: 2.6821 - acc: 0.5090 \n", "Epoch 5/10\n", "1000/1000 [==============================] - 0s - loss: 2.6482 - acc: 0.4820 \n", "Epoch 6/10\n", "1000/1000 [==============================] - 0s - loss: 2.8464 - acc: 0.4740 \n", "Epoch 7/10\n", "1000/1000 [==============================] - 0s - loss: 3.3461 - acc: 0.4530 \n", "Epoch 8/10\n", "1000/1000 [==============================] - 0s - loss: 3.3146 - acc: 0.4630 \n", "Epoch 9/10\n", "1000/1000 [==============================] - 0s - loss: 3.6025 - acc: 0.4620 \n", "Epoch 10/10\n", "1000/1000 [==============================] - 0s - loss: 2.3228 - acc: 0.5060 \n", "CPU times: user 1.46 s, sys: 212 ms, total: 1.68 s\n", "Wall time: 17.8 s\n" ] } ], "source": [ "%%time\n", "\n", "model = Sequential()\n", "model.add(Dense(1,input_dim=784, activation='linear'))\n", "model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n", "\n", "data = np.random.random((1000,784))\n", "labels 
= np.random.randint(2,size=(1000,1))\n", "\n", "model.fit(data,labels,nb_epoch=10,batch_size=32)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 2/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 3/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 4/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 5/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 6/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 7/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 8/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 9/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 10/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 11/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 12/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 13/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 14/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 15/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 16/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 17/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 18/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 19/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 20/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 21/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 22/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 23/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 24/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 25/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 26/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 27/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 28/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 29/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 30/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 31/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 32/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 33/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 
34/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 35/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 36/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 37/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 38/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 39/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 40/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 41/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 42/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 43/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 44/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 45/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 46/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 47/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 48/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 49/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "Epoch 50/50\n", "1000/1000 [==============================] - 0s - loss: 7.8817 - acc: 0.5110 \n", "CPU times: user 3.9 s, sys: 612 ms, total: 4.52 s\n", "Wall time: 14.7 s\n" ] } ], "source": [ "%%time\n", "\n", "model = Sequential()\n", "model.add(Dense(1,input_dim=784, activation='relu'))\n", "model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n", "\n", "data = np.random.random((1000,784))\n", "labels = np.random.randint(2,size=(1000,1))\n", "\n", "model.fit(data,labels,nb_epoch=50,batch_size=32)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n", "1000/1000 [==============================] - 0s - loss: 0.7277 - acc: 0.5130 \n", "Epoch 2/10\n", "1000/1000 [==============================] - 0s - loss: 0.7125 - acc: 0.5300 \n", "Epoch 3/10\n", "1000/1000 [==============================] - 0s - loss: 0.7040 - acc: 0.5030 \n", "Epoch 4/10\n", "1000/1000 [==============================] - 0s - loss: 0.7012 - acc: 0.5450 \n", "Epoch 5/10\n", "1000/1000 [==============================] - 0s - loss: 0.6945 - acc: 0.5410 \n", "Epoch 6/10\n", "1000/1000 [==============================] - 0s - loss: 0.6837 - acc: 0.5570 \n", "Epoch 7/10\n", "1000/1000 [==============================] - 0s - loss: 0.6777 - acc: 0.5680 \n", "Epoch 8/10\n", "1000/1000 [==============================] - 0s - loss: 0.6756 - acc: 0.5870 \n", "Epoch 9/10\n", "1000/1000 [==============================] - 0s - loss: 0.6645 - acc: 0.6030 \n", "Epoch 10/10\n", "1000/1000 [==============================] - 0s - loss: 0.6560 - acc: 0.6110 \n", "CPU times: user 1.5 s, sys: 188 ms, total: 1.68 s\n", "Wall time: 11.3 s\n" ] } ], "source": [ "%%time\n", "\n", "model = Sequential()\n", "model.add(Dense(1,input_dim=784, activation='sigmoid'))\n", "model.compile(loss='binary_crossentropy', optimizer='rmsprop', 
metrics=['accuracy'])\n", "\n", "data = np.random.random((1000,784))\n", "labels = np.random.randint(2,size=(1000,1))\n", "\n", "model.fit(data,labels,nb_epoch=10,batch_size=32)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n", "1000/1000 [==============================] - 0s - loss: 0.7120 - acc: 0.5130 \n", "Epoch 2/10\n", "1000/1000 [==============================] - 0s - loss: 0.6932 - acc: 0.5540 \n", "Epoch 3/10\n", "1000/1000 [==============================] - 0s - loss: 0.6892 - acc: 0.5550 \n", "Epoch 4/10\n", "1000/1000 [==============================] - 0s - loss: 0.6840 - acc: 0.5790 \n", "Epoch 5/10\n", "1000/1000 [==============================] - 0s - loss: 0.6763 - acc: 0.5750 \n", "Epoch 6/10\n", "1000/1000 [==============================] - 0s - loss: 0.6795 - acc: 0.5670 \n", "Epoch 7/10\n", "1000/1000 [==============================] - 0s - loss: 0.6643 - acc: 0.5960 \n", "Epoch 8/10\n", "1000/1000 [==============================] - 0s - loss: 0.6600 - acc: 0.5970 \n", "Epoch 9/10\n", "1000/1000 [==============================] - 0s - loss: 0.6530 - acc: 0.6080 \n", "Epoch 10/10\n", "1000/1000 [==============================] - 0s - loss: 0.6490 - acc: 0.6230 \n", "CPU times: user 1.54 s, sys: 220 ms, total: 1.76 s\n", "Wall time: 13.7 s\n" ] } ], "source": [ "%%time\n", "\n", "model = Sequential()\n", "model.add(Dense(1,input_dim=784, activation='hard_sigmoid'))\n", "model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n", "\n", "data = np.random.random((1000,784))\n", "labels = np.random.randint(2,size=(1000,1))\n", "\n", "model.fit(data,labels,nb_epoch=10,batch_size=32)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/10\n", "1000/1000 [==============================] - 0s - loss: 0.8269 - acc: 0.4830 \n", "Epoch 2/10\n", "1000/1000 [==============================] - 0s - loss: 0.7506 - acc: 0.5540 \n", "Epoch 3/10\n", "1000/1000 [==============================] - 0s - loss: 0.6852 - acc: 0.6180 \n", "Epoch 4/10\n", "1000/1000 [==============================] - 0s - loss: 0.6625 - acc: 0.6510 \n", "Epoch 5/10\n", "1000/1000 [==============================] - 0s - loss: 0.6564 - acc: 0.6460 \n", "Epoch 6/10\n", "1000/1000 [==============================] - 0s - loss: 0.6392 - acc: 0.6700 \n", "Epoch 7/10\n", "1000/1000 [==============================] - 1s - loss: 0.6369 - acc: 0.6810 \n", "Epoch 8/10\n", "1000/1000 [==============================] - 1s - loss: 0.6046 - acc: 0.7180 \n", "Epoch 9/10\n", "1000/1000 [==============================] - 0s - loss: 0.6090 - acc: 0.7220 \n", "Epoch 10/10\n", "1000/1000 [==============================] - 1s - loss: 0.6131 - acc: 0.7120 \n", "CPU times: user 9.64 s, sys: 1.76 s, total: 11.4 s\n", "Wall time: 26.9 s\n" ] } ], "source": [ "%%time\n", "\n", "from keras.regularizers import l1,l2,l1l2, activity_l2\n", "\n", "model = Sequential()\n", "model.add(Dense(1,input_dim=784, activation='sigmoid', W_regularizer=l2()))\n", "# model.add(Dense(1,input_dim=784, activation='sigmoid'))\n", "\n", "model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])\n", "\n", "data = np.random.random((1000,784))\n", "labels = np.random.randint(2,size=(1000,1))\n", "\n", "model.fit(data,labels,nb_epoch=10,batch_size=2)" ] }, { "cell_type": "code", 
"execution_count": 8, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "(1000, 784)" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data.shape" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "labels.shape" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "array([[0],\n", " [0],\n", " [1],\n", " [1],\n", " [0]])" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "labels[:5]" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "1/1 [==============================] - 0s\n", "1/1 [==============================] - 0s\n" ] }, { "data": { "text/plain": [ "(array([[ 0.13410421]], dtype=float32), array([[0]], dtype=int32))" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "test = np.random.random(784).reshape(1,-1)\n", "proba = model.predict_proba(test)\n", "classes = model.predict_classes(test)\n", "proba,classes" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## merge layers" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 2", "language": "python", "name": "python2" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 2 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython2", "version": "2.7.12" } }, "nbformat": 4, "nbformat_minor": 0 }