diff --git a/.gitignore b/.gitignore index 151d39a51961..e4bcd0e7490e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,11 @@ *.DS_Store *.pyc +*.swp temp/* dist/* build/* keras/datasets/data/* keras/datasets/temp/* docs/site/* -docs/theme/* \ No newline at end of file +docs/theme/* +tags diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000000..45a9113055df --- /dev/null +++ b/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: python +# Setup anaconda +before_install: + - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh + - chmod +x miniconda.sh + - ./miniconda.sh -b + - export PATH=/home/travis/miniconda/bin:$PATH + - conda update --yes conda +python: + - "3.4" +# command to install dependencies +install: + - conda install --yes python=$TRAVIS_PYTHON_VERSION numpy scipy matplotlib pandas pytest h5py + # Coverage packages are on my binstar channel + - python setup.py install +# command to run tests +script: py.test tests/ diff --git a/README.md b/README.md index b3a21484bc06..a6a511766fe9 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,9 @@ Keras is a minimalist, highly modular neural network library in the spirit of To Use Keras if you need a deep learning library that: - allows for easy and fast prototyping (through total modularity, minimalism, and extensibility). -- supports both convolutional networks (for vision) and recurrent networks (for sequence data). As well as combinations of the two. -- runs seamlessly on the CPU and the GPU. +- supports both convolutional networks and recurrent networks, as well as combinations of the two. +- supports arbitrary connectivity schemes (including multi-input and multi-output training). +- runs seamlessly on CPU and GPU. Read the documentation at [Keras.io](http://keras.io). @@ -15,13 +16,13 @@ Keras is compatible with __Python 2.7-3.4__. ## Guiding principles -- __Modularity.__ A model is understood as a sequence of standalone, fully-configurable modules that can be plugged together with as little restrictions as possible. In particular, neural layers, cost functions, optimizers, initialization schemes, activation functions and dropout are all standalone modules that you can combine to create new models. +- __Modularity.__ A model is understood as a sequence or a graph of standalone, fully-configurable modules that can be plugged together with as little restrictions as possible. In particular, neural layers, cost functions, optimizers, initialization schemes, activation functions, regularization schemes are all standalone modules that you can combine to create new models. -- __Minimalism.__ Each module should be kept short and simple (<100 lines of code). Every piece of code should be transparent upon first reading. No black magic: it hurts iteration speed and ability to innovate. +- __Minimalism.__ Each module should be kept short and simple (<100 lines of code). Every piece of code should be transparent upon first reading. No black magic: it hurts iteration speed and ability to innovate. -- __Easy extensibility.__ New features (a new module, per the above definition, or a new way to combine modules together) are dead simple to add (as new classes/functions), and existing modules provide ample examples. +- __Easy extensibility.__ New modules are dead simple to add (as new classes/functions), and existing modules provide ample examples. To be able to easily create new modules allows for total expressiveness, making Keras suitable for advanced research. -- __Work with Python__. 
No separate models configuration files in a declarative format (like in Caffe or PyLearn2). Models are described in Python code, which is compact, easier to debug, benefits from syntax highlighting, and most of all, allows for ease of extensibility. See for yourself with the examples below. +- __Work with Python__. No separate models configuration files in a declarative format (like in Caffe or PyLearn2). Models are described in Python code, which is compact, easier to debug, and allows for ease of extensibility. ## Examples @@ -171,8 +172,11 @@ model.fit(images, captions, batch_size=16, nb_epoch=100) In the examples folder, you will find example models for real datasets: - CIFAR10 small images classification: Convnet with realtime data augmentation - IMDB movie review sentiment classification: LSTM over sequences of words -- Reuters newswires topic classification: Multilayer Perceptron -- MNIST handwritten digits classification: Multilayer Perceptron +- Reuters newswires topic classification: Multilayer Perceptron (MLP) +- MNIST handwritten digits classification: MLP & CNN +- Character-level text generation with LSTM + +...and more. ## Current capabilities @@ -186,12 +190,10 @@ A few highlights: convnets, LSTM, GRU, word2vec-style embeddings, PReLU, batch n Keras uses the following dependencies: - numpy, scipy - +- pyyaml - Theano - See installation instructions: http://deeplearning.net/software/theano/install.html#install - - HDF5 and h5py (optional, required if you use model saving/loading functions) - - Optional but recommended if you use CNNs: cuDNN. Once you have the dependencies installed, cd to the Keras folder and run the install command: @@ -199,11 +201,16 @@ Once you have the dependencies installed, cd to the Keras folder and run the ins sudo python setup.py install ``` +You can also install Keras from PyPI: +``` +sudo pip install keras +``` + ## Why this name, Keras? Keras (κέρας) means _horn_ in Greek. It is a reference to a literary image from ancient Greek and Latin literature, first found in the _Odyssey_, where dream spirits (_Oneiroi_, singular _Oneiros_) are divided between those who deceive men with false visions, who arrive to Earth through a gate of ivory, and those who announce a future that will come to pass, who arrive through a gate of horn. It's a play on the words κέρας (horn) / κραίνω (fulfill), and ἐλέφας (ivory) / ἐλεφαίρομαι (deceive). Keras was developed as part of the research effort of project ONEIROS (Open-ended Neuro-Electronic Intelligent Robot Operating System). -_"Oneiroi are beyond our unravelling --who can be sure what tale they tell? Not all that men look for comes to pass. Two gates there are that give passage to fleeting Oneiroi; one is made of horn, one of ivory. The Oneiroi that pass through sawn ivory are deceitful, bearing a message that will not be fulfilled; those that come out through polished horn have truth behind them, to be accomplished for men who see them."_ Homer, Odyssey 19. 562 ff (Shewring translation). +>_"Oneiroi are beyond our unravelling --who can be sure what tale they tell? Not all that men look for comes to pass. Two gates there are that give passage to fleeting Oneiroi; one is made of horn, one of ivory. The Oneiroi that pass through sawn ivory are deceitful, bearing a message that will not be fulfilled; those that come out through polished horn have truth behind them, to be accomplished for men who see them."_ Homer, Odyssey 19. 562 ff (Shewring translation). 
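For readers checking a fresh install (from source or from PyPI as described above), a short smoke test can confirm that numpy, Theano and the Keras layers import and train correctly. This is only a sketch and assumes the positional `Dense(input_dim, output_dim)` signature used by the version of Keras covered in this changeset:

```python
# Minimal install smoke test (sketch; assumes the 2015-era positional
# Dense(input_dim, output_dim) API documented in this changeset).
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation

model = Sequential()
model.add(Dense(20, 8, init='uniform'))   # 20 input features -> 8 hidden units
model.add(Activation('relu'))
model.add(Dense(8, 2, init='uniform'))    # 2 output classes
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd')

X = np.random.random((64, 20))
y = np.zeros((64, 2))
y[np.arange(64), np.random.randint(0, 2, size=64)] = 1.  # one-hot labels
model.fit(X, y, batch_size=16, nb_epoch=1)
print(model.predict_classes(X[:4]))
```

If this runs end to end, the core dependencies (numpy, scipy, pyyaml, Theano) are wired up correctly.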
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index f8c2d0c7c754..0bdb1d35f5a6 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -2,11 +2,9 @@ site_name: Keras Documentation theme: readthedocs docs_dir: sources repo_url: http://github.com/fchollet/keras -site_url: / +site_url: http://keras.io/ #theme_dir: theme site_description: Documentation for fast and lightweight Keras Deep Learning library. -include_404: true -include_search: true dev_addr: '0.0.0.0:8000' google_analytics: ['UA-61785484-1', 'keras.io'] @@ -32,11 +30,10 @@ pages: - Advanced Activations Layers: layers/advanced_activations.md - Normalization Layers: layers/normalization.md - Embedding Layers: layers/embeddings.md + - Noise layers: layers/noise.md - Containers: layers/containers.md - Preprocessing: - Sequence Preprocessing: preprocessing/sequence.md - Text Preprocessing: preprocessing/text.md - Image Preprocessing: preprocessing/image.md -- Utils: - - Visualization Utilities: utils/visualization.md diff --git a/docs/sources/callbacks.md b/docs/sources/callbacks.md index a486104ee5d6..c2ec6b395432 100644 --- a/docs/sources/callbacks.md +++ b/docs/sources/callbacks.md @@ -1,6 +1,6 @@ ## Usage of callbacks -A callback is a set of functions to be applied at given stages of the training procedure. You can use callbacks to get a view on internal states and statistics of the model during training. You can pass a list of callback (as the keyword argument `callbacks`) to the `.fit()` method of the `Sequential` model. The relevant methods of the callbacks will then be called at each stage of the training. +A callback is a set of functions to be applied at given stages of the training procedure. You can use callbacks to get a view on internal states and statistics of the model during training. You can pass a list of callbacks (as the keyword argument `callbacks`) to the `.fit()` method of the `Sequential` model. The relevant methods of the callbacks will then be called at each stage of the training. --- @@ -37,10 +37,10 @@ Save the model after every epoch. If `save_best_only=True`, the latest best mode ```python -keras.callbacks.EarlyStopping(patience=0, verbose=0) +keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0) ``` -Stop training after no improvement of the validation loss is seen for `patience` epochs. +Stop training after no improvement of the metric `monitor` is seen for `patience` epochs. --- @@ -52,7 +52,7 @@ You can create a custom callback by extending the base class `keras.callbacks.Ca Here's a simple example saving a list of losses over each batch during training: ```python class LossHistory(keras.callbacks.Callback): - def on_train_begin(self): + def on_train_begin(self, logs={}): self.losses = [] def on_batch_end(self, batch, logs={}): @@ -61,7 +61,7 @@ class LossHistory(keras.callbacks.Callback): --- -### Example to record the loss history +### Example: recording loss history ```python class LossHistory(keras.callbacks.Callback): @@ -88,7 +88,7 @@ print history.losses --- -### Example to checkpoint models +### Example: model checkpoints ```python from keras.callbacks import ModelCheckpoint diff --git a/docs/sources/constraints.md b/docs/sources/constraints.md index 6fb046b6ce4e..e736637d0383 100644 --- a/docs/sources/constraints.md +++ b/docs/sources/constraints.md @@ -2,9 +2,12 @@ Functions from the `constraints` module allow setting constraints (eg. non-negativity) on network parameters during optimization. 
-The keyword arguments used for passing constraints to parameters in a layer will depend on the layer. +The constraints are applied on a per-layer basis. The exact API will depend on the layer, but the layers `Dense`, `TimeDistributedDense`, `MaxoutDense`, `Convolution1D` and `Convolution2D` have a unified API. -In the `Dense` layer it is simply `W_constraint` for the main weights matrix, and `b_constraint` for the bias. +These layers expose 2 keyword arguments: + +- `W_constraint` for the main weights matrix +- `b_constraint` for the bias. ```python diff --git a/docs/sources/documentation.md b/docs/sources/documentation.md index b03c8378ea72..71cea6bfc466 100644 --- a/docs/sources/documentation.md +++ b/docs/sources/documentation.md @@ -15,6 +15,9 @@ - [Models](models.md) - [Activations](activations.md) - [Initializations](initializations.md) +- [Regularizers](regularizers.md) +- [Constraints](constraints.md) +- [Callbacks](callbacks.md) - [Datasets](datasets.md) --- diff --git a/docs/sources/examples.md b/docs/sources/examples.md index 194654774888..05c66a52b8e7 100644 --- a/docs/sources/examples.md +++ b/docs/sources/examples.md @@ -35,7 +35,7 @@ model.add(Dense(20, 64, init='uniform', activation='tanh')) model.add(Dropout(0.5)) model.add(Dense(64, 64, init='uniform', activation='tanh')) model.add(Dropout(0.5)) -model.add(Dense(64, 2, init='uniform', activation='softmax') +model.add(Dense(64, 2, init='uniform', activation='softmax')) sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss='mean_squared_error', optimizer=sgd) @@ -92,6 +92,7 @@ from keras.layers.embeddings import Embedding from keras.layers.recurrent import LSTM model = Sequential() +# Add mask_zero=True to the Embedding constructor if 0 is a left-padding value in your data model.add(Embedding(max_features, 256)) model.add(LSTM(256, 128, activation='sigmoid', inner_activation='hard_sigmoid')) model.add(Dropout(0.5)) @@ -106,8 +107,9 @@ score = model.evaluate(X_test, Y_test, batch_size=16) --- -### Architecture for learning image captions with a convnet and a Gated Recurrent Unit -(word-level embedding, caption of maximum length 16 words). +### Image captioning + +Architecture for learning image captions with a convnet and a Gated Recurrent Unit (word-level embedding, caption of maximum length 16 words). Note that getting this to actually "work" will require using a bigger convnet, initialized with pre-trained weights. Displaying readable results will also require an embedding decoder. diff --git a/docs/sources/index.md b/docs/sources/index.md index a6b594c4c79e..576521d5f1a2 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -2,23 +2,24 @@ ## Overview -Keras is a minimalist, highly modular neural network library in the spirit of Torch, written in Python, that uses [Theano](http://deeplearning.net/software/theano/) under the hood for fast tensor manipulation on GPU and CPU. It was developed with a focus on enabling fast experimentation. +Keras is a minimalist, highly modular neural network library in the spirit of Torch, written in Python, that uses [Theano](http://deeplearning.net/software/theano/) under the hood for optimized tensor manipulation on GPU and CPU. It was developed with a focus on enabling fast experimentation. Use Keras if you need a deep learning library that: - allows for easy and fast prototyping (through total modularity, minimalism, and extensibility). -- supports both __convolutional networks__ and __recurrent networks__ (LSTM, GRU, etc). 
As well as combinations of the two. -- runs seamlessly on the CPU and the GPU. +- supports both convolutional networks and recurrent networks, as well as combinations of the two. +- supports arbitrary connectivity schemes (including multi-input and multi-output training). +- runs seamlessly on CPU and GPU. ## Guiding principles -- __Modularity.__ A model is understood as a sequence of standalone, fully-configurable modules that can be plugged together with as little restrictions as possible. In particular, neural layers, cost functions, optimizers, initialization schemes, activation functions and dropout are all standalone modules that you can combine to create new models. +- __Modularity.__ A model is understood as a sequence or a graph of standalone, fully-configurable modules that can be plugged together with as little restrictions as possible. In particular, neural layers, cost functions, optimizers, initialization schemes, activation functions, regularization schemes are all standalone modules that you can combine to create new models. -- __Minimalism.__ Each module should be kept short and simple (<100 lines of code). Every piece of code should be transparent upon first reading. No black magic: it hurts iteration speed and ability to innovate. +- __Minimalism.__ Each module should be kept short and simple (<100 lines of code). Every piece of code should be transparent upon first reading. No black magic: it hurts iteration speed and ability to innovate. -- __Easy extensibility.__ A new feature (a new module, per the above definition, or a new way to combine modules together) are dead simple to add (as new classes/functions), and existing modules provide ample examples. +- __Easy extensibility.__ New modules are dead simple to add (as new classes/functions), and existing modules provide ample examples. To be able to easily create new modules allows for total expressiveness, making Keras suitable for advanced research. -- __Work with Python__. No separate models configuration files in a declarative format (like in Caffe or PyLearn2). Models are described in Python code, which is compact, easier to debug, benefits from syntax highlighting, and most of all, allows for ease of extensibility. +- __Work with Python__. No separate models configuration files in a declarative format (like in Caffe or PyLearn2). Models are described in Python code, which is compact, easier to debug, and allows for ease of extensibility. ## Code @@ -30,7 +31,9 @@ Keras is licensed under the [MIT license](http://opensource.org/licenses/MIT). ## Getting started: 30 seconds to Keras -The core datastructure of Keras is a __model__, a way to organize layers. Here's a sequential model (a linear pile of layers). +The core datastructure of Keras is a __model__, a way to organize layers. There are two types of models: [`Sequential`](/models/#sequential) and [`Graph`](/models/#graph). 
+ +Here's the `Sequential` model (a linear pile of layers): ```python from keras.models import Sequential @@ -43,9 +46,9 @@ Stacking layers is as easy as `.add()`: ```python from keras.layers.core import Dense, Activation -model.add(Dense(input_dim=100, output_dim=64, init="uniform")) +model.add(Dense(input_dim=100, output_dim=64, init="glorot_uniform")) model.add(Activation("relu")) -model.add(Dense(input_dim=64, output_dim=10, init="uniform")) +model.add(Dense(input_dim=64, output_dim=10, init="glorot_uniform")) model.add(Activation("softmax")) ``` @@ -67,7 +70,7 @@ model.fit(X_train, Y_train, nb_epoch=5, batch_size=32) Alternatively, you can feed batches to your model manually: ```python -model.train(X_batch, Y_batch) +model.train_on_batch(X_batch, Y_batch) ``` Evaluate your performance in one line: @@ -81,7 +84,7 @@ classes = model.predict_classes(X_test, batch_size=32) proba = model.predict_proba(X_test, batch_size=32) ``` -Building a network of LSTMs, a deep CNN, a word2vec embedder or any other model is just as fast. The ideas behind deep learning are simple, so why should their implementation be painful? +Building a network of LSTMs, a deep CNN, a Neural Turing Machine, a word2vec embedder or any other model is just as fast. The ideas behind deep learning are simple, so why should their implementation be painful? Have a look at the [examples](examples.md). @@ -89,11 +92,12 @@ Have a look at the [examples](examples.md). Keras uses the following dependencies: -- numpy, scipy -- Theano +- __numpy__, __scipy__ +- __pyyaml__ +- __Theano__ - See [installation instructions](http://deeplearning.net/software/theano/install.html#install). -- HDF5 and h5py (optional, required if you use model saving/loading functions) -- Optional but recommended if you use CNNs: cuDNN. +- __HDF5__ and __h5py__ (optional, required if you use model saving/loading functions) +- Optional but recommended if you use CNNs: __cuDNN__. Once you have the dependencies installed, clone the repo: ```bash @@ -104,6 +108,10 @@ Go to the Keras folder and run the install command: cd keras sudo python setup.py install ``` +You can also install Keras from PyPI: +``` +sudo pip install keras +``` ## Support @@ -116,7 +124,8 @@ Keras welcomes all contributions from the community. - Keep a pragmatic mindset and avoid bloat. Only add to the source if that is the only path forward. - New features should be documented. Make sure you update the documentation along with your Pull Request. - The documentation for every new feature should include a usage example in the form of a code snippet. -- All changes should be tested. A formal test process will be introduced very soon. +- All changes should be tested. Make sure any new feature you add has a corresponding unit test. +- Please no Pull Requests about coding style. - Even if you don't contribute to the Keras source code, if you have an application of Keras that is concise and powerful, please consider adding it to our collection of [examples](https://github.com/fchollet/keras/tree/master/examples). @@ -124,7 +133,7 @@ Keras welcomes all contributions from the community. Keras (κέρας) means _horn_ in Greek. It is a reference to a literary image from ancient Greek and Latin literature, first found in the _Odyssey_, where dream spirits (_Oneiroi_, singular _Oneiros_) are divided between those who deceive men with false visions, who arrive to Earth through a gate of ivory, and those who announce a future that will come to pass, who arrive through a gate of horn. 
It's a play on the words κέρας (horn) / κραίνω (fulfill), and ἐλέφας (ivory) / ἐλεφαίρομαι (deceive). -Keras was developed as part of the research effort of project ONEIROS (Open-ended Neuro-Electronic Intelligent Robot Operating System). +Keras was developed as part of the research effort of project __ONEIROS__ (*Open-ended Neuro-Electronic Intelligent Robot Operating System*). > _"Oneiroi are beyond our unravelling --who can be sure what tale they tell? Not all that men look for comes to pass. Two gates there are that give passage to fleeting Oneiroi; one is made of horn, one of ivory. The Oneiroi that pass through sawn ivory are deceitful, bearing a message that will not be fulfilled; those that come out through polished horn have truth behind them, to be accomplished for men who see them."_ diff --git a/docs/sources/initializations.md b/docs/sources/initializations.md index 3a0835933fc4..417c4d9ce531 100644 --- a/docs/sources/initializations.md +++ b/docs/sources/initializations.md @@ -1,4 +1,3 @@ -# Initializations ## Usage of initializations diff --git a/docs/sources/layers/advanced_activations.md b/docs/sources/layers/advanced_activations.md index 8309f8af581c..c9addff953fb 100644 --- a/docs/sources/layers/advanced_activations.md +++ b/docs/sources/layers/advanced_activations.md @@ -32,4 +32,24 @@ Parametrized linear unit. Similar to a LeakyReLU, where each input unit has its - __input_shape__: tuple. - __References__: - - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/pdf/1502.01852v1.pdf) \ No newline at end of file + - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/pdf/1502.01852v1.pdf) + +--- + +## ParametricSoftplus + +```python +keras.layers.advanced_activations.ParametricSoftplus(input_shape) +``` + +Parametric Softplus of the form: (`f(x) = alpha * (1 + exp(beta * x))`). This is essentially a smooth version of ReLU where the parameters control the sharpness of the rectification. The parameters are initialized to more closely approximate a ReLU than the standard `softplus`: `alpha` initialized to `0.2` and `beta` initialized to `5.0`. The parameters are fit separately for each hidden unit. + +- __Input shape__: Same as `input_shape`. This layer cannot be used as first layer in a model. + +- __Output shape__: Same as input. + +- __Arguments__: + - __input_shape__: tuple. + +- __References__: + - [Inferring Nonlinear Neuronal Computation Based on Physiologically Plausible Inputs](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003143) \ No newline at end of file diff --git a/docs/sources/layers/containers.md b/docs/sources/layers/containers.md index 14168b199a86..3a3e9df215e8 100644 --- a/docs/sources/layers/containers.md +++ b/docs/sources/layers/containers.md @@ -1,5 +1,3 @@ -# Containers - Containers are ensembles of layers that can be interacted with through the same API as `Layer` objects. 
## Sequential diff --git a/docs/sources/layers/convolutional.md b/docs/sources/layers/convolutional.md index 79045d11e784..b7fe4c7e1d7c 100644 --- a/docs/sources/layers/convolutional.md +++ b/docs/sources/layers/convolutional.md @@ -2,15 +2,35 @@ ## Convolution1D ```python -keras.layers.convolutional.Convolution1D(nb_filter, stack_size, filter_length, +keras.layers.convolutional.Convolution1D(input_dim, nb_filter, filter_length, init='uniform', activation='linear', weights=None, - image_shape=None, border_mode='valid', subsample_length=1, + border_mode='valid', subsample_length=1, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None) ``` Convolution operator for filtering neighborhoods of one-dimensional inputs. + +- __Input shape__: 3D tensor with shape: `(nb_samples, steps, input_dim)`. + +- __Output shape__: 3D tensor with shape: `(nb_samples, steps, nb_filter)`. `steps` value might have changed due to padding. + +- __Arguments__: + - __input_dim__: Number of channels/dimensions in the input. + - __nb_filter__: Number of convolution kernels to use (dimensionality of the output). + - __filter_length__: The extension (spatial or temporal) of each filter. + - __init__: name of initialization function for the weights of the layer (see: [initializations](../initializations.md)), or alternatively, Theano function to use for weights initialization. This parameter is only relevant if you don't pass a `weights` argument. + - __activation__: name of activation function to use (see: [activations](../activations.md)), or alternatively, elementwise Theano function. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). + - __weights__: list of numpy arrays to set as initial weights. + - __border_mode__: 'valid' or 'full'. see scipy.signal.convolve2d. + - __subsample_length__: factor by which to subsample output. + - __W_regularizer__: instance of [WeightRegularizer](../regularizers.md) (eg. L1 or L2 regularization), applied to the main weights matrix. + - __b_regularizer__: instance of [WeightRegularizer](../regularizers.md), applied to the bias. + - __activity_regularizer__: instance of [ActivityRegularizer](../regularizers.md), applied to the network output. + - __W_constraint__: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the main weights matrix. + - __b_constraint__: instance of the [constraints](../constraints.md) module, applied to the bias. + --- ## Convolution2D @@ -18,18 +38,53 @@ Convolution operator for filtering neighborhoods of one-dimensional inputs. ```python keras.layers.convolutional.Convolution2D(nb_filter, stack_size, nb_row, nb_col, init='glorot_uniform', activation='linear', weights=None, - image_shape=None, border_mode='valid', subsample=(1,1)) + border_mode='valid', subsample=(1, 1), + W_regularizer=None, b_regularizer=None, W_constraint=None) ``` -Convolution operator for filtering windows of two-dimensional inputs. This is a wrapper for Theano's [conv2d](http://deeplearning.net/software/theano/library/tensor/nnet/conv.html#theano.tensor.nnet.conv.conv2d). +Convolution operator for filtering windows of two-dimensional inputs. + +- __Input shape__: 4D tensor with shape: `(nb_samples, stack_size, nb_row, nb_col)`. + +- __Output shape__: 4D tensor with shape: `(nb_samples, nb_filter, nb_row, nb_col)`. `nb_row`, `nb_col` might have changed due to padding. + + +- __Arguments__: + + - __nb_filter__: Number of convolution kernels to use. 
+ - __stack_size__: Number of channels in the input. + - __nb_row__: Number of rows in the convolution kernels + - __nb_col__: Number of columns in the convolution kernels + - __init__: name of initialization function for the weights of the layer (see: [initializations](../initializations.md)), or alternatively, Theano function to use for weights initialization. This parameter is only relevant if you don't pass a `weights` argument. + - __activation__: name of activation function to use (see: [activations](../activations.md)), or alternatively, elementwise Theano function. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). + - __weights__: list of numpy arrays to set as initial weights. + - __border_mode__: 'valid', 'full', or 'same'. See scipy.signal.convolve2d. + - __subsample__: tuple of length 2. Factor by which to subsample output. Also called strides elsewhere. + - __W_regularizer__: instance of [WeightRegularizer](../regularizers.md) (eg. L1 or L2 regularization), applied to the main weights matrix. + - __b_regularizer__: instance of [WeightRegularizer](../regularizers.md), applied to the bias. + - __activity_regularizer__: instance of [ActivityRegularizer](../regularizers.md), applied to the network output. + - __W_constraint__: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the main weights matrix. + - __b_constraint__: instance of the [constraints](../constraints.md) module, applied to the bias. + --- ## MaxPooling1D ```python -keras.layers.convolutional.MaxPooling1D(pool_length=2, ignore_border=True) +keras.layers.convolutional.MaxPooling1D(pool_length=2, stride=None, ignore_border=True) ``` + +- __Input shape__: 3D tensor with shape: `(nb_samples, steps, dim)`. + +- __Output shape__: 3D tensor with shape: `(nb_samples, downsampled_steps, dim)`. + +- __Arguments__: + + - __pool_length__: factor by which to downscale. 2 will halve the input. + - __stride__: integer or None. Stride value. + - __ignore_border__: boolean. + --- ## MaxPooling2D @@ -38,4 +93,12 @@ keras.layers.convolutional.MaxPooling1D(pool_length=2, ignore_border=True) keras.layers.convolutional.MaxPooling2D(poolsize=(2, 2), ignore_border=True) ``` -This is a wrapper for Theano's [max_pool_2d](http://deeplearning.net/software/theano/library/tensor/signal/downsample.html). +- __Input shape__: 4D tensor with shape: `(nb_samples, stack_size, nb_row, nb_col)`. + +- __Output shape__: 4D tensor with shape: `(nb_samples, stack_size, new_nb_row, new_nb_col)`. + +- __Arguments__: + + - __pool_size__: factor by which to downscale (vertical ds, horizontal ds). (2, 2) will halve the image in each dimension. + - __ignore_border__: boolean. When True, (5, 5) input with pool_size=(2, 2) will generate a (2, 2) output, (3, 3) otherwise. + diff --git a/docs/sources/layers/core.md b/docs/sources/layers/core.md index 6a892913bac5..af6dd28a2d53 100644 --- a/docs/sources/layers/core.md +++ b/docs/sources/layers/core.md @@ -7,7 +7,7 @@ keras.layers.core.Layer() __Methods__: ```python -connect(previous_layer) +set_previous(previous_layer) ``` Connect the input of the current layer to the output of the argument layer. @@ -20,7 +20,7 @@ Connect the input of the current layer to the output of the argument layer. ```python -output(train) +get_output(train) ``` Get the output of the layer. @@ -65,13 +65,19 @@ Set the weights of the parameters of the layer. - __weights__: List of numpy arrays (one per layer parameter). 
Should be in the same order as what `get_weights(self)` returns. +```python +get_config() +``` + +- __Return__: Configuration dictionary describing the layer. + --- ## Dense ```python keras.layers.core.Dense(input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None \ -W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None) +W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None) ``` Standard 1D fully-connect layer. @@ -87,8 +93,9 @@ Standard 1D fully-connect layer. - __init__: name of initialization function for the weights of the layer (see: [initializations](../initializations.md)), or alternatively, Theano function to use for weights initialization. This parameter is only relevant if you don't pass a `weights` argument. - __activation__: name of activation function to use (see: [activations](../activations.md)), or alternatively, elementwise Theano function. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). - __weights__: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. - - __W_regularizer__: instance of the [regularizers](../regularizers.md) module (eg. L1 or L2 regularization), applied to the main weights matrix. - - __b_regularizer__: instance of the [regularizers](../regularizers.md) module, applied to the bias. + - __W_regularizer__: instance of [WeightRegularizer](../regularizers.md) (eg. L1 or L2 regularization), applied to the main weights matrix. + - __b_regularizer__: instance of [WeightRegularizer](../regularizers.md), applied to the bias. + - __activity_regularizer__: instance of [ActivityRegularizer](../regularizers.md), applied to the network output. - __W_constraint__: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the main weights matrix. - __b_constraint__: instance of the [constraints](../constraints.md) module, applied to the bias. @@ -97,7 +104,7 @@ Standard 1D fully-connect layer. ## TimeDistributedDense ```python keras.layers.core.TimeDistributedDense(input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None \ -W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None) +W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None) ``` Fully-connected layer distributed over the time dimension. Useful after a recurrent network set to `return_sequences=True`. @@ -110,8 +117,9 @@ Fully-connected layer distributed over the time dimension. Useful after a recurr - __init__: name of initialization function for the weights of the layer (see: [initializations](../initializations.md)), or alternatively, Theano function to use for weights initialization. This parameter is only relevant if you don't pass a `weights` argument. - __activation__: name of activation function to use (see: [activations](../activations.md)), or alternatively, elementwise Theano function. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). - __weights__: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. - - __W_regularizer__: instance of the [regularizers](../regularizers.md) module (eg. L1 or L2 regularization), applied to the main weights matrix. - - __b_regularizer__: instance of the [regularizers](../regularizers.md) module, applied to the bias. 
+ - __W_regularizer__: instance of [WeightRegularizer](../regularizers.md) (eg. L1 or L2 regularization), applied to the main weights matrix. + - __b_regularizer__: instance of [WeightRegularizer](../regularizers.md), applied to the bias. + - __activity_regularizer__: instance of [ActivityRegularizer](../regularizers.md), applied to the network output. - __W_constraint__: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the main weights matrix. - __b_constraint__: instance of the [constraints](../constraints.md) module, applied to the bias. @@ -127,7 +135,7 @@ model.add(TimeDistributedDense(5, 10)) # output shape: (nb_samples, nb_timesteps ## AutoEncoder ```python -keras.layers.core.AutoEncoder(encoder, decoder, output_reconstruction=True, tie_weights=False, weights=None): +keras.layers.core.AutoEncoder(encoder, decoder, output_reconstruction=True, weights=None): ``` A customizable autoencoder model. If `output_reconstruction = True` then dim(input) = dim(output) else dim(output) = dim(hidden) @@ -145,8 +153,6 @@ A customizable autoencoder model. If `output_reconstruction = True` then dim(inp - __output_reconstruction__: If this is False the when .predict() is called the output is the deepest hidden layer's activation. Otherwise the output of the final decoder layer is presented. Be sure your validation data confirms to this logic if you decide to use any. - - __tie_weights__: If True then the encoder bias is tied to the decoder bias. **Note**: This required the encoder layer corresponding to this decoder layer to be of the same time, eg: Dense:Dense - - __weights__: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. - __Example__: @@ -156,45 +162,9 @@ from keras.layers import containers # input shape: (nb_samples, 32) encoder = containers.Sequential([Dense(32, 16), Dense(16, 8)]) decoder = containers.Sequential([Dense(8, 16), Dense(16, 32)]) -autoencoder.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False, tie_weights=True)) -``` - ---- -## DenoisingAutoEncoder -```python -keras.layers.core.AutoEncoder(encoder, decoder, output_reconstruction=True, tie_weights=False, weights=None, corruption_level=0.3): -``` - -A denoising autoencoder model that inherits the base features from autoencoder. -Since this layer uses similar logic to Dropout it cannot be the first layer in a pipeline. - -- __Input shape__: The layer shape is defined by the encoder definitions - -- __Output shape__: The layer shape is defined by the decoder definitions - -- __Arguments__: - - - __encoder__: A [layer](./) or [layer container](./containers.md). - - - __decoder__: A [layer](./) or [layer container](./containers.md). - - - __output_reconstruction__: If this is False the when .predict() is called the output is the deepest hidden layer's activation. Otherwise the output of the final decoder layer is presented. Be sure your validation data confirms to this logic if you decide to use any. - - - __tie_weights__: If True then the encoder bias is tied to the decoder bias. **Note**: This required the encoder layer corresponding to this decoder layer to be of the same time, eg: Dense:Dense - - - __weights__: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. - - - __corruption_level__: the amount of binomial noise added to the input layer of the model. 
- -- __Example__: -```python -# input shape: (nb_samples, 32) -autoencoder.add(Dense(32, 32)) -autoencoder.add(DenoisingAutoEncoder(encoder=Dense(32, 16), - decoder=Dense(16, 32), - output_reconstruction=False, tie_weights=True, - corruption_level=0.3)) +autoencoder = Sequential() +autoencoder.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False)) ``` @@ -231,9 +201,9 @@ Apply dropout to the input. Dropout consists in randomly setting a fraction `p` - __p__: float (0 <= p < 1). Fraction of the input that gets dropped out at training time. - --- + ## Reshape ```python keras.layers.core.Reshape(*dims) @@ -276,7 +246,9 @@ Convert a nD input to 1D. keras.layers.core.RepeatVector(n) ``` -Repeat the 1D input n times. Dimensions of input are assumed to be (nb_samples, dim). Output will have the shape (nb_samples, n, dim). +Repeat the 1D input n times. Dimensions of input are assumed to be `(nb_samples, dim)`. Output will have the shape `(nb_samples, n, dim)`. + +Note that the output is still a single tensor; `RepeatVector` does not split the data flow. - __Input shape__: This layer does not assume a specific input shape. This layer cannot be used as the first layer in a model. @@ -285,14 +257,45 @@ Repeat the 1D input n times. Dimensions of input are assumed to be (nb_samples, - __Arguments__: - __n__: int. +--- + +## Permute +```python +keras.layers.core.Permute(dims) +``` +Permute the dimensions of the input data according to the given tuple. Sometimes useful for connecting RNNs and convnets together. + +- __Input shape: This layer does not assume a specific input shape. + +- __Output shape: Same as the input shape, but with the dimensions re-ordered according to the ordering specified by the tuple. + +- __Argument: tuple specifying the permutation scheme (e.g. `(2, 1)` permutes the first and second dimension of the input). + - __Example__: +```python +# input shape: (nb_samples, 10) +model.add(Dense(10, 50)) # output shape: (nb_samples, 50) +model.add(Reshape(10, 5)) # output shape: (nb_samples, 10, 5) +model.add(Permute((2, 1))) #output shape: (nb_samples, 5, 10) +``` + +--- + +## ActivityRegularization +```python +keras.layers.core.ActivityRegularization(l1=0., l2=0.) +``` + +Leaves the input unchanged, but adds a term to the loss function based on the input activity. L1 and L2 regularization supported. + +This layer can be used, for instance, to induce activation sparsity in the previous layer. --- ## MaxoutDense ```python keras.layers.core.MaxoutDense(input_dim, output_dim, nb_feature=4, init='glorot_uniform', weights=None, \ - W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None) + W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None) ``` A dense maxout layer. A `MaxoutDense` layer takes the element-wise maximum of `nb_feature` `Dense(input_dim, output_dim)` linear layers. This allows the layer to learn a convex, piecewise linear activation function over the inputs. See [this paper](http://arxiv.org/pdf/1302.4389.pdf) for more details. Note that this is a *linear* layer -- if you wish to apply activation function (you shouldn't need to -- they are universal function approximators), an `Activation` layer must be added after. @@ -308,8 +311,9 @@ A dense maxout layer. A `MaxoutDense` layer takes the element-wise maximum of `n - __nb_feature__: int >= 0. the number of features to create for the maxout. 
This is equivalent to the number of piecewise elements to be allowed for the activation function. - __init__: name of initialization function for the weights of the layer (see: [initializations](../initializations.md)), or alternatively, Theano function to use for weights initialization. This parameter is only relevant if you don't pass a `weights` argument. - __weights__: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. - - __W_regularizer__: instance of the [regularizers](../regularizers.md) module (eg. L1 or L2 regularization), applied to the main weights matrix. - - __b_regularizer__: instance of the [regularizers](../regularizers.md) module, applied to the bias. + - __W_regularizer__: instance of [WeightRegularizer](../regularizers.md) (eg. L1 or L2 regularization), applied to the main weights matrix. + - __b_regularizer__: instance of [WeightRegularizer](../regularizers.md), applied to the bias. + - __activity_regularizer__: instance of [ActivityRegularizer](../regularizers.md), applied to the network output. - __W_constraint__: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the main weights matrix. - __b_constraint__: instance of the [constraints](../constraints.md) module, applied to the bias. @@ -325,11 +329,11 @@ model.add(RepeatVector(2)) # output shape: (nb_samples, 2, 10) keras.layers.core.Merge(models, mode='sum') ``` -Merge the output of a list of models into a single tensor, following one of two modes: `sum` or `concat`. +Merge the output of a list of layers (or containers) into a single tensor, following one of two modes: `sum` or `concat`. - __Arguments__: - - __models__: List of `Sequential` models. - - __mode__: String, one of `{'sum', 'concat'}`. `sum` will simply sum the outputs of the models (therefore all models should have an output with the same shape). `concat` will concatenate the outputs along the last dimension (therefore all models should have an output that only differ along the last dimension). + - __layers__: List of layers or [containers](/layers/containers/). + - __mode__: String, one of `{'sum', 'concat'}`. `sum` will simply sum the outputs of the layers (therefore all layers should have an output with the same shape). `concat` will concatenate the outputs along the last dimension (therefore all layers should have an output that only differ along the last dimension). - __Example__: @@ -353,3 +357,14 @@ model.compile(loss='categorical_crossentropy', optimizer='rmsprop') model.fit([X_train, X_train], Y_train, batch_size=128, nb_epoch=20, validation_data=([X_test, X_test], Y_test)) ``` +## Masking +```python +keras.layers.core.Masking(mask_value=0.) +``` + +Create a mask for the input data by using `mask_value` as the sentinel value which should be masked out. +Given an input of dimensions `(nb_samples, timesteps, input_dim)`, return the input untouched as output, and supply a mask of shape `(nb_samples, timesteps)` where all timesteps which had *all* their values equal to `mask_value` are masked out. + +- __Input shape__: 3D tensor with shape: `(nb_samples, timesteps, features)`. + +- __Output shape__: 3D tensor with shape: `(nb_samples, timesteps, features)`. 
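As a usage sketch for the `Masking` layer documented above (hypothetical shapes; it assumes the positional `LSTM(input_dim, output_dim)` and `Dense(input_dim, output_dim)` signatures used throughout these docs, and that `Masking` may be placed as the first layer of a `Sequential` model):

```python
# Sketch: skip zero-padded timesteps before a recurrent layer.
import numpy as np
from keras.models import Sequential
from keras.layers.core import Masking, Dense
from keras.layers.recurrent import LSTM

model = Sequential()
model.add(Masking(mask_value=0.))             # timesteps whose values are all 0. are masked out
model.add(LSTM(8, 16))                        # input_dim=8 features per timestep, output_dim=16
model.add(Dense(16, 1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

X = np.random.random((32, 10, 8))             # (nb_samples, timesteps, features)
X[:, 7:, :] = 0.                              # last 3 timesteps are padding
y = np.random.randint(0, 2, size=(32, 1))
model.fit(X, y, batch_size=8, nb_epoch=1)
```

For integer sequences, the `mask_zero` option of the `Embedding` layer (documented below) achieves the same effect.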
diff --git a/docs/sources/layers/embeddings.md b/docs/sources/layers/embeddings.md index 33784a898c9e..6659e89ea912 100644 --- a/docs/sources/layers/embeddings.md +++ b/docs/sources/layers/embeddings.md @@ -2,7 +2,7 @@ ## Embedding ```python -keras.layers.embeddings.Embedding(input_dim, output_dim, init='uniform', weights=None, W_regularizer=None, W_constraint=None) +keras.layers.embeddings.Embedding(input_dim, output_dim, init='uniform', weights=None, W_regularizer=None, W_constraint=None, mask_zero=False) ``` Turn positive integers (indexes) into denses vectors of fixed size, @@ -20,6 +20,7 @@ eg. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]` - __weights__: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. - __W_regularizer__: instance of the [regularizers](../regularizers.md) module (eg. L1 or L2 regularization), applied to the embedding matrix. - __W_constraint__: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the embedding matrix. + - __mask_zero__: Whether or not the input value 0 is a special "padding" value that should be masked out. This is useful for [recurrent layers](recurrent.md) which may take variable length input. If this is `True` then all subsequent layers in the model need to support masking or an exception will be raised. ## WordContextProduct diff --git a/docs/sources/layers/noise.md b/docs/sources/layers/noise.md new file mode 100644 index 000000000000..1f1d9853a254 --- /dev/null +++ b/docs/sources/layers/noise.md @@ -0,0 +1,38 @@ + + +## GaussianNoise +```python +keras.layers.noise.GaussianNoise(sigma) +``` +Apply to the input an additive zero-centred gaussian noise with standard deviation `sigma`. This is useful to mitigate overfitting (you could see it as a kind of random data augmentation). Gaussian Noise (GN) is a natural choice as a corruption process for real valued inputs. + +The Gaussian noise is only added at training time. + +- __Input shape__: This layer does not assume a specific input shape. + +- __Output shape__: Same as input. + +- __Arguments__: + + - __sigma__: float, standard deviation of the noise distribution. + +--- + +## GaussianDropout +```python +keras.layers.noise.GaussianDropout(p) +``` +Apply to the input a multiplicative one-centred gaussian noise with standard deviation `sqrt(p/(1-p))`. `p` refers to the drop probability, to match the `Dropout` layer syntax. + +http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf + +The Gaussian noise is only used at training time. + +- __Input shape__: This layer does not assume a specific input shape. + +- __Output shape__: Same as input. + +- __Arguments__: + + - __p__: float, drop probability as with Dropout. + diff --git a/docs/sources/layers/recurrent.md b/docs/sources/layers/recurrent.md index 06719e5fe8d7..b58c31e845ba 100644 --- a/docs/sources/layers/recurrent.md +++ b/docs/sources/layers/recurrent.md @@ -6,14 +6,17 @@ keras.layers.recurrent.SimpleRNN(input_dim, output_dim, init='glorot_uniform', inner_init='orthogonal', activation='sigmoid', weights=None, truncate_gradient=-1, return_sequences=False) ``` -Fully connected RNN where output is to fed back to input. Not a particularly useful model, included for demonstration purposes. +Fully connected RNN where the output is fed back to the input. - __Input shape__: 3D tensor with shape: `(nb_samples, timesteps, input_dim)`. - __Output shape__: - - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, ouput_dim)`. 
+ - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, output_dim)`. - else: 2D tensor with shape: `(nb_samples, output_dim)`. +- __Masking__: This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use an [Embedding](embeddings.md) layer with the `mask_zero` parameter set to `True`. + + - __Arguments__: - __input_dim__: dimension of the input. - __output_dim__: dimension of the internal projections and the final output. @@ -44,9 +47,12 @@ Not a particularly useful model, included for demonstration purposes. - __Input shape__: 3D tensor with shape: `(nb_samples, timesteps, input_dim)`. - __Output shape__: - - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, ouput_dim)`. + - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, output_dim)`. - else: 2D tensor with shape: `(nb_samples, output_dim)`. +- __Masking__: This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use an [Embedding](embeddings.md) layer with the `mask_zero` parameter set to `True`. + + - __Arguments__: - __input_dim__: dimension of the input. - __output_dim__: dimension of the internal projections and the final output. @@ -76,9 +82,11 @@ Gated Recurrent Unit - Cho et al. 2014. - __Input shape__: 3D tensor with shape: `(nb_samples, timesteps, input_dim)`. - __Output shape__: - - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, ouput_dim)`. + - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, output_dim)`. - else: 2D tensor with shape: `(nb_samples, output_dim)`. +- __Masking__: This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use an [Embedding](embeddings.md) layer with the `mask_zero` parameter set to `True`. + - __Arguments__: - __input_dim__: dimension of the input. - __output_dim__: dimension of the internal projections and the final output. @@ -100,7 +108,7 @@ Gated Recurrent Unit - Cho et al. 2014. ```python keras.layers.recurrent.LSTM(input_dim, output_dim=128, - init='glorot_uniform', inner_init='orthogonal', + init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid', weights=None, truncate_gradient=-1, return_sequences=False) ``` @@ -110,14 +118,17 @@ Long-Short Term Memory unit - Hochreiter 1997. - __Input shape__: 3D tensor with shape: `(nb_samples, timesteps, input_dim)`. - __Output shape__: - - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, ouput_dim)`. + - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, output_dim)`. - else: 2D tensor with shape: `(nb_samples, output_dim)`. +- __Masking__: This layer supports masking for input data with a variable number of timesteps. To introduce masks to your data, use an [Embedding](embeddings.md) layer with the `mask_zero` parameter set to `True`. + - __Arguments__: -- __input_dim__: dimension of the input. + - __input_dim__: dimension of the input. - __output_dim__: dimension of the internal projections and the final output. - __init__: weight initialization function for the output cell. Can be the name of an existing function (str), or a Theano function (see: [initializations](../initializations.md)). - __inner_init__: weight initialization function for the inner cells. + - __forget_bias_init__: initialization function for the bias of the forget gate. 
[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) recommend initializing with ones. - __activation__: activation function for the output. Can be the name of an existing function (str), or a Theano function (see: [activations](../activations.md)). - __inner_activation__: activation function for the inner cells. - __weights__: list of numpy arrays to set as initial weights. The list should have 12 elements. @@ -128,6 +139,41 @@ Long-Short Term Memory unit - Hochreiter 1997. - [Long short-term memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf) (original 1997 paper) - [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015) - [Supervised sequence labelling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf) + +--- + +## JZS1, JZS2, JZS3 + +```python +keras.layers.recurrent.JZS1(input_dim, output_dim=128, + init='glorot_uniform', inner_init='orthogonal', + activation='tanh', inner_activation='sigmoid', + weights=None, truncate_gradient=-1, return_sequences=False) +``` + +Top 3 RNN architectures evolved from the evaluation of thousands of models. Serves as alternatives to LSTMs and GRUs. Corresponds to `MUT1`, `MUT2`, and `MUT3` architectures described in the paper: An Empirical Exploration of Recurrent Network Architectures, Jozefowicz et al. 2015. + +- __Input shape__: 3D tensor with shape: `(nb_samples, timesteps, input_dim)`. + +- __Output shape__: + - if `return_sequences`: 3D tensor with shape: `(nb_samples, timesteps, output_dim)`. + - else: 2D tensor with shape: `(nb_samples, output_dim)`. + +- __Masking__: This layer supports masking for input data with a variable number of timesteps To introduce masks to your data, use an [Embedding](embeddings.md) layer with the `mask_zero` parameter set to true. + +- __Arguments__: + - __input_dim__: dimension of the input. + - __output_dim__: dimension of the internal projections and the final output. + - __init__: weight initialization function for the output cell. Can be the name of an existing function (str), or a Theano function (see: [initializations](../initializations.md)). + - __inner_init__: weight initialization function for the inner cells. + - __activation__: activation function for the output. Can be the name of an existing function (str), or a Theano function (see: [activations](../activations.md)). + - __inner_activation__: activation function for the inner cells. + - __weights__: list of numpy arrays to set as initial weights. The list should have 9 elements. + - __truncate_gradient__: Number of steps to use in truncated BPTT. See: [Theano "scan"](http://deeplearning.net/software/theano/library/scan.html). + - __return_sequences__: Boolean. Whether to return the last output in the output sequence, or the full sequence. + +- __References__: + - [An Empirical Exploration of Recurrent Network Architectures](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf) - \ No newline at end of file + diff --git a/docs/sources/models.md b/docs/sources/models.md index 0d0dfc6c50ee..4a8154de4dd5 100644 --- a/docs/sources/models.md +++ b/docs/sources/models.md @@ -7,40 +7,42 @@ model = keras.models.Sequential() ``` - __Methods__: - __add__(layer): Add a layer to the model. - - __compile__(optimizer, loss, class_mode="categorical"): - - __Arguments__: + - __compile__(optimizer, loss, class_mode="categorical"): + - __Arguments__: - __optimizer__: str (name of optimizer) or optimizer object. 
See [optimizers](optimizers.md). - __loss__: str (name of objective function) or objective function. See [objectives](objectives.md). - - __class_mode__: one of "categorical", "binary". This is only used for computing classification accuracy or using the predict_classes method. + - __class_mode__: one of "categorical", "binary". This is only used for computing classification accuracy or using the predict_classes method. - __theano_mode__: A `theano.compile.mode.Mode` ([reference](http://deeplearning.net/software/theano/library/compile/mode.html)) instance controlling specifying compilation options. - __fit__(X, y, batch_size=128, nb_epoch=100, verbose=1, validation_split=0., validation_data=None, shuffle=True, show_accuracy=False, callbacks=[], class_weight=None, sample_weight=None): Train a model for a fixed number of epochs. - __Return__: a history dictionary with a record of training loss values at successive epochs, as well as validation loss values (if applicable), accuracy (if applicable), etc. - - __Arguments__: + - __Arguments__: - __X__: data. - __y__: labels. - __batch_size__: int. Number of samples per gradient update. - - __nb_epoch__: int. + - __nb_epoch__: int. - __verbose__: 0 for no logging to stdout, 1 for progress bar logging, 2 for one log line per epoch. + - __callbacks__: `keras.callbacks.Callback` list. List of callbacks to apply during training. See [callbacks](callbacks.md). - __validation_split__: float (0. < x < 1). Fraction of the data to use as held-out validation data. - __validation_data__: tuple (X, y) to be used as held-out validation data. Will override validation_split. - - __shuffle__: boolean. Whether to shuffle the samples at each epoch. + - __shuffle__: boolean or str (for 'batch'). Whether to shuffle the samples at each epoch. 'batch' is a special option for dealing with the limitations of HDF5 data; it shuffles in batch-sized chunks. - __show_accuracy__: boolean. Whether to display class accuracy in the logs to stdout at each epoch. - - __callbacks__: `keras.callbacks.Callback` list. List of callbacks to apply during training. See [callbacks](callbacks.md). + - __class_weight__: dictionary mapping classes to a weight value, used for scaling the loss function (during training only). + - __sample_weight__: list or numpy array with 1:1 mapping to the training samples, used for scaling the loss function (during training only). For time-distributed data, there is one weight per sample *per timestep*, i.e. if your output data is shaped `(nb_samples, timesteps, output_dim)`, your mask should be of shape `(nb_samples, timesteps)`. This allows you to mask out or reweight individual output timesteps, which is useful in sequence to sequence learning. - __evaluate__(X, y, batch_size=128, show_accuracy=False, verbose=1): Show performance of the model over some validation data. - - __Return__: The loss score over the data. + - __Return__: The loss score over the data, or a `(loss, accuracy)` tuple if `show_accuracy=True`. - __Arguments__: Same meaning as fit method above. verbose is used as a binary flag (progress bar or nothing). - - __predict__(X, batch_size=128, verbose=1): + - __predict__(X, batch_size=128, verbose=1): - __Return__: An array of predictions for some test data. - __Arguments__: Same meaning as fit method above. - __predict_classes__(X, batch_size=128, verbose=1): Return an array of class predictions for some test data. - __Return__: An array of labels for some test data. - __Arguments__: Same meaning as fit method above. 
verbose is used as a binary flag (progress bar or nothing). - - __train__(X, y, accuracy=False): Single gradient update on one batch. if accuracy==False, return tuple (loss_on_batch, accuracy_on_batch). Else, return loss_on_batch. + - __train_on_batch__(X, y, accuracy=False): Single gradient update on one batch. - __Return__: loss over the data, or tuple `(loss, accuracy)` if `accuracy=True`. - - __test__(X, y, accuracy=False): Single performance evaluation on one batch. if accuracy==False, return tuple (loss_on_batch, accuracy_on_batch). Else, return loss_on_batch. + - __test_on_batch__(X, y, accuracy=False): Single performance evaluation on one batch. - __Return__: loss over the data, or tuple `(loss, accuracy)` if `accuracy=True`. - __save_weights__(fname, overwrite=False): Store the weights of all layers to a HDF5 file. If overwrite==False and the file already exists, an exception will be thrown. - - __load_weights__(fname): Sets the weights of a model, based to weights stored by __save__weights__. You can only __load__weights__ on a savefile from a model with an identical architecture. __load_weights__ can be called either before or after the __compile__ step. + - __load_weights__(fname): Sets the weights of a model, based to weights stored by __save_weights__. You can only __load_weights__ on a savefile from a model with an identical architecture. __load_weights__ can be called either before or after the __compile__ step. __Examples__: @@ -112,3 +114,92 @@ Epoch 2 10960/37800 [=======>......................] - ETA: 4s - loss: 0.0109 - acc.: 0.9420 ''' ``` + +--- + +## Graph + +Arbitrary connection graph. It can have any number of inputs and outputs, with each output trained with its own loss function. The quantity being optimized by a Graph model is the sum of all loss functions over the different outputs. + +```python +model = keras.models.Graph() +``` +- __Methods__: + - __add_input__(name, ndim=2, dtype='float'): Add an input with shape dimensionality `ndim`. + - __Arguments__: + - __ndim__: Use `ndim=2` for vector input `(samples, features)`, ndim=3 for temporal input `(samples, time, features)`, ndim=4 for image input `(samples, channels, height, width)`. + - __dtype__: `float` or `int`. Use `int` if the input is connected to an Embedding layer, `float` otherwise. + - __add_output__(name, input=None, inputs=[], merge_mode='concat'): Add an output connect to `input` or `inputs`. + - __Arguments__: + - __name__: str. unique identifier of the output. + - __input__: str name of the node that the output is connected to. Only specify *one* of either `input` or `inputs`. + - __inputs__: list of str names of the node that the output is connected to. + - __merge_mode__: "sum" or "concat". Only applicable if `inputs` list is specified. Merge mode for the different inputs. + - __add_node__(layer, name, input=None, inputs=[], merge_mode='concat'): Add an output connect to `input` or `inputs`. + - __Arguments__: + - __layer__: Layer instance. + - __name__: str. unique identifier of the node. + - __input__: str name of the node/input that the node is connected to. Only specify *one* of either `input` or `inputs`. + - __inputs__: list of str names of the node that the node is connected to. + - __merge_mode__: "sum" or "concat". Only applicable if `inputs` list is specified. Merge mode for the different inputs. + - __compile__(optimizer, loss): + - __Arguments__: + - __optimizer__: str (name of optimizer) or optimizer object. See [optimizers](optimizers.md). 
+ - __loss__: dictionary mapping the name(s) of the output(s) to a loss function (string name of objective function or objective function. See [objectives](objectives.md)).
+ - __fit__(data, batch_size=128, nb_epoch=100, verbose=1, validation_split=0., validation_data=None, shuffle=True, callbacks=[]): Train a model for a fixed number of epochs.
+ - __Return__: a history dictionary with a record of training loss values at successive epochs, as well as validation loss values (if applicable).
+ - __Arguments__:
+ - __data__: dictionary mapping input names and output names to appropriate numpy arrays. All arrays should contain the same number of samples.
+ - __batch_size__: int. Number of samples per gradient update.
+ - __nb_epoch__: int.
+ - __verbose__: 0 for no logging to stdout, 1 for progress bar logging, 2 for one log line per epoch.
+ - __callbacks__: `keras.callbacks.Callback` list. List of callbacks to apply during training. See [callbacks](callbacks.md).
+ - __validation_split__: float (0. < x < 1). Fraction of the data to use as held-out validation data.
+ - __validation_data__: tuple (X, y) to be used as held-out validation data. Will override validation_split.
+ - __shuffle__: boolean. Whether to shuffle the samples at each epoch.
+ - __evaluate__(data, batch_size=128, verbose=1): Show performance of the model over some validation data.
+ - __Return__: The loss score over the data.
+ - __Arguments__: Same meaning as fit method above. verbose is used as a binary flag (progress bar or nothing).
+ - __predict__(data, batch_size=128, verbose=1):
+ - __Return__: A dictionary mapping output names to arrays of predictions over the data.
+ - __Arguments__: Same meaning as fit method above. Only inputs need to be specified in `data`.
+ - __train_on_batch__(data): Single gradient update on one batch.
+ - __Return__: loss over the data.
+ - __test_on_batch__(data): Single performance evaluation on one batch.
+ - __Return__: loss over the data.
+ - __save_weights__(fname, overwrite=False): Store the weights of all layers to a HDF5 file. If `overwrite==False` and the file already exists, an exception will be thrown.
+ - __load_weights__(fname): Sets the weights of a model, based on weights stored by __save_weights__. You can only __load_weights__ on a savefile from a model with an identical architecture. __load_weights__ can be called either before or after the __compile__ step.
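As a supplement to the examples below, here is a minimal sketch of checkpointing a `Graph` model with `save_weights`/`load_weights`, which the examples do not cover. The tiny architecture, the random data, and the `graph_weights.h5` filename are illustrative assumptions, not part of this patch.

```python
# Hypothetical sketch: persist Graph weights to HDF5 and restore them into a
# second model with the same architecture (requires h5py to be installed).
import numpy as np
from keras.models import Graph
from keras.layers.core import Dense

X = np.random.random((64, 32))
y = np.random.random((64, 4))

def build_graph():
    g = Graph()
    g.add_input(name='input', ndim=2)
    g.add_node(Dense(32, 4), name='dense', input='input')
    g.add_output(name='output', input='dense')
    g.compile('rmsprop', {'output': 'mse'})
    return g

model = build_graph()
model.fit({'input': X, 'output': y}, nb_epoch=1)
model.save_weights('graph_weights.h5', overwrite=True)

# load_weights only requires an identical architecture; it can be called
# before or after compile.
restored = build_graph()
restored.load_weights('graph_weights.h5')
predictions = restored.predict({'input': X})  # {'output': ...}
```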
+ + +__Examples__: + +```python +# graph model with one input and two outputs +graph = Graph() +graph.add_input(name='input', ndim=2) +graph.add_node(Dense(32, 16), name='dense1', input='input') +graph.add_node(Dense(32, 4), name='dense2', input='input') +graph.add_node(Dense(16, 4), name='dense3', input='dense1') +graph.add_output(name='output1', input='dense2') +graph.add_output(name='output2', input='dense3') + +graph.compile('rmsprop', {'output1':'mse', 'output2':'mse'}) +history = graph.fit({'input':X_train, 'output1':y_train, 'output2':y2_train}, nb_epoch=10) + +``` + +```python +# graph model with two inputs and one output +graph = Graph() +graph.add_input(name='input1', ndim=2) +graph.add_input(name='input2', ndim=2) +graph.add_node(Dense(32, 16), name='dense1', input='input1') +graph.add_node(Dense(32, 4), name='dense2', input='input2') +graph.add_node(Dense(16, 4), name='dense3', input='dense1') +graph.add_output(name='output', inputs=['dense2', 'dense3'], merge_mode='sum') +graph.compile('rmsprop', {'output':'mse'}) + +history = graph.fit({'input1':X_train, 'input2':X2_train, 'output':y_train}, nb_epoch=10) +predictions = graph.predict({'input1':X_test, 'input2':X2_test}) # {'output':...} + +``` diff --git a/docs/sources/objectives.md b/docs/sources/objectives.md index 45b6f0b760a5..7e0b885eaaf0 100644 --- a/docs/sources/objectives.md +++ b/docs/sources/objectives.md @@ -7,18 +7,22 @@ An objective function (or loss function, or optimization score function) is one model.compile(loss='mean_squared_error', optimizer='sgd') ``` -You can either pass the name of an existing objective, or pass a Theano symbolic function that returns a scalar and takes the following two arguments: +You can either pass the name of an existing objective, or pass a Theano symbolic function that returns a scalar for each data-point and takes the following two arguments: - __y_true__: True labels. Theano tensor. - __y_pred__: Predictions. Theano tensor of the same shape as y_true. +The actual optimized objective is the mean of the output array across all datapoints. + For a few examples of such functions, check out the [objectives source](https://github.com/fchollet/keras/blob/master/keras/objectives.py). ## Available objectives - __mean_squared_error__ / __mse__ - __mean_absolute_error__ / __mae__ +- __mean_absolute_percentage_error__ / __mape__ +- __mean_squared_logarithmic_error__ / __msle__ - __squared_hinge__ - __hinge__ - __binary_crossentropy__: Also known as logloss. -- __categorical_crossentropy__: Also known as multiclass logloss. __Note__: using this objective requires that your labels are binary arrays of shape `(nb_samples, nb_classes)`. \ No newline at end of file +- __categorical_crossentropy__: Also known as multiclass logloss. __Note__: using this objective requires that your labels are binary arrays of shape `(nb_samples, nb_classes)`. diff --git a/docs/sources/optimizers.md b/docs/sources/optimizers.md index c6868a699918..35e2269048ab 100644 --- a/docs/sources/optimizers.md +++ b/docs/sources/optimizers.md @@ -103,16 +103,15 @@ __Arguments__: ## Adam ```python -keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, kappa=1-1e-8) +keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8) ``` -Adam optimizer, proposed by Kingma and Lei Ba in [Adam: A Method For Stochastic Optimization](http://arxiv.org/pdf/1412.6980v4.pdf). Default parameters are those suggested in the paper. 
The parameter "lambda" from the paper has been renamed kappa, for syntactic reasons. +Adam optimizer, proposed by Kingma and Lei Ba in [Adam: A Method For Stochastic Optimization](http://arxiv.org/pdf/1412.6980v8.pdf). Default parameters are those suggested in the paper. __Arguments__: -- __lr__: float >= 0. Learning rate. +- __lr__: float >= 0. Learning rate. - __beta_1__, __beta_2__: floats, 0 < beta < 1. Generally close to 1. - __epsilon__: float >= 0. Fuzz factor. -- __kappa__: float 0 < kappa < 1. Lambda parameter in the original paper. ---- \ No newline at end of file +--- diff --git a/docs/sources/regularizers.md b/docs/sources/regularizers.md index eac70167ef23..9e54321a9f0c 100644 --- a/docs/sources/regularizers.md +++ b/docs/sources/regularizers.md @@ -1,18 +1,40 @@ ## Usage of regularizers -Regularizers allow to apply penalties on network parameters during optimization. +Regularizers allow to apply penalties on layer parameters or layer activity during optimization. These penalties are incorporated in the loss function that the network optimizes. -The keyword arguments used for passing penalties to parameters in a layer will depend on the layer. +The penalties are applied on a per-layer basis. The exact API will depend on the layer, but the layers `Dense`, `TimeDistributedDense`, `MaxoutDense`, `Convolution1D` and `Convolution2D` have a unified API. -In the `Dense` layer it is simply `W_regularizer` for the main weights matrix, and `b_regularizer` for the bias. +These layers expose 3 keyword arguments: + +- `W_regularizer`: instance of `keras.regularizers.WeightRegularizer` +- `b_regularizer`: instance of `keras.regularizers.WeightRegularizer` +- `activity_regularizer`: instance of `keras.regularizers.ActivityRegularizer` + + +## Example ```python -from keras.regularizers import l2 -model.add(Dense(64, 64, W_regularizer = l2(.01))) +from keras.regularizers import l2, activity_l2 +model.add(Dense(64, 64, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01))) ``` ## Available penalties -- __l1__(l=0.01): L1 regularization penalty, also known as LASSO -- __l2__(l=0.01): L2 regularization penalty, also known as weight decay, or Ridge -- __l1l2__(l1=0.01, l2=0.01): L1-L2 regularization penalty, also known as ElasticNet +```python +keras.regularizers.WeightRegularizer(l1=0., l2=0.) +``` + +```python +keras.regularizers.ActivityRegularizer(l1=0., l2=0.) +``` + +## Shortcuts + +These are shortcut functions available in `keras.regularizers`. + +- __l1__(l=0.01): L1 weight regularization penalty, also known as LASSO +- __l2__(l=0.01): L2 weight regularization penalty, also known as weight decay, or Ridge +- __l1l2__(l1=0.01, l2=0.01): L1-L2 weight regularization penalty, also known as ElasticNet +- __activity_l1__(l=0.01): L1 activity regularization +- __activity_l2__(l=0.01): L2 activity regularization +- __activity_l1l2__(l1=0.01, l2=0.01): L1+L2 activity regularization diff --git a/docs/sources/utils/visualization.md b/docs/sources/utils/visualization.md deleted file mode 100644 index 1c7b67fa3de9..000000000000 --- a/docs/sources/utils/visualization.md +++ /dev/null @@ -1,28 +0,0 @@ -## Grapher - -Creates a visualization of the model structure using `pydot`. - -```python -grapher = keras.utils.dot_utils.Grapher() -``` -- __Methods__: - - __plot__(model, to_file): creates a graph visualizing the structure of `model` and writes it to `to_file`. - - __Arguments__: - - __model__: an instance of a Keras model (e.g. 
`Sequential`) - - __to_file__: the filename to save the visualization png to. - -__Examples__: - -```python -from keras.models import Sequential -from keras.layers.core import Dense, Activation -from keras.utils.dot_utils import Grapher - -grapher = Grapher() - -model = Sequential() -model.add(Dense(64, 2, init='uniform')) -model.add(Activation('softmax')) -grapher.plot(model, 'model.png') -``` - diff --git a/examples/babi_rnn.py b/examples/babi_rnn.py new file mode 100644 index 000000000000..2b08cb8f4d83 --- /dev/null +++ b/examples/babi_rnn.py @@ -0,0 +1,199 @@ +from __future__ import absolute_import +from __future__ import print_function +from functools import reduce +import re +import tarfile + +import numpy as np +np.random.seed(1337) # for reproducibility + +from keras.datasets.data_utils import get_file +from keras.layers.embeddings import Embedding +from keras.layers.core import Dense, Merge +from keras.layers import recurrent +from keras.models import Sequential +from keras.preprocessing.sequence import pad_sequences + +''' +Trains two recurrent neural networks based upon a story and a question. +The resulting merged vector is then queried to answer a range of bAbI tasks. + +The results are comparable to those for an LSTM model provided in Weston et al.: +"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks" +http://arxiv.org/abs/1502.05698 + +Task Number | FB LSTM Baseline | Keras QA +--- | --- | --- +QA1 - Single Supporting Fact | 50 | 52.1 +QA2 - Two Supporting Facts | 20 | 37.0 +QA3 - Three Supporting Facts | 20 | 20.5 +QA4 - Two Arg. Relations | 61 | 62.9 +QA5 - Three Arg. Relations | 70 | 61.9 +QA6 - Yes/No Questions | 48 | 50.7 +QA7 - Counting | 49 | 78.9 +QA8 - Lists/Sets | 45 | 77.2 +QA9 - Simple Negation | 64 | 64.0 +QA10 - Indefinite Knowledge | 44 | 47.7 +QA11 - Basic Coreference | 72 | 74.9 +QA12 - Conjunction | 74 | 76.4 +QA13 - Compound Coreference | 94 | 94.4 +QA14 - Time Reasoning | 27 | 34.8 +QA15 - Basic Deduction | 21 | 32.4 +QA16 - Basic Induction | 23 | 50.6 +QA17 - Positional Reasoning | 51 | 49.1 +QA18 - Size Reasoning | 52 | 90.8 +QA19 - Path Finding | 8 | 9.0 +QA20 - Agent's Motivations | 91 | 90.7 + +For the resources related to the bAbI project, refer to: +https://research.facebook.com/researchers/1543934539189348 + +Notes: + +- With default word, sentence, and query vector sizes, the GRU model achieves: + - 52.1% test accuracy on QA1 in 20 epochs (2 seconds per epoch on CPU) + - 37.0% test accuracy on QA2 in 20 epochs (16 seconds per epoch on CPU) +In comparison, the Facebook paper achieves 50% and 20% for the LSTM baseline. + +- The task does not traditionally parse the question separately. This likely +improves accuracy and is a good example of merging two RNNs. + +- The word vector embeddings are not shared between the story and question RNNs. + +- See how the accuracy changes given 10,000 training samples (en-10k) instead +of only 1000. 1000 was used in order to be comparable to the original paper. + +- Experiment with GRU, LSTM, and JZS1-3 as they give subtly different results. + +- The length and noise (i.e. 'useless' story components) impact the ability for +LSTMs / GRUs to provide the correct answer. Given only the supporting facts, +these RNNs can achieve 100% accuracy on many tasks. Memory networks and neural +networks that use attentional processes can efficiently search through this +noise to find the relevant statements, improving performance substantially. 
+This becomes especially obvious on QA2 and QA3, both far longer than QA1. +''' + + +def tokenize(sent): + '''Return the tokens of a sentence including punctuation. + + >>> tokenize('Bob dropped the apple. Where is the apple?') + ['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?'] + ''' + return [x.strip() for x in re.split('(\W+)?', sent) if x.strip()] + + +def parse_stories(lines, only_supporting=False): + '''Parse stories provided in the bAbi tasks format + + If only_supporting is true, only the sentences that support the answer are kept. + ''' + data = [] + story = [] + for line in lines: + line = line.decode('utf-8').strip() + nid, line = line.split(' ', 1) + nid = int(nid) + if nid == 1: + story = [] + if '\t' in line: + q, a, supporting = line.split('\t') + q = tokenize(q) + substory = None + if only_supporting: + # Only select the related substory + supporting = map(int, supporting.split()) + substory = [story[i - 1] for i in supporting] + else: + # Provide all the substories + substory = [x for x in story if x] + data.append((substory, q, a)) + story.append('') + else: + sent = tokenize(line) + story.append(sent) + return data + + +def get_stories(f, only_supporting=False, max_length=None): + '''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story. + + If max_length is supplied, any stories longer than max_length tokens will be discarded. + ''' + data = parse_stories(f.readlines(), only_supporting=only_supporting) + flatten = lambda data: reduce(lambda x, y: x + y, data) + data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length] + return data + + +def vectorize_stories(data): + X = [] + Xq = [] + Y = [] + for story, query, answer in data: + x = [word_idx[w] for w in story] + xq = [word_idx[w] for w in query] + y = np.zeros(vocab_size) + y[word_idx[answer]] = 1 + X.append(x) + Xq.append(xq) + Y.append(y) + return pad_sequences(X, maxlen=story_maxlen), pad_sequences(Xq, maxlen=query_maxlen), np.array(Y) + +RNN = recurrent.GRU +EMBED_HIDDEN_SIZE = 50 +SENT_HIDDEN_SIZE = 100 +QUERY_HIDDEN_SIZE = 100 +BATCH_SIZE = 32 +EPOCHS = 20 +print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN, EMBED_HIDDEN_SIZE, SENT_HIDDEN_SIZE, QUERY_HIDDEN_SIZE)) + +path = get_file('babi-tasks-v1-2.tar.gz', origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz') +tar = tarfile.open(path) +# Default QA1 with 1000 samples +# challenge = 'tasks_1-20_v1-2/en/qa1_single-supporting-fact_{}.txt' +# QA1 with 10,000 samples +# challenge = 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt' +# QA2 with 1000 samples +challenge = 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt' +# QA2 with 10,000 samples +# challenge = 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt' +train = get_stories(tar.extractfile(challenge.format('train'))) +test = get_stories(tar.extractfile(challenge.format('test'))) + +vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train + test))) +# Reserve 0 for masking via pad_sequences +vocab_size = len(vocab) + 1 +word_idx = dict((c, i + 1) for i, c in enumerate(vocab)) +story_maxlen = max(map(len, (x for x, _, _ in train + test))) +query_maxlen = max(map(len, (x for _, x, _ in train + test))) + +X, Xq, Y = vectorize_stories(train) +tX, tXq, tY = vectorize_stories(test) + +print('vocab = {}'.format(vocab)) +print('X.shape = {}'.format(X.shape)) +print('Xq.shape = 
{}'.format(Xq.shape)) +print('Y.shape = {}'.format(Y.shape)) +print('story_maxlen, query_maxlen = {}, {}'.format(story_maxlen, query_maxlen)) + +print('Build model...') + +sentrnn = Sequential() +sentrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE, mask_zero=True)) +sentrnn.add(RNN(EMBED_HIDDEN_SIZE, SENT_HIDDEN_SIZE, return_sequences=False)) + +qrnn = Sequential() +qrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE)) +qrnn.add(RNN(EMBED_HIDDEN_SIZE, QUERY_HIDDEN_SIZE, return_sequences=False)) + +model = Sequential() +model.add(Merge([sentrnn, qrnn], mode='concat')) +model.add(Dense(SENT_HIDDEN_SIZE + QUERY_HIDDEN_SIZE, vocab_size, activation='softmax')) + +model.compile(optimizer='adam', loss='categorical_crossentropy', class_mode='categorical') + +print('Training') +model.fit([X, Xq], Y, batch_size=BATCH_SIZE, nb_epoch=EPOCHS, validation_split=0.05, show_accuracy=True) +loss, acc = model.evaluate([tX, tXq], tY, batch_size=BATCH_SIZE, show_accuracy=True) +print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc)) diff --git a/examples/cifar10_cnn.py b/examples/cifar10_cnn.py index 28dd39ef05df..b49524805793 100644 --- a/examples/cifar10_cnn.py +++ b/examples/cifar10_cnn.py @@ -19,7 +19,7 @@ (it's still underfitting at that point, though). Note: the data was pickled with Python 2, and some encoding issues might prevent you - from loading it in Python 3. You might have to load it in Python 2, + from loading it in Python 3. You might have to load it in Python 2, save it in a different format, load it in Python 3 and repickle it. ''' @@ -40,16 +40,16 @@ model = Sequential() -model.add(Convolution2D(32, 3, 3, 3, border_mode='full')) +model.add(Convolution2D(32, 3, 3, 3, border_mode='full')) model.add(Activation('relu')) model.add(Convolution2D(32, 32, 3, 3)) model.add(Activation('relu')) model.add(MaxPooling2D(poolsize=(2, 2))) model.add(Dropout(0.25)) -model.add(Convolution2D(64, 32, 3, 3, border_mode='full')) +model.add(Convolution2D(64, 32, 3, 3, border_mode='full')) model.add(Activation('relu')) -model.add(Convolution2D(64, 64, 3, 3)) +model.add(Convolution2D(64, 64, 3, 3)) model.add(Activation('relu')) model.add(MaxPooling2D(poolsize=(2, 2))) model.add(Dropout(0.25)) @@ -73,7 +73,7 @@ X_test = X_test.astype("float32") X_train /= 255 X_test /= 255 - model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10) + model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch) score = model.evaluate(X_test, Y_test, batch_size=batch_size) print('Test score:', score) @@ -82,18 +82,18 @@ # this will do preprocessing and realtime data augmentation datagen = ImageDataGenerator( - featurewise_center=True, # set input mean to 0 over the dataset - samplewise_center=False, # set each sample mean to 0 - featurewise_std_normalization=True, # divide inputs by std of the dataset - samplewise_std_normalization=False, # divide each input by its std - zca_whitening=False, # apply ZCA whitening - rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180) - width_shift_range=0.2, # randomly shift images horizontally (fraction of total width) - height_shift_range=0.2, # randomly shift images vertically (fraction of total height) - horizontal_flip=True, # randomly flip images - vertical_flip=False) # randomly flip images - - # compute quantities required for featurewise normalization + featurewise_center=True, # set input mean to 0 over the dataset + samplewise_center=False, # set each sample mean to 0 + featurewise_std_normalization=True, # divide inputs by std of the 
dataset + samplewise_std_normalization=False, # divide each input by its std + zca_whitening=False, # apply ZCA whitening + rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180) + width_shift_range=0.2, # randomly shift images horizontally (fraction of total width) + height_shift_range=0.2, # randomly shift images vertically (fraction of total height) + horizontal_flip=True, # randomly flip images + vertical_flip=False) # randomly flip images + + # compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied) datagen.fit(X_train) @@ -105,20 +105,12 @@ # batch train with realtime data augmentation progbar = generic_utils.Progbar(X_train.shape[0]) for X_batch, Y_batch in datagen.flow(X_train, Y_train): - loss = model.train(X_batch, Y_batch) + loss = model.train_on_batch(X_batch, Y_batch) progbar.add(X_batch.shape[0], values=[("train loss", loss)]) print("Testing...") # test time! progbar = generic_utils.Progbar(X_test.shape[0]) for X_batch, Y_batch in datagen.flow(X_test, Y_test): - score = model.test(X_batch, Y_batch) + score = model.test_on_batch(X_batch, Y_batch) progbar.add(X_batch.shape[0], values=[("test loss", score)]) - - - - - - - - diff --git a/examples/imdb_cnn.py b/examples/imdb_cnn.py new file mode 100644 index 000000000000..997f96bb299a --- /dev/null +++ b/examples/imdb_cnn.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import +from __future__ import print_function +import numpy as np +np.random.seed(1337) # for reproducibility + +from keras.preprocessing import sequence +from keras.optimizers import RMSprop +from keras.models import Sequential +from keras.layers.core import Dense, Dropout, Activation, Flatten +from keras.layers.embeddings import Embedding +from keras.layers.convolutional import Convolution1D, MaxPooling1D +from keras.datasets import imdb + +''' + This example demonstrates the use of Convolution1D + for text classification. + + Run on GPU: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python imdb_cnn.py + + Get to 0.8330 test accuracy after 3 epochs. 100s/epoch on K520 GPU. 
+''' + +# set parameters: +max_features = 5000 +maxlen = 100 +batch_size = 32 +embedding_dims = 100 +nb_filters = 250 +filter_length = 3 +hidden_dims = 250 +nb_epoch = 3 + +print("Loading data...") +(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features, + test_split=0.2) +print(len(X_train), 'train sequences') +print(len(X_test), 'test sequences') + +print("Pad sequences (samples x time)") +X_train = sequence.pad_sequences(X_train, maxlen=maxlen) +X_test = sequence.pad_sequences(X_test, maxlen=maxlen) +print('X_train shape:', X_train.shape) +print('X_test shape:', X_test.shape) + +print('Build model...') +model = Sequential() + +# we start off with an efficient embedding layer which maps +# our vocab indices into embedding_dims dimensions +model.add(Embedding(max_features, embedding_dims)) +model.add(Dropout(0.25)) + +# we add a Convolution1D, which will learn nb_filters +# word group filters of size filter_length: +model.add(Convolution1D(input_dim=embedding_dims, + nb_filter=nb_filters, + filter_length=filter_length, + border_mode="valid", + activation="relu", + subsample_length=1)) + +# we use standard max pooling (halving the output of the previous layer): +model.add(MaxPooling1D(pool_length=2)) + +# We flatten the output of the conv layer, so that we can add a vanilla dense layer: +model.add(Flatten()) + +# Computing the output shape of a conv layer can be tricky; +# for a good tutorial, see: http://cs231n.github.io/convolutional-networks/ +output_size = nb_filters * (((maxlen - filter_length) / 1) + 1) / 2 + +# We add a vanilla hidden layer: +model.add(Dense(output_size, hidden_dims)) +model.add(Dropout(0.25)) +model.add(Activation('relu')) + +# We project onto a single unit output layer, and squash it with a sigmoid: +model.add(Dense(hidden_dims, 1)) +model.add(Activation('sigmoid')) + +model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode="binary") +model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, validation_data=(X_test, y_test)) diff --git a/examples/imdb_lstm.py b/examples/imdb_lstm.py index b23bd152a630..6dcb38d452ab 100644 --- a/examples/imdb_lstm.py +++ b/examples/imdb_lstm.py @@ -1,6 +1,7 @@ from __future__ import absolute_import from __future__ import print_function import numpy as np +np.random.seed(1337) # for reproducibility from keras.preprocessing import sequence from keras.optimizers import SGD, RMSprop, Adagrad @@ -14,28 +15,25 @@ ''' Train a LSTM on the IMDB sentiment classification task. - The dataset is actually too small for LSTM to be of any advantage + The dataset is actually too small for LSTM to be of any advantage compared to simpler, much faster methods such as TF-IDF+LogReg. - Notes: + Notes: - - RNNs are tricky. Choice of batch size is important, - choice of loss and optimizer is critical, etc. - Most configurations won't converge. + - RNNs are tricky. Choice of batch size is important, + choice of loss and optimizer is critical, etc. + Some configurations won't converge. - - LSTM loss decrease during training can be quite different - from what you see with CNNs/MLPs/etc. It's more or less a sigmoid - instead of an inverse exponential. + - LSTM loss decrease patterns during training can be quite different + from what you see with CNNs/MLPs/etc. GPU command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python imdb_lstm.py - - 250s/epoch on GPU (GT 650M), vs. 400s/epoch on CPU (2.4Ghz Core i7). 
''' -max_features=20000 -maxlen = 100 # cut texts after this number of words (among top max_features most common words) -batch_size = 16 +max_features = 20000 +maxlen = 100 # cut texts after this number of words (among top max_features most common words) +batch_size = 32 print("Loading data...") (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features, test_split=0.2) @@ -50,8 +48,8 @@ print('Build model...') model = Sequential() -model.add(Embedding(max_features, 256)) -model.add(LSTM(256, 128)) # try using a GRU instead, for fun +model.add(Embedding(max_features, 128)) +model.add(LSTM(128, 128)) # try using a GRU instead, for fun model.add(Dropout(0.5)) model.add(Dense(128, 1)) model.add(Activation('sigmoid')) @@ -60,11 +58,7 @@ model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary") print("Train...") -model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=5, validation_split=0.1, show_accuracy=True) -score = model.evaluate(X_test, y_test, batch_size=batch_size) +model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=4, validation_data=(X_test, y_test), show_accuracy=True) +score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True) print('Test score:', score) - -classes = model.predict_classes(X_test, batch_size=batch_size) -acc = np_utils.accuracy(classes, y_test) print('Test accuracy:', acc) - diff --git a/examples/kaggle_otto_nn.py b/examples/kaggle_otto_nn.py index face3333a47a..1148d0fbcf79 100644 --- a/examples/kaggle_otto_nn.py +++ b/examples/kaggle_otto_nn.py @@ -3,6 +3,7 @@ import numpy as np import pandas as pd +np.random.seed(1337) # for reproducibility from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation @@ -17,7 +18,7 @@ This demonstrates how to reach a score of 0.4890 (local validation) on the Kaggle Otto challenge, with a deep net using Keras. - Compatible Python 2.7-3.4 + Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas. 
Recommended to run on GPU: Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py @@ -35,19 +36,19 @@ Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data ''' -np.random.seed(1337) # for reproducibility def load_data(path, train=True): df = pd.read_csv(path) X = df.values.copy() if train: - np.random.shuffle(X) # https://youtu.be/uyUXoap67N8 + np.random.shuffle(X) # https://youtu.be/uyUXoap67N8 X, labels = X[:, 1:-1].astype(np.float32), X[:, -1] return X, labels else: X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str) return X, ids + def preprocess_data(X, scaler=None): if not scaler: scaler = StandardScaler() @@ -55,6 +56,7 @@ def preprocess_data(X, scaler=None): X = scaler.transform(X) return X, scaler + def preprocess_labels(labels, encoder=None, categorical=True): if not encoder: encoder = LabelEncoder() @@ -64,6 +66,7 @@ def preprocess_labels(labels, encoder=None, categorical=True): y = np_utils.to_categorical(y) return y, encoder + def make_submission(y_prob, ids, encoder, fname): with open(fname, 'w') as f: f.write('id,') @@ -121,4 +124,3 @@ def make_submission(y_prob, ids, encoder, fname): proba = model.predict_proba(X_test) make_submission(proba, ids, encoder, fname='keras-otto.csv') - diff --git a/examples/lstm_text_generation.py b/examples/lstm_text_generation.py index 589c259ee305..84662193abd2 100644 --- a/examples/lstm_text_generation.py +++ b/examples/lstm_text_generation.py @@ -60,13 +60,10 @@ model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # helper function to sample an index from a probability array -def sample(a, diversity=0.75): - if random.random() > diversity: - return np.argmax(a) - while 1: - i = random.randint(0, len(a)-1) - if a[i] > random.random(): - return i +def sample(a, temperature=1.0): + a = np.log(a)/temperature + a = np.exp(a)/np.sum(np.exp(a)) + return np.argmax(np.random.multinomial(1,a,1)) # train the model, output generated text after each iteration for iteration in range(1, 60): @@ -77,7 +74,7 @@ def sample(a, diversity=0.75): start_index = random.randint(0, len(text) - maxlen - 1) - for diversity in [0.2, 0.4, 0.6, 0.8]: + for diversity in [0.2, 0.5, 1.0, 1.2]: print() print('----- diversity:', diversity) @@ -101,4 +98,4 @@ def sample(a, diversity=0.75): sys.stdout.write(next_char) sys.stdout.flush() - print() \ No newline at end of file + print() diff --git a/examples/mnist_cnn.py b/examples/mnist_cnn.py index e169e78a7908..d406cbabf873 100644 --- a/examples/mnist_cnn.py +++ b/examples/mnist_cnn.py @@ -1,11 +1,13 @@ from __future__ import absolute_import from __future__ import print_function +import numpy as np +np.random.seed(1337) # for reproducibility + from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.layers.convolutional import Convolution2D, MaxPooling2D from keras.utils import np_utils -import numpy as np ''' Train a simple convnet on the MNIST dataset. 
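To make the new `sample()` helper in `lstm_text_generation.py` above more concrete, here is a small self-contained sketch of softmax-temperature sampling over a toy probability vector. The probabilities and the draw count are made up for illustration; the temperatures mirror the diversity values used in the example script.

```python
# Standalone illustration of the temperature-based sampling added to
# lstm_text_generation.py above.
import numpy as np

def sample(a, temperature=1.0):
    # rescale log-probabilities by the temperature, renormalize, then draw once
    a = np.log(a) / temperature
    a = np.exp(a) / np.sum(np.exp(a))
    return np.argmax(np.random.multinomial(1, a, 1))

probs = np.array([0.6, 0.3, 0.1])
for t in [0.2, 0.5, 1.0, 1.2]:
    draws = [sample(probs, temperature=t) for _ in range(1000)]
    counts = np.bincount(draws, minlength=len(probs)) / 1000.0
    # low temperature concentrates draws on the most likely index,
    # high temperature flattens the empirical distribution
    print(t, counts)
```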
@@ -39,7 +41,7 @@ model = Sequential() -model.add(Convolution2D(32, 1, 3, 3, border_mode='full')) +model.add(Convolution2D(32, 1, 3, 3, border_mode='full')) model.add(Activation('relu')) model.add(Convolution2D(32, 32, 3, 3)) model.add(Activation('relu')) diff --git a/examples/mnist_irnn.py b/examples/mnist_irnn.py new file mode 100644 index 000000000000..2b07b8850a82 --- /dev/null +++ b/examples/mnist_irnn.py @@ -0,0 +1,87 @@ +from __future__ import absolute_import +from __future__ import print_function +import numpy as np +np.random.seed(1337) # for reproducibility + +from keras.datasets import mnist +from keras.models import Sequential +from keras.layers.core import Dense, Activation +from keras.initializations import normal, identity +from keras.layers.recurrent import SimpleRNN, LSTM +from keras.optimizers import RMSprop +from keras.utils import np_utils + +''' + This is a reproduction of the IRNN experiment + with pixel-by-pixel sequential MNIST in + "A Simple Way to Initialize Recurrent Networks of Rectified Linear Units " + by Quoc V. Le, Navdeep Jaitly, Geoffrey E. Hinton + + arXiv:1504.00941v2 [cs.NE] 7 Apr 201 + http://arxiv.org/pdf/1504.00941v2.pdf + + Optimizer is replaced with RMSprop which yields more stable and steady + improvement. + + Reaches 0.93 train/test accuracy after 900 epochs (which roughly corresponds + to 1687500 steps in the original paper.) +''' + +batch_size = 32 +nb_classes = 10 +nb_epochs = 200 +hidden_units = 100 + +learning_rate = 1e-6 +clip_norm = 1.0 +BPTT_truncate = 28*28 + +# the data, shuffled and split between train and test sets +(X_train, y_train), (X_test, y_test) = mnist.load_data() + +X_train = X_train.reshape(X_train.shape[0], -1, 1) +X_test = X_test.reshape(X_test.shape[0], -1, 1) +X_train = X_train.astype("float32") +X_test = X_test.astype("float32") +X_train /= 255 +X_test /= 255 +print('X_train shape:', X_train.shape) +print(X_train.shape[0], 'train samples') +print(X_test.shape[0], 'test samples') + +# convert class vectors to binary class matrices +Y_train = np_utils.to_categorical(y_train, nb_classes) +Y_test = np_utils.to_categorical(y_test, nb_classes) + +print('Evaluate IRNN...') +model = Sequential() +model.add(SimpleRNN(input_dim=1, output_dim=hidden_units, + init=lambda shape: normal(shape, scale=0.001), + inner_init=lambda shape: identity(shape, scale=1.0), + activation='relu', truncate_gradient=BPTT_truncate)) +model.add(Dense(hidden_units, nb_classes)) +model.add(Activation('softmax')) +rmsprop = RMSprop(lr=learning_rate) +model.compile(loss='categorical_crossentropy', optimizer=rmsprop) + +model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs, + show_accuracy=True, verbose=1, validation_data=(X_test, Y_test)) + +scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0) +print('IRNN test score:', scores[0]) +print('IRNN test accuracy:', scores[1]) + +print('Compare to LSTM...') +model = Sequential() +model.add(LSTM(1, hidden_units)) +model.add(Dense(hidden_units, nb_classes)) +model.add(Activation('softmax')) +rmsprop = RMSprop(lr=learning_rate) +model.compile(loss='categorical_crossentropy', optimizer=rmsprop) + +model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs, + show_accuracy=True, verbose=1, validation_data=(X_test, Y_test)) + +scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0) +print('LSTM test score:', scores[0]) +print('LSTM test accuracy:', scores[1]) diff --git a/examples/mnist_mlp.py b/examples/mnist_mlp.py index a1394dc234e8..faca23fa9bd6 100644 
--- a/examples/mnist_mlp.py +++ b/examples/mnist_mlp.py @@ -1,11 +1,13 @@ from __future__ import absolute_import from __future__ import print_function +import numpy as np +np.random.seed(1337) # for reproducibility + from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import SGD, Adam, RMSprop from keras.utils import np_utils -import numpy as np ''' Train a simple deep NN on the MNIST dataset. @@ -18,8 +20,6 @@ nb_classes = 10 nb_epoch = 20 -np.random.seed(1337) # for reproducibility - # the data, shuffled and split between tran and test sets (X_train, y_train), (X_test, y_test) = mnist.load_data() diff --git a/examples/reuters_mlp.py b/examples/reuters_mlp.py index 43ab864382aa..67106ce902b6 100644 --- a/examples/reuters_mlp.py +++ b/examples/reuters_mlp.py @@ -1,6 +1,7 @@ from __future__ import absolute_import from __future__ import print_function import numpy as np +np.random.seed(1337) # for reproducibility from keras.datasets import reuters from keras.models import Sequential diff --git a/examples/skipgram_word_embeddings.py b/examples/skipgram_word_embeddings.py index 5eb4579eb762..5da77159828d 100644 --- a/examples/skipgram_word_embeddings.py +++ b/examples/skipgram_word_embeddings.py @@ -1,6 +1,6 @@ ''' - We loop over words in a dataset, and for each word, we look at a context window around the word. + We loop over words in a dataset, and for each word, we look at a context window around the word. We generate pairs of (pivot_word, other_word_from_same_context) with label 1, and pairs of (pivot_word, random_word) with label 0 (skip-gram method). @@ -8,23 +8,23 @@ and compute a proximity score between the embeddings (= p(context|word)), trained with our positive and negative labels. - We then use the weights computed by WordContextProduct to encode words - and demonstrate that the geometry of the embedding space + We then use the weights computed by WordContextProduct to encode words + and demonstrate that the geometry of the embedding space captures certain useful semantic properties. - Read more about skip-gram in this particularly gnomic paper by Mikolov et al.: + Read more about skip-gram in this particularly gnomic paper by Mikolov et al.: http://arxiv.org/pdf/1301.3781v3.pdf - Note: you should run this on GPU, otherwise training will be quite slow. + Note: you should run this on GPU, otherwise training will be quite slow. On a EC2 GPU instance, expect 3 hours per 10e6 comments (~10e8 words) per epoch with dim_proj=256. Should be much faster on a modern GPU. GPU command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python skipgram_word_embeddings.py - Dataset: 5,845,908 Hacker News comments. - Obtain the dataset at: - https://mega.co.nz/#F!YohlwD7R!wec0yNO86SeaNGIYQBOR0A + Dataset: 5,845,908 Hacker News comments. 
+ Obtain the dataset at: + https://mega.co.nz/#F!YohlwD7R!wec0yNO86SeaNGIYQBOR0A (HNCommentsAll.1perline.json.bz2) ''' from __future__ import absolute_import @@ -43,16 +43,18 @@ from six.moves import range from six.moves import zip -max_features = 50000 # vocabulary size: top 50,000 most common words in data -skip_top = 100 # ignore top 100 most common words +max_features = 50000 # vocabulary size: top 50,000 most common words in data +skip_top = 100 # ignore top 100 most common words nb_epoch = 1 -dim_proj = 256 # embedding space dimension +dim_proj = 256 # embedding space dimension save = True load_model = False load_tokenizer = False train_model = True save_dir = os.path.expanduser("~/.keras/models") +if not os.path.exists(save_dir): + os.makedirs(save_dir) model_load_fname = "HN_skipgram_model.pkl" model_save_fname = "HN_skipgram_model.pkl" tokenizer_fname = "HN_tokenizer.pkl" @@ -64,6 +66,7 @@ to_replace = [(''', "'")] hex_tags = re.compile(r'&.*?;') + def clean_comment(comment): c = str(comment.encode("utf-8")) c = html_tags.sub(' ', c) @@ -72,6 +75,7 @@ def clean_comment(comment): c = hex_tags.sub(' ', c) return c + def text_generator(path=data_path): f = open(path) for i, l in enumerate(f): @@ -118,7 +122,7 @@ def text_generator(path=data_path): progbar = generic_utils.Progbar(tokenizer.document_count) samples_seen = 0 losses = [] - + for i, seq in enumerate(tokenizer.texts_to_sequences_generator(text_generator())): # get skipgram couples for one text in the dataset couples, labels = sequence.skipgrams(seq, max_features, window_size=4, negative_samples=1., sampling_table=sampling_table) @@ -156,26 +160,29 @@ def text_generator(path=data_path): reverse_word_index = dict([(v, k) for k, v in list(word_index.items())]) word_index = tokenizer.word_index + def embed_word(w): i = word_index.get(w) - if (not i) or (i=max_features): + if (not i) or (i < skip_top) or (i >= max_features): return None return norm_weights[i] + def closest_to_point(point, nb_closest=10): proximities = np.dot(norm_weights, point) tups = list(zip(list(range(len(proximities))), proximities)) tups.sort(key=lambda x: x[1], reverse=True) - return [(reverse_word_index.get(t[0]), t[1]) for t in tups[:nb_closest]] + return [(reverse_word_index.get(t[0]), t[1]) for t in tups[:nb_closest]] + def closest_to_word(w, nb_closest=10): i = word_index.get(w) - if (not i) or (i=max_features): + if (not i) or (i < skip_top) or (i >= max_features): return [] return closest_to_point(norm_weights[i].T, nb_closest) -''' the resuls in comments below were for: +''' the resuls in comments below were for: 5.8M HN comments dim_proj = 256 nb_epoch = 2 @@ -185,26 +192,27 @@ def closest_to_word(w, nb_closest=10): skip_top = 100 negative_samples = 1. window_size = 4 - and frequency subsampling of factor 10e-5. + and frequency subsampling of factor 10e-5. 
''' -words = ["article", # post, story, hn, read, comments -"3", # 6, 4, 5, 2 -"two", # three, few, several, each -"great", # love, nice, working, looking -"data", # information, memory, database -"money", # company, pay, customers, spend -"years", # ago, year, months, hours, week, days -"android", # ios, release, os, mobile, beta -"javascript", # js, css, compiler, library, jquery, ruby -"look", # looks, looking -"business", # industry, professional, customers -"company", # companies, startup, founders, startups -"after", # before, once, until -"own", # personal, our, having -"us", # united, country, american, tech, diversity, usa, china, sv -"using", # javascript, js, tools (lol) -"here", # hn, post, comments +words = [ + "article", # post, story, hn, read, comments + "3", # 6, 4, 5, 2 + "two", # three, few, several, each + "great", # love, nice, working, looking + "data", # information, memory, database + "money", # company, pay, customers, spend + "years", # ago, year, months, hours, week, days + "android", # ios, release, os, mobile, beta + "javascript", # js, css, compiler, library, jquery, ruby + "look", # looks, looking + "business", # industry, professional, customers + "company", # companies, startup, founders, startups + "after", # before, once, until + "own", # personal, our, having + "us", # united, country, american, tech, diversity, usa, china, sv + "using", # javascript, js, tools (lol) + "here", # hn, post, comments ] for w in words: @@ -212,4 +220,3 @@ def closest_to_word(w, nb_closest=10): print('====', w) for r in res: print(r) - diff --git a/keras/activations.py b/keras/activations.py index e0bd897eb510..88f1de103101 100644 --- a/keras/activations.py +++ b/keras/activations.py @@ -1,34 +1,44 @@ from __future__ import absolute_import -import theano import theano.tensor as T -import types + def softmax(x): return T.nnet.softmax(x.reshape((-1, x.shape[-1]))).reshape(x.shape) + def time_distributed_softmax(x): import warnings warnings.warn("time_distributed_softmax is deprecated. Just use softmax!", DeprecationWarning) return softmax(x) + def softplus(x): return T.nnet.softplus(x) + def relu(x): return (x + abs(x)) / 2.0 + def tanh(x): return T.tanh(x) + def sigmoid(x): return T.nnet.sigmoid(x) + def hard_sigmoid(x): return T.nnet.hard_sigmoid(x) + def linear(x): + ''' + The function returns the variable that is passed in, so all types work + ''' return x + from .utils.generic_utils import get_from_module def get(identifier): return get_from_module(identifier, globals(), 'activation function') diff --git a/keras/callbacks.py b/keras/callbacks.py index 5a6ef648ef67..1d784ff36cd1 100644 --- a/keras/callbacks.py +++ b/keras/callbacks.py @@ -1,16 +1,17 @@ from __future__ import absolute_import from __future__ import print_function + import theano import theano.tensor as T import numpy as np -import warnings -import time, json + +import time, json, warnings from collections import deque from .utils.generic_utils import Progbar -class CallbackList(object): +class CallbackList(object): def __init__(self, callbacks=[], queue_length=10): self.callbacks = [c for c in callbacks] self.queue_length = queue_length @@ -43,10 +44,9 @@ def on_batch_begin(self, batch, logs={}): callback.on_batch_begin(batch, logs) self._delta_ts_batch_begin.append(time.time() - t_before_callbacks) delta_t_median = np.median(self._delta_ts_batch_begin) - if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \ - and delta_t_median > 0.1: + if self._delta_t_batch > 0. 
and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1: warnings.warn('Method on_batch_begin() is slow compared ' - 'to the batch update (%f). Check your callbacks.' % delta_t_median) + 'to the batch update (%f). Check your callbacks.' % delta_t_median) self._t_enter_batch = time.time() def on_batch_end(self, batch, logs={}): @@ -56,10 +56,9 @@ def on_batch_end(self, batch, logs={}): callback.on_batch_end(batch, logs) self._delta_ts_batch_end.append(time.time() - t_before_callbacks) delta_t_median = np.median(self._delta_ts_batch_end) - if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch \ - and delta_t_median > 0.1: + if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1: warnings.warn('Method on_batch_end() is slow compared ' - 'to the batch update (%f). Check your callbacks.' % delta_t_median) + 'to the batch update (%f). Check your callbacks.' % delta_t_median) def on_train_begin(self, logs={}): for callback in self.callbacks: @@ -99,138 +98,130 @@ def on_train_begin(self, logs={}): def on_train_end(self, logs={}): pass -class BaseLogger(Callback): +class BaseLogger(Callback): def on_train_begin(self, logs={}): self.verbose = self.params['verbose'] def on_epoch_begin(self, epoch, logs={}): if self.verbose: print('Epoch %d' % epoch) - self.progbar = Progbar(target=self.params['nb_sample'], \ - verbose=self.verbose) - self.current = 0 - self.tot_loss = 0. - self.tot_acc = 0. + self.progbar = Progbar(target=self.params['nb_sample'], + verbose=self.verbose) + self.seen = 0 + self.totals = {} def on_batch_begin(self, batch, logs={}): - if self.current < self.params['nb_sample']: + if self.seen < self.params['nb_sample']: self.log_values = [] def on_batch_end(self, batch, logs={}): batch_size = logs.get('size', 0) - self.current += batch_size - - loss = logs.get('loss') - self.log_values.append(('loss', loss)) - self.tot_loss += loss * batch_size - if self.params['show_accuracy']: - accuracy = logs.get('accuracy') - self.log_values.append(('acc.', accuracy)) - self.tot_acc += accuracy * batch_size + self.seen += batch_size + + for k, v in logs.items(): + if k in self.totals: + self.totals[k] += v * batch_size + else: + self.totals[k] = v * batch_size + for k in self.params['metrics']: + if k in logs: + self.log_values.append((k, logs[k])) + # skip progbar update for the last batch; will be handled by on_epoch_end - if self.verbose and self.current < self.params['nb_sample']: - self.progbar.update(self.current, self.log_values) + if self.verbose and self.seen < self.params['nb_sample']: + self.progbar.update(self.seen, self.log_values) def on_epoch_end(self, epoch, logs={}): - self.log_values.append(('loss', self.tot_loss / self.current)) - if self.params['show_accuracy']: - self.log_values.append(('acc.', self.tot_acc / self.current)) - if self.params['do_validation']: - val_loss = logs.get('val_loss') - self.log_values.append(('val. loss', val_loss)) - if self.params['show_accuracy']: - val_acc = logs.get('val_accuracy') - self.log_values.append(('val. 
acc.', val_acc)) - self.progbar.update(self.current, self.log_values) + for k in self.params['metrics']: + if k in self.totals: + self.log_values.append((k, self.totals[k] / self.seen)) + if k in logs: + self.log_values.append((k, logs[k])) + if self.verbose: + self.progbar.update(self.seen, self.log_values) class History(Callback): def on_train_begin(self, logs={}): self.epoch = [] - self.loss = [] - if self.params['show_accuracy']: - self.accuracy = [] - if self.params['do_validation']: - self.validation_loss = [] - if self.params['show_accuracy']: - self.validation_accuracy = [] + self.history = {} def on_epoch_begin(self, epoch, logs={}): self.seen = 0 - self.tot_loss = 0. - self.tot_accuracy = 0. + self.totals = {} def on_batch_end(self, batch, logs={}): batch_size = logs.get('size', 0) self.seen += batch_size - self.tot_loss += logs.get('loss', 0.) * batch_size - if self.params['show_accuracy']: - self.tot_accuracy += logs.get('accuracy', 0.) * batch_size + for k, v in logs.items(): + if k in self.totals: + self.totals[k] += v * batch_size + else: + self.totals[k] = v * batch_size def on_epoch_end(self, epoch, logs={}): - val_loss = logs.get('val_loss') - val_acc = logs.get('val_accuracy') self.epoch.append(epoch) - self.loss.append(self.tot_loss / self.seen) - if self.params['show_accuracy']: - self.accuracy.append(self.tot_accuracy / self.seen) - if self.params['do_validation']: - self.validation_loss.append(val_loss) - if self.params['show_accuracy']: - self.validation_accuracy.append(val_acc) + for k, v in self.totals.items(): + if k not in self.history: + self.history[k] = [] + self.history[k].append(v / self.seen) + + for k, v in logs.items(): + if k not in self.history: + self.history[k] = [] + self.history[k].append(v) class ModelCheckpoint(Callback): - def __init__(self, filepath, verbose=0, save_best_only=False): + def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False): + super(Callback, self).__init__() - + self.monitor = monitor self.verbose = verbose self.filepath = filepath self.save_best_only = save_best_only - self.loss = [] - self.best_loss = np.Inf - self.val_loss = [] - self.best_val_loss = np.Inf + self.best = np.Inf def on_epoch_end(self, epoch, logs={}): - if self.save_best_only and self.params['do_validation']: - cur_val_loss = logs.get('val_loss') - self.val_loss.append(cur_val_loss) - if cur_val_loss < self.best_val_loss: - if self.verbose > 0: - print("Epoch %05d: validation loss improved from %0.5f to %0.5f, saving model to %s" - % (epoch, self.best_val_loss, cur_val_loss, self.filepath)) - self.best_val_loss = cur_val_loss - self.model.save_weights(self.filepath, overwrite=True) + if self.save_best_only: + current = logs.get(self.monitor) + if current is None: + warnings.warn("Can save best model only with %s available, skipping." 
% (self.monitor), RuntimeWarning) else: - if self.verbose > 0: - print("Epoch %05d: validation loss did not improve" % (epoch)) - elif self.save_best_only and not self.params['do_validation']: - warnings.warn("Can save best model only with validation data, skipping", RuntimeWarning) - elif not self.save_best_only: + if current < self.best: + if self.verbose > 0: + print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s" + % (epoch, self.monitor, self.best, current, self.filepath)) + self.best = current + self.model.save_weights(self.filepath, overwrite=True) + else: + if self.verbose > 0: + print("Epoch %05d: %s did not improve" % (epoch, self.monitor)) + else: if self.verbose > 0: print("Epoch %05d: saving model to %s" % (epoch, self.filepath)) self.model.save_weights(self.filepath, overwrite=True) class EarlyStopping(Callback): - def __init__(self, patience=0, verbose=0): + def __init__(self, monitor='val_loss', patience=0, verbose=0): super(Callback, self).__init__() + self.monitor = monitor self.patience = patience self.verbose = verbose - self.best_val_loss = np.Inf + self.best = np.Inf self.wait = 0 def on_epoch_end(self, epoch, logs={}): - if not self.params['do_validation']: - warnings.warn("Early stopping requires validation data!", RuntimeWarning) + current = logs.get(self.monitor) + if current is None: + warnings.warn("Early stopping requires %s available!" % (self.monitor), RuntimeWarning) - cur_val_loss = logs.get('val_loss') - if cur_val_loss < self.best_val_loss: - self.best_val_loss = cur_val_loss + if current < self.best: + self.best = current self.wait = 0 else: if self.wait >= self.patience: @@ -243,24 +234,31 @@ def on_epoch_end(self, epoch, logs={}): class RemoteMonitor(Callback): def __init__(self, root='http://localhost:9000'): self.root = root - self.seen = 0 - self.tot_loss = 0. - self.tot_accuracy = 0. def on_epoch_begin(self, epoch, logs={}): self.seen = 0 - self.tot_loss = 0. - self.tot_accuracy = 0. + self.totals = {} def on_batch_end(self, batch, logs={}): batch_size = logs.get('size', 0) self.seen += batch_size - self.tot_loss += logs.get('loss', 0.) * batch_size - if self.params['show_accuracy']: - self.tot_accuracy += logs.get('accuracy', 0.) 
* batch_size + for k, v in logs.items(): + if k in self.totals: + self.totals[k] += v * batch_size + else: + self.totals[k] = v * batch_size def on_epoch_end(self, epoch, logs={}): import requests - logs['epoch'] = epoch - logs['loss'] = self.tot_loss / self.seen - r = requests.post(self.root + '/publish/epoch/end/', {'data':json.dumps(logs)}) + send = {} + send['epoch'] = epoch + + for k, v in self.totals.items(): + send[k] = v / self.seen + for k, v in logs.items(): + send[k] = v + + try: + r = requests.post(self.root + '/publish/epoch/end/', {'data': json.dumps(send)}) + except: + print('Warning: could not reach RemoteMonitor root server at ' + str(self.root)) diff --git a/keras/constraints.py b/keras/constraints.py index cad750268b08..a7d886105f14 100644 --- a/keras/constraints.py +++ b/keras/constraints.py @@ -3,20 +3,45 @@ import theano.tensor as T import numpy as np -def maxnorm(m=2): - def maxnorm_wrap(p): + +class Constraint(object): + def __call__(self, p): + return p + + def get_config(self): + return {"name": self.__class__.__name__} + + +class MaxNorm(Constraint): + def __init__(self, m=2): + self.m = m + + def __call__(self, p): norms = T.sqrt(T.sum(T.sqr(p), axis=0)) - desired = T.clip(norms, 0, m) + desired = T.clip(norms, 0, self.m) p = p * (desired / (1e-7 + norms)) return p - return maxnorm_wrap -def nonneg(p): - p *= T.ge(p, 0) - return p + def get_config(self): + return {"name": self.__class__.__name__, + "m": self.m} + + +class NonNeg(Constraint): + def __call__(self, p): + p *= T.ge(p, 0) + return p + + +class UnitNorm(Constraint): + def __call__(self, p): + return p / T.sqrt(T.sum(p**2, axis=-1, keepdims=True)) -def identity(g): - return g +identity = Constraint +maxnorm = MaxNorm +nonneg = NonNeg +unitnorm = UnitNorm -def unitnorm(e): - return e / T.sqrt(T.sum(e**2, axis=-1, keepdims=True)) +from .utils.generic_utils import get_from_module +def get(identifier, kwargs=None): + return get_from_module(identifier, globals(), 'constraint', instantiate=True, kwargs=kwargs) diff --git a/keras/datasets/cifar10.py b/keras/datasets/cifar10.py index 2e0d971d026e..92ead6b16709 100644 --- a/keras/datasets/cifar10.py +++ b/keras/datasets/cifar10.py @@ -4,6 +4,7 @@ import numpy as np import os + def load_data(): dirname = "cifar-10-batches-py" origin = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" @@ -20,11 +21,11 @@ def load_data(): data, labels = load_batch(fpath) X_train[(i-1)*10000:i*10000, :, :, :] = data y_train[(i-1)*10000:i*10000] = labels - + fpath = os.path.join(path, 'test_batch') X_test, y_test = load_batch(fpath) y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) - return (X_train, y_train), (X_test, y_test) \ No newline at end of file + return (X_train, y_train), (X_test, y_test) diff --git a/keras/datasets/cifar100.py b/keras/datasets/cifar100.py index ad293f1fae04..d7eead0a862b 100644 --- a/keras/datasets/cifar100.py +++ b/keras/datasets/cifar100.py @@ -4,6 +4,7 @@ import numpy as np import os + def load_data(label_mode='fine'): if label_mode not in ['fine', 'coarse']: raise Exception('label_mode must be one of "fine" "coarse".') @@ -24,4 +25,4 @@ def load_data(label_mode='fine'): y_train = np.reshape(y_train, (len(y_train), 1)) y_test = np.reshape(y_test, (len(y_test), 1)) - return (X_train, y_train), (X_test, y_test) \ No newline at end of file + return (X_train, y_train), (X_test, y_test) diff --git a/keras/datasets/data_utils.py b/keras/datasets/data_utils.py index 06675f733868..23a275c86c3d 100644 --- 
a/keras/datasets/data_utils.py +++ b/keras/datasets/data_utils.py @@ -1,9 +1,15 @@ from __future__ import absolute_import from __future__ import print_function + import tarfile, inspect, os -from six.moves.urllib.request import urlretrieve +from six.moves.urllib.request import FancyURLopener + from ..utils.generic_utils import Progbar +class ParanoidURLopener(FancyURLopener): + def http_error_default(self, url, fp, errcode, errmsg, headers): + raise Exception('URL fetch failure on {}: {} -- {}'.format(url, errcode, errmsg)) + def get_file(fname, origin, untar=False): datadir = os.path.expanduser(os.path.join('~', '.keras', 'datasets')) if not os.path.exists(datadir): @@ -22,6 +28,7 @@ def get_file(fname, origin, untar=False): global progbar progbar = None + def dl_progress(count, block_size, total_size): global progbar if progbar is None: @@ -29,7 +36,7 @@ def dl_progress(count, block_size, total_size): else: progbar.update(count*block_size) - urlretrieve(origin, fpath, dl_progress) + ParanoidURLopener().retrieve(origin, fpath, dl_progress) progbar = None if untar: @@ -41,4 +48,3 @@ def dl_progress(count, block_size, total_size): return untar_fpath return fpath - diff --git a/keras/datasets/imdb.py b/keras/datasets/imdb.py index 7bfc839038ff..d589cab8959c 100644 --- a/keras/datasets/imdb.py +++ b/keras/datasets/imdb.py @@ -4,8 +4,12 @@ from .data_utils import get_file import random from six.moves import zip +import numpy as np + + +def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, + start_char=1, oov_char=2, index_from=3): -def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113): path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/imdb.pkl") if path.endswith(".gz"): @@ -16,10 +20,15 @@ def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_spli X, labels = six.moves.cPickle.load(f) f.close() - random.seed(seed) - random.shuffle(X) - random.seed(seed) - random.shuffle(labels) + np.random.seed(seed) + np.random.shuffle(X) + np.random.seed(seed) + np.random.shuffle(labels) + + if start_char is not None: + X = [[start_char] + [w + index_from for w in x] for x in X] + elif index_from: + X = [[w + index_from for w in x] for x in X] if maxlen: new_X = [] @@ -34,7 +43,20 @@ def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_spli if not nb_words: nb_words = max([max(x) for x in X]) - X = [[0 if (w >= nb_words or w < skip_top) else w for w in x] for x in X] + # by convention, use 2 as OOV word + # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV) + if oov_char is not None: + X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X] + else: + nX = [] + for x in X: + nx = [] + for w in x: + if (w >= nb_words or w < skip_top): + nx.append(w) + nX.append(nx) + X = nX + X_train = X[:int(len(X)*(1-test_split))] y_train = labels[:int(len(X)*(1-test_split))] @@ -42,4 +64,3 @@ def load_data(path="imdb.pkl", nb_words=None, skip_top=0, maxlen=None, test_spli y_test = labels[int(len(X)*(1-test_split)):] return (X_train, y_train), (X_test, y_test) - diff --git a/keras/datasets/mnist.py b/keras/datasets/mnist.py index 4e96a3fd5041..3b77ba359cf8 100644 --- a/keras/datasets/mnist.py +++ b/keras/datasets/mnist.py @@ -4,6 +4,7 @@ import six.moves.cPickle import sys + def load_data(path="mnist.pkl.gz"): path = get_file(path, origin="https://s3.amazonaws.com/img-datasets/mnist.pkl.gz") @@ -19,4 +20,4 @@ def 
load_data(path="mnist.pkl.gz"): f.close() - return data # (X_train, y_train), (X_test, y_test) + return data # (X_train, y_train), (X_test, y_test) diff --git a/keras/datasets/reuters.py b/keras/datasets/reuters.py index 17d26e844237..40bb07bc6cc4 100644 --- a/keras/datasets/reuters.py +++ b/keras/datasets/reuters.py @@ -7,6 +7,8 @@ import os import six.moves.cPickle from six.moves import zip +import numpy as np + def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), min_samples_per_topic=15): import re @@ -18,13 +20,12 @@ def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), for fname in os.listdir(path): if 'sgm' in fname: - s = open(path + fname).read() + s = open(os.path.join(path, fname)).read() tag = '' while tag in s: s = s[s.find(tag)+len(tag):] topics = s[:s.find('' in topics: + if topics and '' not in topics: topic = topics.replace('', '').replace('', '') wire_topics.append(topic) topic_counts[topic] = topic_counts.get(topic, 0) + 1 @@ -38,7 +39,7 @@ def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), # only keep most common topics items = list(topic_counts.items()) - items.sort(key = lambda x: x[1]) + items.sort(key=lambda x: x[1]) kept_topics = set() for x in items: print(x[0] + ': ' + str(x[1])) @@ -70,25 +71,35 @@ def make_reuters_dataset(path=os.path.join('datasets', 'temp', 'reuters21578'), print('Sanity check:') for w in ["banana", "oil", "chocolate", "the", "dsft"]: print('...index of', w, ':', tokenizer.word_index.get(w)) + print('text reconstruction:') + reverse_word_index = dict([(v, k) for k, v in tokenizer.word_index.items()]) + print(' '.join(reverse_word_index[i] for i in X[10])) - dataset = (X, labels) + dataset = (X, labels) print('-') print('Saving...') six.moves.cPickle.dump(dataset, open(os.path.join('datasets', 'data', 'reuters.pkl'), 'w')) - six.moves.cPickle.dump(tokenizer.word_index, open(os.path.join('datasets','data', 'reuters_word_index.pkl'), 'w')) + six.moves.cPickle.dump(tokenizer.word_index, open(os.path.join('datasets', 'data', 'reuters_word_index.pkl'), 'w')) +def load_data(path="reuters.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, + start_char=1, oov_char=2, index_from=3): -def load_data(path="reuters.pkl", nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113): path = get_file(path, origin="https://s3.amazonaws.com/text-datasets/reuters.pkl") f = open(path, 'rb') X, labels = six.moves.cPickle.load(f) f.close() - random.seed(seed) - random.shuffle(X) - random.seed(seed) - random.shuffle(labels) + + np.random.seed(seed) + np.random.shuffle(X) + np.random.seed(seed) + np.random.shuffle(labels) + + if start_char is not None: + X = [[start_char] + [w + index_from for w in x] for x in X] + elif index_from: + X = [[w + index_from for w in x] for x in X] if maxlen: new_X = [] @@ -103,7 +114,20 @@ def load_data(path="reuters.pkl", nb_words=None, skip_top=0, maxlen=None, test_s if not nb_words: nb_words = max([max(x) for x in X]) - X = [[0 if (w >= nb_words or w < skip_top) else w for w in x] for x in X] + # by convention, use 2 as OOV word + # reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV) + if oov_char is not None: + X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X] + else: + nX = [] + for x in X: + nx = [] + for w in x: + if (w >= nb_words or w < skip_top): + nx.append(w) + nX.append(nx) + X = nX + X_train = X[:int(len(X)*(1-test_split))] y_train = 
labels[:int(len(X)*(1-test_split))] diff --git a/keras/initializations.py b/keras/initializations.py index d6ce5134fe20..1edb484ec9ff 100644 --- a/keras/initializations.py +++ b/keras/initializations.py @@ -3,7 +3,8 @@ import theano.tensor as T import numpy as np -from .utils.theano_utils import sharedX, shared_zeros +from .utils.theano_utils import sharedX, shared_zeros, shared_ones + def get_fans(shape): fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:]) @@ -14,17 +15,20 @@ def get_fans(shape): def uniform(shape, scale=0.05): return sharedX(np.random.uniform(low=-scale, high=scale, size=shape)) + def normal(shape, scale=0.05): return sharedX(np.random.randn(*shape) * scale) + def lecun_uniform(shape): ''' Reference: LeCun 98, Efficient Backprop http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf ''' fan_in, fan_out = get_fans(shape) - scale = 1./np.sqrt(fan_in) + scale = np.sqrt(3. / fan_in) return uniform(shape, scale) + def glorot_normal(shape): ''' Reference: Glorot & Bengio, AISTATS 2010 ''' @@ -32,11 +36,13 @@ def glorot_normal(shape): s = np.sqrt(2. / (fan_in + fan_out)) return normal(shape, s) + def glorot_uniform(shape): fan_in, fan_out = get_fans(shape) - s = np.sqrt(2. / (fan_in + fan_out)) + s = np.sqrt(6. / (fan_in + fan_out)) return uniform(shape, s) - + + def he_normal(shape): ''' Reference: He et al., http://arxiv.org/abs/1502.01852 ''' @@ -44,31 +50,40 @@ def he_normal(shape): s = np.sqrt(2. / fan_in) return normal(shape, s) + def he_uniform(shape): fan_in, fan_out = get_fans(shape) - s = np.sqrt(2. / fan_in) + s = np.sqrt(6. / fan_in) return uniform(shape, s) + def orthogonal(shape, scale=1.1): ''' From Lasagne ''' flat_shape = (shape[0], np.prod(shape[1:])) a = np.random.normal(0.0, 1.0, flat_shape) u, _, v = np.linalg.svd(a, full_matrices=False) - q = u if u.shape == flat_shape else v # pick the one with the correct shape + # pick the one with the correct shape + q = u if u.shape == flat_shape else v q = q.reshape(shape) return sharedX(scale * q[:shape[0], :shape[1]]) + def identity(shape, scale=1): if len(shape) != 2 or shape[0] != shape[1]: raise Exception("Identity matrix initialization can only be used for 2D square matrices") else: return sharedX(scale * np.identity(shape[0])) + def zero(shape): return shared_zeros(shape) +def one(shape): + return shared_ones(shape) + + from .utils.generic_utils import get_from_module def get(identifier): return get_from_module(identifier, globals(), 'initialization') diff --git a/keras/layers/advanced_activations.py b/keras/layers/advanced_activations.py index 3f5e1ba78707..0ec01a897e1f 100644 --- a/keras/layers/advanced_activations.py +++ b/keras/layers/advanced_activations.py @@ -1,9 +1,12 @@ -from ..layers.core import Layer -from ..utils.theano_utils import shared_zeros +from ..layers.core import Layer, MaskedLayer +from ..utils.theano_utils import shared_zeros, shared_ones, sharedX +import theano.tensor as T +import numpy as np -class LeakyReLU(Layer): + +class LeakyReLU(MaskedLayer): def __init__(self, alpha=0.3): - super(LeakyReLU,self).__init__() + super(LeakyReLU, self).__init__() self.alpha = alpha def get_output(self, train): @@ -11,18 +14,18 @@ def get_output(self, train): return ((X + abs(X)) / 2.0) + self.alpha * ((X - abs(X)) / 2.0) def get_config(self): - return {"name":self.__class__.__name__, - "alpha":self.alpha} + return {"name": self.__class__.__name__, + "alpha": self.alpha} -class PReLU(Layer): +class PReLU(MaskedLayer): ''' - Reference: + Reference: Delving Deep into Rectifiers: Surpassing 
Human-Level Performance on ImageNet Classification http://arxiv.org/pdf/1502.01852v1.pdf ''' def __init__(self, input_shape): - super(PReLU,self).__init__() + super(PReLU, self).__init__() self.alphas = shared_zeros(input_shape) self.params = [self.alphas] self.input_shape = input_shape @@ -34,5 +37,34 @@ def get_output(self, train): return pos + neg def get_config(self): - return {"name":self.__class__.__name__, - "input_shape":self.input_shape} + return {"name": self.__class__.__name__, + "input_shape": self.input_shape} + + +class ParametricSoftplus(MaskedLayer): + ''' + Parametric Softplus of the form: alpha * (1 + exp(beta * X)) + + Reference: + Inferring Nonlinear Neuronal Computation Based on Physiologically Plausible Inputs + http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003143 + ''' + def __init__(self, input_shape, alpha_init=0.2, beta_init=5.0): + + super(ParametricSoftplus, self).__init__() + self.alpha_init = alpha_init + self.beta_init = beta_init + self.alphas = sharedX(alpha_init * np.ones(input_shape)) + self.betas = sharedX(beta_init * np.ones(input_shape)) + self.params = [self.alphas, self.betas] + self.input_shape = input_shape + + def get_output(self, train): + X = self.get_input(train) + return T.nnet.softplus(self.betas * X) * self.alphas + + def get_config(self): + return {"name": self.__class__.__name__, + "input_shape": self.input_shape, + "alpha_init": self.alpha_init, + "beta_init": self.beta_init} diff --git a/keras/layers/containers.py b/keras/layers/containers.py index c3d786368789..8246fd514ae2 100644 --- a/keras/layers/containers.py +++ b/keras/layers/containers.py @@ -1,51 +1,63 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import +from __future__ import print_function import theano.tensor as T -from ..layers.core import Layer +from ..layers.core import Layer, Merge +from ..utils.theano_utils import ndim_tensor from six.moves import range -def ndim_tensor(ndim): - if ndim == 2: - return T.matrix() - elif ndim == 3: - return T.tensor3() - elif ndim == 4: - return T.tensor4() - return T.matrix() class Sequential(Layer): + ''' + Simple linear stack of layers. 
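A minimal usage sketch, assuming two Dense layers of arbitrary sizes; the container collects params, regularizers, constraints and updates from every layer it holds:

    stack = Sequential()
    stack.add(Dense(20, 10, activation='tanh'))
    stack.add(Dense(10, 1, activation='sigmoid'))
    # stack.params, stack.regularizers, stack.constraints and stack.updates now
    # hold the concatenated lists gathered from both Dense layers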
+ + inherited from Layer: + - get_params + - get_output_mask + - supports_masked_input + ''' + def __init__(self, layers=[]): self.layers = [] self.params = [] self.regularizers = [] self.constraints = [] + self.updates = [] + for layer in layers: self.add(layer) - def connect(self, layer): + def set_previous(self, layer): self.layers[0].previous = layer def add(self, layer): self.layers.append(layer) if len(self.layers) > 1: - self.layers[-1].connect(self.layers[-2]) - - params, regularizers, constraints = layer.get_params() + self.layers[-1].set_previous(self.layers[-2]) + if not hasattr(self.layers[0], 'input'): + self.set_input() + layer.init_updates() + + params, regularizers, constraints, updates = layer.get_params() self.params += params self.regularizers += regularizers self.constraints += constraints + self.updates += updates def get_output(self, train=False): return self.layers[-1].get_output(train) + def set_input(self): + for l in self.layers: + if hasattr(l, 'input'): + ndim = l.input.ndim + self.layers[0].input = ndim_tensor(ndim) + break + def get_input(self, train=False): if not hasattr(self.layers[0], 'input'): - for l in self.layers: - if hasattr(l, 'input'): - break - ndim = l.input.ndim - self.layers[0].input = ndim_tensor(ndim) + self.set_input() return self.layers[0].get_input(train) @property @@ -65,5 +77,151 @@ def set_weights(self, weights): weights = weights[nb_param:] def get_config(self): - return {"name":self.__class__.__name__, - "layers":[layer.get_config() for layer in self.layers]} \ No newline at end of file + return {"name": self.__class__.__name__, + "layers": [layer.get_config() for layer in self.layers]} + + +class Graph(Layer): + ''' + Implement a NN graph with arbitrary layer connections, + arbitrary number of inputs and arbitrary number of outputs. + + Note: Graph can only be used as a layer + (connect, input, get_input, get_output) + when it has exactly one input and one output. 
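A usage sketch of the add_input / add_node / add_output API defined below, with assumed input and layer sizes:

    g = Graph()
    g.add_input(name='a', ndim=2)
    g.add_input(name='b', ndim=2)
    # concatenate the two inputs, then feed the merged tensor to a Dense node
    g.add_node(Dense(32, 16, activation='relu'), name='h', inputs=['a', 'b'], merge_mode='concat')
    g.add_output(name='out', input='h')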
+ + inherited from Layer: + - get_params + - get_output_mask + - supports_masked_input + - get_weights + - set_weights + ''' + def __init__(self): + self.namespace = set() # strings + self.nodes = {} # layer-like + self.inputs = {} # layer-like + self.input_order = [] # strings + self.outputs = {} # layer-like + self.output_order = [] # strings + self.input_config = [] # dicts + self.output_config = [] # dicts + self.node_config = [] # dicts + + self.params = [] + self.regularizers = [] + self.constraints = [] + self.updates = [] + + def set_previous(self, layer): + if len(self.inputs) != 1 or len(self.outputs) != 1: + raise Exception('The Graph container can only be used as a layer \ + when it has exactly one input and one output.') + self.inputs[self.input_order[0]].set_previous(layer) + + def get_input(self, train=False): + if len(self.inputs) != 1 or len(self.outputs) != 1: + raise Exception('The Graph container can only be used as a layer \ + when it has exactly one input and one output.') + return self.inputs[self.input_order[0]].get_input(train) + + @property + def input(self): + return self.get_input() + + def get_output(self, train=False): + if len(self.inputs) != 1 or len(self.outputs) != 1: + raise Exception('The Graph container can only be used as a layer \ + when it has exactly one input and one output.') + return self.outputs[self.output_order[0]].get_output(train) + + def add_input(self, name, ndim=2, dtype='float'): + if name in self.namespace: + raise Exception('Duplicate node identifier: ' + name) + self.namespace.add(name) + self.input_order.append(name) + layer = Layer() # empty layer + if dtype == 'float': + layer.input = ndim_tensor(ndim) + else: + if ndim == 2: + layer.input = T.imatrix() + else: + raise Exception('Type "int" can only be used with ndim==2 (Embedding).') + layer.input.name = name + self.inputs[name] = layer + self.input_config.append({'name': name, 'ndim': ndim, 'dtype': dtype}) + + def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat', create_output=False): + if hasattr(layer, 'set_name'): + layer.set_name(name) + if name in self.namespace: + raise Exception('Duplicate node identifier: ' + name) + if input: + if input not in self.namespace: + raise Exception('Unknown node/input identifier: ' + input) + if input in self.nodes: + layer.set_previous(self.nodes[input]) + elif input in self.inputs: + layer.set_previous(self.inputs[input]) + if inputs: + to_merge = [] + for n in inputs: + if n in self.nodes: + to_merge.append(self.nodes[n]) + elif n in self.inputs: + to_merge.append(self.inputs[n]) + else: + raise Exception('Unknown identifier: ' + n) + merge = Merge(to_merge, mode=merge_mode) + layer.set_previous(merge) + + self.namespace.add(name) + self.nodes[name] = layer + self.node_config.append({'name': name, + 'input': input, + 'inputs': inputs, + 'merge_mode': merge_mode}) + layer.init_updates() + params, regularizers, constraints, updates = layer.get_params() + self.params += params + self.regularizers += regularizers + self.constraints += constraints + self.updates += updates + + if create_output: + self.add_output(name, input=name) + + def add_output(self, name, input=None, inputs=[], merge_mode='concat'): + if name in self.output_order: + raise Exception('Duplicate output identifier: ' + name) + if input: + if input not in self.namespace: + raise Exception('Unknown node/input identifier: ' + input) + if input in self.nodes: + self.outputs[name] = self.nodes[input] + elif input in self.inputs: + self.ouputs[name] = 
self.inputs[input] + if inputs: + to_merge = [] + for n in inputs: + if n not in self.nodes: + raise Exception('Unknown identifier: ' + n) + to_merge.append(self.nodes[n]) + merge = Merge(to_merge, mode=merge_mode) + self.outputs[name] = merge + + self.output_order.append(name) + self.output_config.append({'name': name, + 'input': input, + 'inputs': inputs, + 'merge_mode': merge_mode}) + + def get_config(self): + return {"name": self.__class__.__name__, + "input_config": self.input_config, + "node_config": self.node_config, + "output_config": self.output_config, + "input_order": self.input_order, + "output_order": self.output_order, + "nodes": dict([(c["name"], self.nodes[c["name"]].get_config()) for c in self.node_config])} diff --git a/keras/layers/convolutional.py b/keras/layers/convolutional.py index 6d4db99ec748..440eedfed954 100644 --- a/keras/layers/convolutional.py +++ b/keras/layers/convolutional.py @@ -3,100 +3,143 @@ import theano import theano.tensor as T -from theano.tensor.signal import downsample -from .. import activations, initializations +from .. import activations, initializations, regularizers, constraints from ..utils.theano_utils import shared_zeros from ..layers.core import Layer class Convolution1D(Layer): - def __init__(self, nb_filter, stack_size, filter_length, - init='uniform', activation='linear', weights=None, - image_shape=None, border_mode='valid', subsample_length=1, - W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None): - - nb_row = 1 - nb_col = filter_length - + def __init__(self, input_dim, nb_filter, filter_length, + init='uniform', activation='linear', weights=None, + border_mode='valid', subsample_length=1, + W_regularizer=None, b_regularizer=None, activity_regularizer=None, + W_constraint=None, b_constraint=None): + + if border_mode not in {'valid', 'full', 'same'}: + raise Exception('Invalid border mode for Convolution1D:', border_mode) + + super(Convolution1D, self).__init__() self.nb_filter = nb_filter - self.stack_size = stack_size + self.input_dim = input_dim self.filter_length = filter_length self.subsample_length = subsample_length self.init = initializations.get(init) self.activation = activations.get(activation) self.subsample = (1, subsample_length) self.border_mode = border_mode - self.image_shape = image_shape - self.input = T.tensor4() - self.W_shape = (nb_filter, stack_size, nb_row, nb_col) + self.input = T.tensor3() + self.W_shape = (nb_filter, input_dim, filter_length, 1) self.W = self.init(self.W_shape) self.b = shared_zeros((nb_filter,)) self.params = [self.W, self.b] - self.regularizers = [W_regularizer, b_regularizer] - self.constraints = [W_constraint, b_constraint] + self.regularizers = [] + + self.W_regularizer = regularizers.get(W_regularizer) + if self.W_regularizer: + self.W_regularizer.set_param(self.W) + self.regularizers.append(self.W_regularizer) + + self.b_regularizer = regularizers.get(b_regularizer) + if self.b_regularizer: + self.b_regularizer.set_param(self.b) + self.regularizers.append(self.b_regularizer) + + self.activity_regularizer = regularizers.get(activity_regularizer) + if self.activity_regularizer: + self.activity_regularizer.set_layer(self) + self.regularizers.append(self.activity_regularizer) + + self.W_constraint = constraints.get(W_constraint) + self.b_constraint = constraints.get(b_constraint) + self.constraints = [self.W_constraint, self.b_constraint] if weights is not None: self.set_weights(weights) def get_output(self, train): X = self.get_input(train) + X = T.reshape(X, 
(X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3) + + border_mode = self.border_mode + if border_mode == 'same': + border_mode = 'full' + + conv_out = T.nnet.conv.conv2d(X, self.W, border_mode=border_mode, subsample=self.subsample) + if self.border_mode == 'same': + shift_x = (self.filter_length - 1) // 2 + conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :] - conv_out = theano.tensor.nnet.conv.conv2d(X, self.W, - border_mode=self.border_mode, subsample=self.subsample, image_shape=self.image_shape) output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) + output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1) return output def get_config(self): - return {"name":self.__class__.__name__, - "nb_filter":self.nb_filter, - "stack_size":self.stack_size, - "filter_length":self.filter_length, - "init":self.init.__name__, - "activation":self.activation.__name__, - "image_shape":self.image_shape, - "border_mode":self.border_mode, - "subsample_length":self.subsample_length} + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "nb_filter": self.nb_filter, + "filter_length": self.filter_length, + "init": self.init.__name__, + "activation": self.activation.__name__, + "border_mode": self.border_mode, + "subsample_length": self.subsample_length, + "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, + "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None, + "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, + "W_constraint": self.W_constraint.get_config() if self.W_constraint else None, + "b_constraint": self.b_constraint.get_config() if self.b_constraint else None} class MaxPooling1D(Layer): - def __init__(self, pool_length=2, ignore_border=True): + def __init__(self, pool_length=2, stride=None, ignore_border=True): + super(MaxPooling1D, self).__init__() self.pool_length = pool_length - self.poolsize = (1, pool_length) + self.stride = stride + if self.stride: + self.st = (self.stride, 1) + else: + self.st = None + + self.input = T.tensor3() + self.poolsize = (pool_length, 1) self.ignore_border = ignore_border - - self.input = T.tensor4() - self.params = [] def get_output(self, train): X = self.get_input(train) - output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border) - return output + X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3) + output = T.signal.downsample.max_pool_2d(X, ds=self.poolsize, st=self.st, ignore_border=self.ignore_border) + output = output.dimshuffle(0, 2, 1, 3) + return T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])) def get_config(self): - return {"name":self.__class__.__name__, - "pool_length":self.pool_length, - "ignore_border":self.ignore_border} - + return {"name": self.__class__.__name__, + "stride": self.stride, + "pool_length": self.pool_length, + "ignore_border": self.ignore_border, + "subsample_length": self.subsample_length} class Convolution2D(Layer): - def __init__(self, nb_filter, stack_size, nb_row, nb_col, - init='glorot_uniform', activation='linear', weights=None, - image_shape=None, border_mode='valid', subsample=(1,1), - W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None): - super(Convolution2D,self).__init__() + def __init__(self, nb_filter, stack_size, nb_row, nb_col, + init='glorot_uniform', activation='linear', weights=None, + 
border_mode='valid', subsample=(1, 1), + W_regularizer=None, b_regularizer=None, activity_regularizer=None, + W_constraint=None, b_constraint=None): + if border_mode not in {'valid', 'full', 'same'}: + raise Exception('Invalid border mode for Convolution2D:', border_mode) + + super(Convolution2D, self).__init__() self.init = initializations.get(init) self.activation = activations.get(activation) self.subsample = subsample self.border_mode = border_mode - self.image_shape = image_shape self.nb_filter = nb_filter self.stack_size = stack_size + self.nb_row = nb_row self.nb_col = nb_col @@ -107,55 +150,98 @@ def __init__(self, nb_filter, stack_size, nb_row, nb_col, self.params = [self.W, self.b] - self.regularizers = [W_regularizer, b_regularizer] - self.constraints = [W_constraint, b_constraint] + self.regularizers = [] + + self.W_regularizer = regularizers.get(W_regularizer) + if self.W_regularizer: + self.W_regularizer.set_param(self.W) + self.regularizers.append(self.W_regularizer) + + self.b_regularizer = regularizers.get(b_regularizer) + if self.b_regularizer: + self.b_regularizer.set_param(self.b) + self.regularizers.append(self.b_regularizer) + + self.activity_regularizer = regularizers.get(activity_regularizer) + if self.activity_regularizer: + self.activity_regularizer.set_layer(self) + self.regularizers.append(self.activity_regularizer) + + self.W_constraint = constraints.get(W_constraint) + self.b_constraint = constraints.get(b_constraint) + self.constraints = [self.W_constraint, self.b_constraint] if weights is not None: self.set_weights(weights) def get_output(self, train): X = self.get_input(train) + border_mode = self.border_mode + if border_mode == 'same': + border_mode = 'full' - conv_out = theano.tensor.nnet.conv.conv2d(X, self.W, - border_mode=self.border_mode, subsample=self.subsample, image_shape=self.image_shape) - output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) - return output + conv_out = T.nnet.conv.conv2d(X, self.W, + border_mode=border_mode, + subsample=self.subsample) + if self.border_mode == 'same': + shift_x = (self.nb_row - 1) // 2 + shift_y = (self.nb_col - 1) // 2 + conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y] + + return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) def get_config(self): - return {"name":self.__class__.__name__, - "nb_filter":self.nb_filter, - "stack_size":self.stack_size, - "nb_row":self.nb_row, - "nb_col":self.nb_col, - "init":self.init.__name__, - "activation":self.activation.__name__, - "image_shape":self.image_shape, - "border_mode":self.border_mode, - "subsample":self.subsample} + return {"name": self.__class__.__name__, + "nb_filter": self.nb_filter, + "stack_size": self.stack_size, + "nb_row": self.nb_row, + "nb_col": self.nb_col, + "init": self.init.__name__, + "activation": self.activation.__name__, + "border_mode": self.border_mode, + "subsample": self.subsample, + "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, + "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None, + "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, + "W_constraint": self.W_constraint.get_config() if self.W_constraint else None, + "b_constraint": self.b_constraint.get_config() if self.b_constraint else None} class MaxPooling2D(Layer): - def __init__(self, poolsize=(2, 2), ignore_border=True): - super(MaxPooling2D,self).__init__() + def __init__(self, poolsize=(2, 2), 
stride=None, ignore_border=True): + super(MaxPooling2D, self).__init__() self.input = T.tensor4() self.poolsize = poolsize + self.stride = stride self.ignore_border = ignore_border def get_output(self, train): X = self.get_input(train) - output = downsample.max_pool_2d(X, self.poolsize, ignore_border=self.ignore_border) + output = T.signal.downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride, ignore_border=self.ignore_border) return output def get_config(self): - return {"name":self.__class__.__name__, - "poolsize":self.poolsize, - "ignore_border":self.ignore_border} + return {"name": self.__class__.__name__, + "poolsize": self.poolsize, + "ignore_border": self.ignore_border, + "stride": self.stride} +class ZeroPadding2D(Layer): + def __init__(self, width=1): + super(ZeroPadding2D, self).__init__() + self.width = width + self.input = T.tensor4() -# class ZeroPadding2D(Layer): TODO - -# class Convolution3D: TODO + def get_output(self, train): + X = self.get_input(train) + width = self.width + in_shape = X.shape + out_shape = (in_shape[0], in_shape[1], in_shape[2] + 2 * width, in_shape[3] + 2 * width) + out = T.zeros(out_shape) + indices = (slice(None), slice(None), slice(width, in_shape[2] + width), slice(width, in_shape[3] + width)) + return T.set_subtensor(out[indices], X) -# class MaxPooling3D: TODO - + def get_config(self): + return {"name": self.__class__.__name__, + "width": self.width} diff --git a/keras/layers/core.py b/keras/layers/core.py index f63f2a5730d3..888f3aa287fc 100644 --- a/keras/layers/core.py +++ b/keras/layers/core.py @@ -5,32 +5,58 @@ import theano.tensor as T import numpy as np -from .. import activations, initializations +from .. import activations, initializations, regularizers, constraints from ..utils.theano_utils import shared_zeros, floatX from ..utils.generic_utils import make_tuple -from .. import regularizers -from .. import constraints +from ..regularizers import ActivityRegularizer, Regularizer from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from six.moves import zip -srng = RandomStreams(seed=np.random.randint(10e6)) + class Layer(object): def __init__(self): self.params = [] - def connect(self, layer): + def init_updates(self): + self.updates = [] + + def set_previous(self, layer): + if not self.supports_masked_input() and layer.get_output_mask() is not None: + raise Exception("Attached non-masking layer to layer with masked output") self.previous = layer - def get_output(self, train): - raise NotImplementedError + def get_output(self, train=False): + return self.get_input(train) - def get_input(self, train): + def get_input(self, train=False): if hasattr(self, 'previous'): return self.previous.get_output(train=train) else: return self.input + def supports_masked_input(self): + ''' Whether or not this layer respects the output mask of its previous layer in its calculations. If you try + to attach a layer that does *not* support masked_input to a layer that gives a non-None output_mask() that is + an error''' + return False + + def get_output_mask(self, train=None): + ''' + For some models (such as RNNs) you want a way of being able to mark some output data-points as + "masked", so they are not used in future calculations. In such a model, get_output_mask() should return a mask + of one less dimension than get_output() (so if get_output is (nb_samples, nb_timesteps, nb_dimensions), then the mask + is (nb_samples, nb_timesteps), with a one for every unmasked datapoint, and a zero for every masked one. 
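A concrete shape example, assuming a batch of 2 sequences of 3 timesteps: if get_output(train) has shape (2, 3, nb_dimensions), a valid mask would be

    # get_output_mask(train) evaluates to something like:
    # [[1, 1, 1],
    #  [1, 1, 0]]   # the last timestep of the second sample is masked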
+ + If there is *no* masking then it shall return None. For instance if you attach an Activation layer (they support masking) + to a layer with an output_mask, then that Activation shall also have an output_mask. If you attach it to a layer with no + such mask, then the Activation's get_output_mask shall return None. + + Some layers have an output_mask even if their input is unmasked, notably Embedding which can turn the entry "0" into + a mask. + ''' + return None + def set_weights(self, weights): for p, w in zip(self.params, weights): if p.eval().shape != w.shape: @@ -44,75 +70,129 @@ def get_weights(self): return weights def get_config(self): - return {"name":self.__class__.__name__} + return {"name": self.__class__.__name__} def get_params(self): - regs = [] consts = [] + updates = [] - if hasattr(self, 'regularizers') and len(self.regularizers) == len(self.params): - for r in self.regularizers: - if r: - regs.append(r) - else: - regs.append(regularizers.identity) - elif hasattr(self, 'regularizer') and self.regularizer: - regs += [self.regularizer for _ in range(len(self.params))] + if hasattr(self, 'regularizers'): + regularizers = self.regularizers else: - regs += [regularizers.identity for _ in range(len(self.params))] + regularizers = [] if hasattr(self, 'constraints') and len(self.constraints) == len(self.params): for c in self.constraints: if c: consts.append(c) else: - consts.append(constraints.identity) + consts.append(constraints.identity()) elif hasattr(self, 'constraint') and self.constraint: consts += [self.constraint for _ in range(len(self.params))] else: - consts += [constraints.identity for _ in range(len(self.params))] + consts += [constraints.identity() for _ in range(len(self.params))] + + if hasattr(self, 'updates') and self.updates: + updates += self.updates + + return self.params, regularizers, consts, updates + + def set_name(self, name): + for i in range(len(self.params)): + self.params[i].name = '%s_p%d' % (name, i) + + +class MaskedLayer(Layer): + ''' + If your layer trivially supports masking (by simply copying the input mask to the output), then subclass MaskedLayer + instead of Layer, and make sure that you incorporate the input mask into your calculation of get_output() + ''' + def supports_masked_input(self): + return True + + def get_input_mask(self, train=False): + if hasattr(self, 'previous'): + return self.previous.get_output_mask(train) + else: + return None + + def get_output_mask(self, train=False): + ''' The default output mask is just the input mask unchanged. Override this in your own + implementations if, for instance, you are reshaping the input''' + return self.get_input_mask(train) - return self.params, regs, consts +class Masking(MaskedLayer): + """Mask an input sequence by using a mask value to identify padding. -class Merge(object): - def __init__(self, models, mode='sum'): - ''' Merge the output of a list of models into a single tensor. + This layer copies the input to the output layer with identified padding + replaced with 0s and creates an output mask in the process. + + At each timestep, if the values all equal `mask_value`, + then the corresponding mask value for the timestep is 0 (skipped), + otherwise it is 1. + + """ + def __init__(self, mask_value=0.): + super(Masking, self).__init__() + self.mask_value = mask_value + self.input = T.tensor3() + + def get_output_mask(self, train=False): + X = self.get_input(train) + return T.any(T.ones_like(X) * (1. 
- T.eq(X, self.mask_value)), axis=-1) + + def get_output(self, train=False): + X = self.get_input(train) + return X * T.shape_padright(T.any((1. - T.eq(X, self.mask_value)), axis=-1)) + + def get_config(self): + return {"name": self.__class__.__name__, + "mask_value": self.mask_value} + + +class Merge(Layer): + def __init__(self, layers, mode='sum'): + ''' Merge the output of a list of layers or containers into a single tensor. mode: {'sum', 'concat'} ''' - if len(models) < 2: - raise Exception("Please specify two or more input models to merge") + if len(layers) < 2: + raise Exception("Please specify two or more input layers (or containers) to merge") self.mode = mode - self.models = models + self.layers = layers self.params = [] self.regularizers = [] self.constraints = [] - for m in self.models: - for i in range(len(m.params)): - if not m.params[i] in self.params: - self.params.append(m.params[i]) - self.regularizers.append(m.regularizers[i]) - self.constraints.append(m.constraints[i]) + self.updates = [] + for l in self.layers: + params, regs, consts, updates = l.get_params() + self.regularizers += regs + self.updates += updates + # params and constraints have the same size + for p, c in zip(params, consts): + if p not in self.params: + self.params.append(p) + self.constraints.append(c) def get_params(self): - return self.params, self.regularizers, self.constraints + return self.params, self.regularizers, self.constraints, self.updates def get_output(self, train=False): if self.mode == 'sum': - s = self.models[0].get_output(train) - for i in range(1, len(self.models)): - s += self.models[i].get_output(train) + s = self.layers[0].get_output(train) + for i in range(1, len(self.layers)): + s += self.layers[i].get_output(train) return s elif self.mode == 'concat': - inputs = [self.models[i].get_output(train) for i in range(len(self.models))] + inputs = [self.layers[i].get_output(train) for i in range(len(self.layers))] return T.concatenate(inputs, axis=-1) else: raise Exception('Unknown merge mode') def get_input(self, train=False): res = [] - for i in range(len(self.models)): - o = self.models[i].get_input(train) + for i in range(len(self.layers)): + o = self.layers[i].get_input(train) if not type(o) == list: o = [o] for output in o: @@ -124,66 +204,73 @@ def get_input(self, train=False): def input(self): return self.get_input() + def supports_masked_input(self): + return False + + def get_output_mask(self, train=None): + return None + def get_weights(self): weights = [] - for m in self.models: - weights += m.get_weights() + for l in self.layers: + weights += l.get_weights() return weights def set_weights(self, weights): - for i in range(len(self.models)): - nb_param = len(self.models[i].params) - self.models[i].set_weights(weights[:nb_param]) + for i in range(len(self.layers)): + nb_param = len(self.layers[i].params) + self.layers[i].set_weights(weights[:nb_param]) weights = weights[nb_param:] def get_config(self): - return {"name":self.__class__.__name__, - "models":[m.get_config() for m in self.models], - "mode":self.mode} + return {"name": self.__class__.__name__, + "layers": [l.get_config() for l in self.layers], + "mode": self.mode} -class Dropout(Layer): +class Dropout(MaskedLayer): ''' Hinton's dropout. 
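Behaviour sketch for a drop probability p, matching the implementation below (which scales at test time rather than using inverted dropout):

    # train: X * Bernoulli(retain_prob) mask, retain_prob = 1 - p, sampled from the layer's own RandomStreams
    # test:  X * retain_prob, so expected activations match the training-time scale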
''' def __init__(self, p): - super(Dropout,self).__init__() + super(Dropout, self).__init__() self.p = p + self.srng = RandomStreams(seed=np.random.randint(10e6)) - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) if self.p > 0.: retain_prob = 1. - self.p if train: - X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX) + X *= self.srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX) else: X *= retain_prob return X def get_config(self): - return {"name":self.__class__.__name__, - "p":self.p} + return {"name": self.__class__.__name__, + "p": self.p} -class Activation(Layer): +class Activation(MaskedLayer): ''' Apply an activation function to an output. ''' def __init__(self, activation, target=0, beta=0.1): - super(Activation,self).__init__() + super(Activation, self).__init__() self.activation = activations.get(activation) self.target = target self.beta = beta - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) return self.activation(X) def get_config(self): - return {"name":self.__class__.__name__, - "activation":self.activation.__name__, - "target":self.target, - "beta":self.beta} + return {"name": self.__class__.__name__, + "activation": self.activation.__name__, + "target": self.target, + "beta": self.beta} class Reshape(Layer): @@ -193,17 +280,34 @@ class Reshape(Layer): First dimension is assumed to be nb_samples. ''' def __init__(self, *dims): - super(Reshape,self).__init__() + super(Reshape, self).__init__() self.dims = dims - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) nshape = make_tuple(X.shape[0], *self.dims) return theano.tensor.reshape(X, nshape) def get_config(self): - return {"name":self.__class__.__name__, - "dims":self.dims} + return {"name": self.__class__.__name__, + "dims": self.dims} + + +class Permute(Layer): + ''' + Permute the dimensions of the data according to the given tuple + ''' + def __init__(self, dims): + super(Permute, self).__init__() + self.dims = dims + + def get_output(self, train): + X = self.get_input(train) + return X.dimshuffle((0,) + self.dims) + + def get_config(self): + return {"name": self.__class__.__name__, + "dims": self.dims} class Flatten(Layer): @@ -212,9 +316,9 @@ class Flatten(Layer): First dimension is assumed to be nb_samples. ''' def __init__(self): - super(Flatten,self).__init__() + super(Flatten, self).__init__() - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) size = theano.tensor.prod(X.shape) // X.shape[0] nshape = (X.shape[0], size) @@ -229,28 +333,29 @@ class RepeatVector(Layer): Return tensor of shape (nb_samples, n, dim). ''' def __init__(self, n): - super(RepeatVector,self).__init__() + super(RepeatVector, self).__init__() self.n = n - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) tensors = [X]*self.n stacked = theano.tensor.stack(*tensors) - return stacked.dimshuffle((1,0,2)) + return stacked.dimshuffle((1, 0, 2)) def get_config(self): - return {"name":self.__class__.__name__, - "n":self.n} + return {"name": self.__class__.__name__, + "n": self.n} class Dense(Layer): ''' Just your regular fully connected NN layer. 
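A usage sketch with assumed sizes; string identifiers are resolved through constraints.get / regularizers.get as wired up below:

    fc = Dense(784, 128, activation='relu', W_constraint='maxnorm')
    # forward pass: activation(T.dot(X, W) + b), with W of shape (784, 128) and b of shape (128,)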
''' - def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None, - W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None): + def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None, name=None, + W_regularizer=None, b_regularizer=None, activity_regularizer=None, + W_constraint=None, b_constraint=None): - super(Dense,self).__init__() + super(Dense, self).__init__() self.init = initializations.get(init) self.activation = activations.get(activation) self.input_dim = input_dim @@ -262,26 +367,78 @@ def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='lin self.params = [self.W, self.b] - self.regularizers = [W_regularizer, b_regularizer] - self.constraints = [W_constraint, b_constraint] + self.regularizers = [] + self.W_regularizer = regularizers.get(W_regularizer) + if self.W_regularizer: + self.W_regularizer.set_param(self.W) + self.regularizers.append(self.W_regularizer) + + self.b_regularizer = regularizers.get(b_regularizer) + if self.b_regularizer: + self.b_regularizer.set_param(self.b) + self.regularizers.append(self.b_regularizer) + + self.activity_regularizer = regularizers.get(activity_regularizer) + if self.activity_regularizer: + self.activity_regularizer.set_layer(self) + self.regularizers.append(self.activity_regularizer) + + self.W_constraint = constraints.get(W_constraint) + self.b_constraint = constraints.get(b_constraint) + self.constraints = [self.W_constraint, self.b_constraint] if weights is not None: self.set_weights(weights) - def get_output(self, train): + if name is not None: + self.set_name(name) + + def set_name(self, name): + self.W.name = '%s_W' % name + self.b.name = '%s_b' % name + + def get_output(self, train=False): X = self.get_input(train) output = self.activation(T.dot(X, self.W) + self.b) return output def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "init":self.init.__name__, - "activation":self.activation.__name__} + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "activation": self.activation.__name__, + "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, + "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None, + "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, + "W_constraint": self.W_constraint.get_config() if self.W_constraint else None, + "b_constraint": self.b_constraint.get_config() if self.b_constraint else None} + + +class ActivityRegularization(Layer): + ''' + Layer that passes through its input unchanged, but applies an update + to the cost function based on the activity. 
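Usage sketch, assuming a surrounding Sequential model named `model`; the layer is an identity on the forward pass and only contributes a penalty to the training cost:

    model.add(Dense(64, 64, activation='relu'))
    model.add(ActivityRegularization(l1=0.01))  # adds an L1 penalty on the previous layer's activations to the loss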
+ ''' + def __init__(self, l1=0., l2=0.): + super(ActivityRegularization, self).__init__() + self.l1 = l1 + self.l2 = l2 + + activity_regularizer = ActivityRegularizer(l1=l1, l2=l2) + activity_regularizer.set_layer(self) + self.regularizers = [activity_regularizer] + + def get_output(self, train=False): + return self.get_input(train) + + def get_config(self): + return {"name": self.__class__.__name__, + "l1": self.l1, + "l2": self.l2} -class TimeDistributedDense(Layer): +class TimeDistributedDense(MaskedLayer): ''' Apply a same DenseLayer for each dimension[1] (shared_dimension) input Especially useful after a recurrent network with 'return_sequence=True' @@ -289,10 +446,11 @@ class TimeDistributedDense(Layer): Tensor output dimensions: (nb_sample, shared_dimension, output_dim) ''' - def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None, - W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None): + def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None, + W_regularizer=None, b_regularizer=None, activity_regularizer=None, + W_constraint=None, b_constraint=None): - super(TimeDistributedDense,self).__init__() + super(TimeDistributedDense, self).__init__() self.init = initializations.get(init) self.activation = activations.get(activation) self.input_dim = input_dim @@ -304,29 +462,47 @@ def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='lin self.params = [self.W, self.b] - self.regularizers = [W_regularizer, b_regularizer] - self.constraints = [W_constraint, b_constraint] + self.regularizers = [] + + self.W_regularizer = regularizers.get(W_regularizer) + if self.W_regularizer: + self.W_regularizer.set_param(self.W) + self.regularizers.append(self.W_regularizer) + + self.b_regularizer = regularizers.get(b_regularizer) + if self.b_regularizer: + self.b_regularizer.set_param(self.b) + self.regularizers.append(self.b_regularizer) + + self.activity_regularizer = regularizers.get(activity_regularizer) + if self.activity_regularizer: + self.activity_regularizer.set_layer(self) + self.regularizers.append(self.activity_regularizer) + + self.W_constraint = constraints.get(W_constraint) + self.b_constraint = constraints.get(b_constraint) + self.constraints = [self.W_constraint, self.b_constraint] if weights is not None: self.set_weights(weights) - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) - - def act_func(X): - return self.activation(T.dot(X, self.W) + self.b) - - output, _ = theano.scan(fn = act_func, - sequences = X.dimshuffle(1,0,2), - outputs_info=None) - return output.dimshuffle(1,0,2) + output = self.activation(T.dot(X.dimshuffle(1, 0, 2), self.W) + self.b) + return output.dimshuffle(1, 0, 2) def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "init":self.init.__name__, - "activation":self.activation.__name__} + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "activation": self.activation.__name__, + "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, + "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None, + "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, + "W_constraint": self.W_constraint.get_config() if self.W_constraint else None, + 
"b_constraint": self.b_constraint.get_config() if self.b_constraint else None} + class AutoEncoder(Layer): ''' @@ -334,32 +510,34 @@ class AutoEncoder(Layer): If output_reconstruction then dim(input) = dim(output) else dim(output) = dim(hidden) ''' - def __init__(self, encoder, decoder, output_reconstruction=True, tie_weights=False, weights=None): + def __init__(self, encoder, decoder, output_reconstruction=True, weights=None): - super(AutoEncoder,self).__init__() + super(AutoEncoder, self).__init__() self.output_reconstruction = output_reconstruction - self.tie_weights = tie_weights self.encoder = encoder self.decoder = decoder - self.decoder.connect(self.encoder) + self.decoder.set_previous(self.encoder) self.params = [] self.regularizers = [] self.constraints = [] + self.updates = [] for layer in [self.encoder, self.decoder]: - self.params += layer.params - if hasattr(layer, 'regularizers'): - self.regularizers += layer.regularizers - if hasattr(layer, 'constraints'): - self.constraints += layer.constraints + params, regularizers, constraints, updates = layer.get_params() + self.regularizers += regularizers + self.updates += updates + for p, c in zip(params, constraints): + if p not in self.params: + self.params.append(p) + self.constraints.append(c) if weights is not None: self.set_weights(weights) - def connect(self, node): - self.encoder.connect(node) + def set_previous(self, node): + self.encoder.set_previous(node) def get_weights(self): weights = [] @@ -379,59 +557,20 @@ def get_input(self, train=False): def input(self): return self.encoder.input - def _get_hidden(self, train): + def _get_hidden(self, train=False): return self.encoder.get_output(train) - def get_output(self, train): + def get_output(self, train=False): if not train and not self.output_reconstruction: return self.encoder.get_output(train) - decoded = self.decoder.get_output(train) - - if self.tie_weights: - encoder_params = self.encoder.get_weights() - decoder_params = self.decoder.get_weights() - for dec_param, enc_param in zip(decoder_params, encoder_params): - if len(dec_param.shape) > 1: - enc_param = dec_param.T - - return decoded + return self.decoder.get_output(train) def get_config(self): - return {"name":self.__class__.__name__, - "encoder_config":self.encoder.get_config(), - "decoder_config":self.decoder.get_config(), - "output_reconstruction":self.output_reconstruction, - "tie_weights":self.tie_weights} - - -class DenoisingAutoEncoder(AutoEncoder): - ''' - A denoising autoencoder model that inherits the base features from autoencoder - ''' - def __init__(self, encoder=None, decoder=None, output_reconstruction=True, tie_weights=False, weights=None, corruption_level=0.3): - super(DenoisingAutoEncoder, self).__init__(encoder, decoder, output_reconstruction, tie_weights, weights) - self.corruption_level = corruption_level - - def _get_corrupted_input(self, input): - """ - http://deeplearning.net/tutorial/dA.html - """ - return srng.binomial(size=(self.input_dim, 1), n=1, - p=1-self.corruption_level, - dtype=theano.config.floatX) * input - - def get_input(self, train=False): - uncorrupted_input = super(DenoisingAutoEncoder, self).get_input(train) - return self._get_corrupted_input(uncorrupted_input) - - def get_config(self): - return {"name":self.__class__.__name__, - "encoder_config":self.encoder.get_config(), - "decoder_config":self.decoder.get_config(), - "corruption_level":self.corruption_level, - "output_reconstruction":self.output_reconstruction, - "tie_weights":self.tie_weights} + return {"name": 
self.__class__.__name__, + "encoder_config": self.encoder.get_config(), + "decoder_config": self.decoder.get_config(), + "output_reconstruction": self.output_reconstruction} class MaxoutDense(Layer): @@ -439,10 +578,11 @@ class MaxoutDense(Layer): Max-out layer, nb_feature is the number of pieces in the piecewise linear approx. Refer to http://arxiv.org/pdf/1302.4389.pdf ''' - def __init__(self, input_dim, output_dim, nb_feature=4, init='glorot_uniform', weights=None, - W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None): + def __init__(self, input_dim, output_dim, nb_feature=4, init='glorot_uniform', weights=None, + W_regularizer=None, b_regularizer=None, activity_regularizer=None, + W_constraint=None, b_constraint=None): - super(MaxoutDense,self).__init__() + super(MaxoutDense, self).__init__() self.init = initializations.get(init) self.input_dim = input_dim self.output_dim = output_dim @@ -454,21 +594,44 @@ def __init__(self, input_dim, output_dim, nb_feature=4, init='glorot_uniform', w self.params = [self.W, self.b] - self.regularizers = [W_regularizer, b_regularizer] - self.constraints = [W_constraint, b_constraint] + self.regularizers = [] + + self.W_regularizer = regularizers.get(W_regularizer) + if self.W_regularizer: + self.W_regularizer.set_param(self.W) + self.regularizers.append(self.W_regularizer) + + self.b_regularizer = regularizers.get(b_regularizer) + if self.b_regularizer: + self.b_regularizer.set_param(self.b) + self.regularizers.append(self.b_regularizer) + + self.activity_regularizer = regularizers.get(activity_regularizer) + if self.activity_regularizer: + self.activity_regularizer.set_layer(self) + self.regularizers.append(self.activity_regularizer) + + self.W_constraint = constraints.get(W_constraint) + self.b_constraint = constraints.get(b_constraint) + self.constraints = [self.W_constraint, self.b_constraint] if weights is not None: self.set_weights(weights) - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) # -- don't need activation since it's just linear. output = T.max(T.dot(X, self.W) + self.b, axis=1) return output def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "init":self.init.__name__, - "nb_feature" : self.nb_feature} + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "nb_feature": self.nb_feature, + "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, + "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None, + "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, + "W_constraint": self.W_constraint.get_config() if self.W_constraint else None, + "b_constraint": self.b_constraint.get_config() if self.b_constraint else None} diff --git a/keras/layers/embeddings.py b/keras/layers/embeddings.py index d5a7c3990b6f..1465f0f822fa 100644 --- a/keras/layers/embeddings.py +++ b/keras/layers/embeddings.py @@ -2,55 +2,85 @@ import theano import theano.tensor as T -from .. import activations, initializations -from ..layers.core import Layer +from .. import activations, initializations, regularizers, constraints +from ..layers.core import Layer, MaskedLayer +from ..utils.theano_utils import sharedX + from ..constraints import unitnorm class Embedding(Layer): ''' - Turn positive integers (indexes) into denses vectors of fixed size. 
+ Turn positive integers (indexes) into denses vectors of fixed size. eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]] @input_dim: size of vocabulary (highest input integer + 1) @out_dim: size of dense representation ''' - def __init__(self, input_dim, output_dim, init='uniform', weights=None, W_regularizer=None, W_constraint=None): - super(Embedding,self).__init__() + def __init__(self, input_dim, output_dim, init='uniform', + W_regularizer=None, activity_regularizer=None, W_constraint=None, + mask_zero=False, weights=None): + + super(Embedding, self).__init__() self.init = initializations.get(init) self.input_dim = input_dim self.output_dim = output_dim self.input = T.imatrix() self.W = self.init((self.input_dim, self.output_dim)) + self.mask_zero = mask_zero + self.params = [self.W] - self.constraints = [W_constraint] - self.regularizers = [W_regularizer] + + self.W_constraint = constraints.get(W_constraint) + self.constraints = [self.W_constraint] + + self.regularizers = [] + + self.W_regularizer = regularizers.get(W_regularizer) + if self.W_regularizer: + self.W_regularizer.set_param(self.W) + self.regularizers.append(self.W_regularizer) + + self.activity_regularizer = regularizers.get(activity_regularizer) + if self.activity_regularizer: + self.activity_regularizer.set_layer(self) + self.regularizers.append(self.activity_regularizer) if weights is not None: self.set_weights(weights) + def get_output_mask(self, train=None): + X = self.get_input(train) + if not self.mask_zero: + return None + else: + return T.ones_like(X) * (1 - T.eq(X, 0)) + def get_output(self, train=False): X = self.get_input(train) out = self.W[X] return out def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "init":self.init.__name__} + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None, + "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None, + "W_constraint": self.W_constraint.get_config() if self.W_constraint else None} class WordContextProduct(Layer): ''' - This layer turns a pair of words (a pivot word + a context word, + This layer turns a pair of words (a pivot word + a context word, ie. a word from the same context, or a random, out-of-context word), indentified by their index in a vocabulary, into two dense reprensentations (word representation and context representation). Then it returns activation(dot(pivot_embedding, context_embedding)), - which can be trained to encode the probability + which can be trained to encode the probability of finding the context word in the context of the pivot word (or reciprocally depending on your training procedure). 
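Shape sketch with an assumed vocabulary size; the layer consumes integer index pairs and emits one score per pair:

    wcp = WordContextProduct(input_dim=10000, proj_dim=128, activation='sigmoid')
    # input X: shape (nb_samples, 2) -- column 0 = pivot word index, column 1 = context word index
    # output:  sigmoid(sum(W_w[pivot] * W_c[context], axis=1)), shape (nb_samples, 1)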
@@ -66,9 +96,10 @@ class WordContextProduct(Layer): Efficient Estimation of Word reprensentations in Vector Space http://arxiv.org/pdf/1301.3781v3.pdf ''' - def __init__(self, input_dim, proj_dim=128, - init='uniform', activation='sigmoid', weights=None): - super(WordContextProduct,self).__init__() + def __init__(self, input_dim, proj_dim=128, + init='uniform', activation='sigmoid', weights=None): + + super(WordContextProduct, self).__init__() self.input_dim = input_dim self.proj_dim = proj_dim self.init = initializations.get(init) @@ -85,20 +116,18 @@ def __init__(self, input_dim, proj_dim=128, if weights is not None: self.set_weights(weights) - def get_output(self, train=False): X = self.get_input(train) - w = self.W_w[X[:, 0]] # nb_samples, proj_dim - c = self.W_c[X[:, 1]] # nb_samples, proj_dim + w = self.W_w[X[:, 0]] # nb_samples, proj_dim + c = self.W_c[X[:, 1]] # nb_samples, proj_dim dot = T.sum(w * c, axis=1) dot = theano.tensor.reshape(dot, (X.shape[0], 1)) return self.activation(dot) def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "proj_dim":self.proj_dim, - "init":self.init.__name__, - "activation":self.activation.__name__} - + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "proj_dim": self.proj_dim, + "init": self.init.__name__, + "activation": self.activation.__name__} diff --git a/keras/layers/noise.py b/keras/layers/noise.py new file mode 100644 index 000000000000..99d9a8d7a504 --- /dev/null +++ b/keras/layers/noise.py @@ -0,0 +1,53 @@ +from __future__ import absolute_import +import numpy as np +from .core import MaskedLayer +import theano +import theano.tensor as T +from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams + + +class GaussianNoise(MaskedLayer): + ''' + Corruption process with GaussianNoise + ''' + def __init__(self, sigma): + super(GaussianNoise, self).__init__() + self.sigma = sigma + self.srng = RandomStreams(seed=np.random.randint(10e6)) + + def get_output(self, train=False): + X = self.get_input(train) + if not train or self.sigma == 0: + return X + else: + return X + self.srng.normal(size=X.shape, avg=0.0, std=self.sigma, + dtype=theano.config.floatX) + + def get_config(self): + return {"name": self.__class__.__name__, + "sigma": self.sigma} + + +class GaussianDropout(MaskedLayer): + ''' + Multiplicative Gaussian Noise + Reference: + Dropout: A Simple Way to Prevent Neural Networks from Overfitting + Srivastava, Hinton, et al. 2014 + http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf + ''' + def __init__(self, p): + super(GaussianDropout, self).__init__() + self.p = p + self.srng = RandomStreams(seed=np.random.randint(10e6)) + + def get_output(self, train): + X = self.get_input(train) + if train: + # self.p refers to drop probability rather than retain probability (as in paper) to match Dropout layer syntax + X *= self.srng.normal(size=X.shape, avg=1.0, std=T.sqrt(self.p / (1.0 - self.p)), dtype=theano.config.floatX) + return X + + def get_config(self): + return {"name": self.__class__.__name__, + "p": self.p} diff --git a/keras/layers/normalization.py b/keras/layers/normalization.py index 08c9871a31e6..ed1eab21560b 100644 --- a/keras/layers/normalization.py +++ b/keras/layers/normalization.py @@ -1,12 +1,13 @@ from ..layers.core import Layer -from ..utils.theano_utils import shared_zeros +from ..utils.theano_utils import shared_zeros, shared_ones, ndim_tensor from .. 
import initializations import theano.tensor as T + class BatchNormalization(Layer): ''' - Reference: + Reference: Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift http://arxiv.org/pdf/1502.03167v3.pdf @@ -16,43 +17,36 @@ class BatchNormalization(Layer): momentum: momentum term in the computation of a running estimate of the mean and std of the data ''' def __init__(self, input_shape, epsilon=1e-6, mode=0, momentum=0.9, weights=None): - super(BatchNormalization,self).__init__() + super(BatchNormalization, self).__init__() self.init = initializations.get("uniform") self.input_shape = input_shape self.epsilon = epsilon self.mode = mode self.momentum = momentum + self.input = ndim_tensor(len(self.input_shape)) self.gamma = self.init((self.input_shape)) self.beta = shared_zeros(self.input_shape) - self.running_mean = None - self.running_std = None - self.params = [self.gamma, self.beta] if weights is not None: self.set_weights(weights) + def init_updates(self): + self.running_mean = shared_zeros(self.input_shape) + self.running_std = shared_ones((self.input_shape)) + X = self.get_input(train=True) + m = X.mean(axis=0) + std = T.mean((X - m) ** 2 + self.epsilon, axis=0) ** 0.5 + mean_update = self.momentum * self.running_mean + (1-self.momentum) * m + std_update = self.momentum * self.running_std + (1-self.momentum) * std + self.updates = [(self.running_mean, mean_update), (self.running_std, std_update)] + def get_output(self, train): X = self.get_input(train) if self.mode == 0: - if train: - m = X.mean(axis=0) - # manual computation of std to prevent NaNs - std = T.mean((X-m)**2 + self.epsilon, axis=0) ** 0.5 - X_normed = (X - m) / (std + self.epsilon) - - if self.running_mean is None: - self.running_mean = m - self.running_std = std - else: - self.running_mean *= self.momentum - self.running_mean += (1-self.momentum) * m - self.running_std *= self.momentum - self.running_std += (1-self.momentum) * std - else: - X_normed = (X - self.running_mean) / (self.running_std + self.epsilon) + X_normed = (X - self.running_mean) / (self.running_std + self.epsilon) elif self.mode == 1: m = X.mean(axis=-1, keepdims=True) @@ -63,7 +57,43 @@ def get_output(self, train): return out def get_config(self): - return {"name":self.__class__.__name__, - "input_shape":self.input_shape, - "epsilon":self.epsilon, - "mode":self.mode} + return {"name": self.__class__.__name__, + "input_shape": self.input_shape, + "epsilon": self.epsilon, + "mode": self.mode} + + +class LRN2D(Layer): + """ + This code is adapted from pylearn2. + License at: https://github.com/lisa-lab/pylearn2/blob/master/LICENSE.txt + """ + + def __init__(self, alpha=1e-4, k=2, beta=0.75, n=5): + if n % 2 == 0: + raise NotImplementedError("LRN2D only works with odd n. 
n provided: " + str(n)) + super(LRN2D, self).__init__() + self.alpha = alpha + self.k = k + self.beta = beta + self.n = n + + def get_output(self, train): + X = self.get_input(train) + b, ch, r, c = X.shape + half_n = self.n // 2 + input_sqr = T.sqr(X) + extra_channels = T.alloc(0., b, ch + 2*half_n, r, c) + input_sqr = T.set_subtensor(extra_channels[:, half_n:half_n+ch, :, :], input_sqr) + scale = self.k + for i in range(self.n): + scale += self.alpha * input_sqr[:, i:i+ch, :, :] + scale = scale ** self.beta + return X / scale + + def get_config(self): + return {"name": self.__class__.__name__, + "alpha": self.alpha, + "k": self.k, + "beta": self.beta, + "n": self.n} diff --git a/keras/layers/recurrent.py b/keras/layers/recurrent.py index 504eaec0d788..a8bd6243705d 100644 --- a/keras/layers/recurrent.py +++ b/keras/layers/recurrent.py @@ -5,22 +5,48 @@ import numpy as np from .. import activations, initializations -from ..utils.theano_utils import shared_zeros, alloc_zeros_matrix -from ..layers.core import Layer +from ..utils.theano_utils import shared_scalar, shared_zeros, alloc_zeros_matrix +from ..layers.core import Layer, MaskedLayer from six.moves import range -class SimpleRNN(Layer): + +class Recurrent(MaskedLayer): + def get_output_mask(self, train=None): + if self.return_sequences: + return super(Recurrent, self).get_output_mask(train) + else: + return None + + def get_padded_shuffled_mask(self, train, X, pad=0): + mask = self.get_input_mask(train) + if mask is None: + mask = T.ones_like(X.sum(axis=-1)) # is there a better way to do this without a sum? + + # mask is (nb_samples, time) + mask = T.shape_padright(mask) # (nb_samples, time, 1) + mask = T.addbroadcast(mask, -1) # (time, nb_samples, 1) matrix. + mask = mask.dimshuffle(1, 0, 2) # (time, nb_samples, 1) + + if pad > 0: + # left-pad in time with 0 + padding = alloc_zeros_matrix(pad, mask.shape[1], 1) + mask = T.concatenate([padding, mask], axis=0) + return mask.astype('int8') + + +class SimpleRNN(Recurrent): ''' Fully connected RNN where output is to fed back to input. - Not a particularly useful model, - included for demonstration purposes + Not a particularly useful model, + included for demonstration purposes (demonstrates how to use theano.scan to build a basic RNN). 
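The docstring above points at theano.scan; the pattern it refers to can be sketched on its own as follows (a sketch only, assuming the input has already been dimshuffled to (time, nb_samples, dim); all names are illustrative):

import numpy as np
import theano
import theano.tensor as T

X = T.tensor3()    # (time, nb_samples, dim)
U = T.matrix()     # (dim, dim) recurrent weights

def step(x_t, h_tm1, u):
    # one timestep of the recurrence: h_t = tanh(x_t + h_tm1 . u)
    return T.tanh(x_t + T.dot(h_tm1, u))

outputs, updates = theano.scan(step,
                               sequences=X,                       # iterate over the time axis
                               outputs_info=T.zeros_like(X[0]),   # initial hidden state
                               non_sequences=U)
last_hidden = theano.function([X, U], outputs[-1])
h = last_hidden(np.zeros((5, 2, 3), dtype=theano.config.floatX),
                0.1 * np.eye(3, dtype=theano.config.floatX))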
''' - def __init__(self, input_dim, output_dim, - init='glorot_uniform', inner_init='orthogonal', activation='sigmoid', weights=None, - truncate_gradient=-1, return_sequences=False): - super(SimpleRNN,self).__init__() + def __init__(self, input_dim, output_dim, + init='glorot_uniform', inner_init='orthogonal', activation='sigmoid', weights=None, + truncate_gradient=-1, return_sequences=False): + + super(SimpleRNN, self).__init__() self.init = initializations.get(init) self.inner_init = initializations.get(inner_init) self.input_dim = input_dim @@ -38,62 +64,63 @@ def __init__(self, input_dim, output_dim, if weights is not None: self.set_weights(weights) - def _step(self, x_t, h_tm1, u): + def _step(self, x_t, mask_tm1, h_tm1, u): ''' - Variable names follow the conventions from: + Variable names follow the conventions from: http://deeplearning.net/software/theano/library/scan.html ''' - return self.activation(x_t + T.dot(h_tm1, u)) + return self.activation(x_t + mask_tm1 * T.dot(h_tm1, u)) - def get_output(self, train): - X = self.get_input(train) # shape: (nb_samples, time (padded with zeros at the end), input_dim) + def get_output(self, train=False): + X = self.get_input(train) # shape: (nb_samples, time (padded with zeros), input_dim) # new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension - X = X.dimshuffle((1,0,2)) - + padded_mask = self.get_padded_shuffled_mask(train, X, pad=1) + X = X.dimshuffle((1, 0, 2)) x = T.dot(X, self.W) + self.b - + # scan = theano symbolic loop. # See: http://deeplearning.net/software/theano/library/scan.html # Iterate over the first dimension of the x array (=time). outputs, updates = theano.scan( - self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i]) - sequences=x, # tensors to iterate over, inputs to _step + self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i]) + sequences=[x, dict(input=padded_mask, taps=[-1])], # tensors to iterate over, inputs to _step # initialization of the output. Input to _step with default tap=-1. outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1), - non_sequences=self.U, # static inputs to _step - truncate_gradient=self.truncate_gradient - ) + non_sequences=self.U, # static inputs to _step + truncate_gradient=self.truncate_gradient) + if self.return_sequences: - return outputs.dimshuffle((1,0,2)) + return outputs.dimshuffle((1, 0, 2)) return outputs[-1] def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "init":self.init.__name__, - "inner_init":self.inner_init.__name__, - "activation":self.activation.__name__, - "truncate_gradient":self.truncate_gradient, - "return_sequences":self.return_sequences} + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "inner_init": self.inner_init.__name__, + "activation": self.activation.__name__, + "truncate_gradient": self.truncate_gradient, + "return_sequences": self.return_sequences} -class SimpleDeepRNN(Layer): +class SimpleDeepRNN(Recurrent): ''' - Fully connected RNN where the output of multiple timesteps + Fully connected RNN where the output of multiple timesteps (up to "depth" steps in the past) is fed back to the input: output = activation( W.x_t + b + inner_activation(U_1.h_tm1) + inner_activation(U_2.h_tm2) + ... 
) - This demonstrates how to build RNNs with arbitrary lookback. + This demonstrates how to build RNNs with arbitrary lookback. Also (probably) not a super useful model. ''' def __init__(self, input_dim, output_dim, depth=3, - init='glorot_uniform', inner_init='orthogonal', - activation='sigmoid', inner_activation='hard_sigmoid', - weights=None, truncate_gradient=-1, return_sequences=False): - super(SimpleDeepRNN,self).__init__() + init='glorot_uniform', inner_init='orthogonal', + activation='sigmoid', inner_activation='hard_sigmoid', + weights=None, truncate_gradient=-1, return_sequences=False): + + super(SimpleDeepRNN, self).__init__() self.init = initializations.get(init) self.inner_init = initializations.get(inner_init) self.input_dim = input_dim @@ -113,47 +140,59 @@ def __init__(self, input_dim, output_dim, depth=3, if weights is not None: self.set_weights(weights) - def _step(self, *args): - o = args[0] - for i in range(1, self.depth+1): - o += self.inner_activation(T.dot(args[i], args[i+self.depth])) + def _step(self, x_t, *args): + o = x_t + for i in range(self.depth): + mask_tmi = args[i] + h_tmi = args[i + self.depth] + U_tmi = args[i + 2*self.depth] + o += mask_tmi*self.inner_activation(T.dot(h_tmi, U_tmi)) return self.activation(o) - def get_output(self, train): + def get_output(self, train=False): X = self.get_input(train) - X = X.dimshuffle((1,0,2)) + padded_mask = self.get_padded_shuffled_mask(train, X, pad=self.depth) + X = X.dimshuffle((1, 0, 2)) x = T.dot(X, self.W) + self.b - + + if self.depth == 1: + initial = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1) + else: + initial = T.unbroadcast(T.unbroadcast(alloc_zeros_matrix(self.depth, X.shape[1], self.output_dim), 0), 2) + outputs, updates = theano.scan( self._step, - sequences=x, + sequences=[x, dict( + input=padded_mask, + taps=[(-i) for i in range(self.depth)] + )], outputs_info=[dict( - initial=T.alloc(np.cast[theano.config.floatX](0.), self.depth, X.shape[1], self.output_dim), - taps = [(-i-1) for i in range(self.depth)] + initial=initial, + taps=[(-i-1) for i in range(self.depth)] )], non_sequences=self.Us, truncate_gradient=self.truncate_gradient ) + if self.return_sequences: - return outputs.dimshuffle((1,0,2)) + return outputs.dimshuffle((1, 0, 2)) return outputs[-1] def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "depth":self.depth, - "init":self.init.__name__, - "inner_init":self.inner_init.__name__, - "activation":self.activation.__name__, - "inner_activation":self.inner_activation.__name__, - "truncate_gradient":self.truncate_gradient, - "return_sequences":self.return_sequences} - - - -class GRU(Layer): + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "depth": self.depth, + "init": self.init.__name__, + "inner_init": self.inner_init.__name__, + "activation": self.activation.__name__, + "inner_activation": self.inner_activation.__name__, + "truncate_gradient": self.truncate_gradient, + "return_sequences": self.return_sequences} + + +class GRU(Recurrent): ''' Gated Recurrent Unit - Cho et al. 
2014 @@ -175,12 +214,12 @@ class GRU(Layer): Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling http://arxiv.org/pdf/1412.3555v1.pdf ''' - def __init__(self, input_dim, output_dim=128, - init='glorot_uniform', inner_init='orthogonal', - activation='sigmoid', inner_activation='hard_sigmoid', - weights=None, truncate_gradient=-1, return_sequences=False): + def __init__(self, input_dim, output_dim=128, + init='glorot_uniform', inner_init='orthogonal', + activation='sigmoid', inner_activation='hard_sigmoid', + weights=None, truncate_gradient=-1, return_sequences=False): - super(GRU,self).__init__() + super(GRU, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.truncate_gradient = truncate_gradient @@ -200,7 +239,7 @@ def __init__(self, input_dim, output_dim=128, self.U_r = self.inner_init((self.output_dim, self.output_dim)) self.b_r = shared_zeros((self.output_dim)) - self.W_h = self.init((self.input_dim, self.output_dim)) + self.W_h = self.init((self.input_dim, self.output_dim)) self.U_h = self.inner_init((self.output_dim, self.output_dim)) self.b_h = shared_zeros((self.output_dim)) @@ -213,48 +252,49 @@ def __init__(self, input_dim, output_dim=128, if weights is not None: self.set_weights(weights) - def _step(self, - xz_t, xr_t, xh_t, - h_tm1, - u_z, u_r, u_h): - z = self.inner_activation(xz_t + T.dot(h_tm1, u_z)) - r = self.inner_activation(xr_t + T.dot(h_tm1, u_r)) - hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h)) - h_t = z * h_tm1 + (1 - z) * hh_t + def _step(self, + xz_t, xr_t, xh_t, mask_tm1, + h_tm1, + u_z, u_r, u_h): + h_mask_tm1 = mask_tm1 * h_tm1 + z = self.inner_activation(xz_t + T.dot(h_mask_tm1, u_z)) + r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r)) + hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h)) + h_t = z * h_mask_tm1 + (1 - z) * hh_t return h_t - def get_output(self, train): - X = self.get_input(train) - X = X.dimshuffle((1,0,2)) + def get_output(self, train=False): + X = self.get_input(train) + padded_mask = self.get_padded_shuffled_mask(train, X, pad=1) + X = X.dimshuffle((1, 0, 2)) x_z = T.dot(X, self.W_z) + self.b_z x_r = T.dot(X, self.W_r) + self.b_r x_h = T.dot(X, self.W_h) + self.b_h outputs, updates = theano.scan( - self._step, - sequences=[x_z, x_r, x_h], + self._step, + sequences=[x_z, x_r, x_h, padded_mask], outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1), non_sequences=[self.U_z, self.U_r, self.U_h], - truncate_gradient=self.truncate_gradient - ) + truncate_gradient=self.truncate_gradient) + if self.return_sequences: - return outputs.dimshuffle((1,0,2)) + return outputs.dimshuffle((1, 0, 2)) return outputs[-1] def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "init":self.init.__name__, - "inner_init":self.inner_init.__name__, - "activation":self.activation.__name__, - "inner_activation":self.inner_activation.__name__, - "truncate_gradient":self.truncate_gradient, - "return_sequences":self.return_sequences} - - - -class LSTM(Layer): + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "inner_init": self.inner_init.__name__, + "activation": self.activation.__name__, + "inner_activation": self.inner_activation.__name__, + "truncate_gradient": self.truncate_gradient, + "return_sequences": self.return_sequences} + + +class LSTM(Recurrent): ''' Acts as a spatiotemporal projection, turning a sequence 
of vectors into a single vector. @@ -279,12 +319,12 @@ class LSTM(Layer): Supervised sequence labelling with recurrent neural networks http://www.cs.toronto.edu/~graves/preprint.pdf ''' - def __init__(self, input_dim, output_dim=128, - init='glorot_uniform', inner_init='orthogonal', - activation='tanh', inner_activation='hard_sigmoid', - weights=None, truncate_gradient=-1, return_sequences=False): - - super(LSTM,self).__init__() + def __init__(self, input_dim, output_dim=128, + init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one', + activation='tanh', inner_activation='hard_sigmoid', + weights=None, truncate_gradient=-1, return_sequences=False): + + super(LSTM, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.truncate_gradient = truncate_gradient @@ -292,6 +332,7 @@ def __init__(self, input_dim, output_dim=128, self.init = initializations.get(init) self.inner_init = initializations.get(inner_init) + self.forget_bias_init = initializations.get(forget_bias_init) self.activation = activations.get(activation) self.inner_activation = activations.get(inner_activation) self.input = T.tensor3() @@ -302,7 +343,7 @@ def __init__(self, input_dim, output_dim=128, self.W_f = self.init((self.input_dim, self.output_dim)) self.U_f = self.inner_init((self.output_dim, self.output_dim)) - self.b_f = shared_zeros((self.output_dim)) + self.b_f = self.forget_bias_init((self.output_dim)) self.W_c = self.init((self.input_dim, self.output_dim)) self.U_c = self.inner_init((self.output_dim, self.output_dim)) @@ -322,49 +363,363 @@ def __init__(self, input_dim, output_dim=128, if weights is not None: self.set_weights(weights) - def _step(self, - xi_t, xf_t, xo_t, xc_t, - h_tm1, c_tm1, - u_i, u_f, u_o, u_c): - i_t = self.inner_activation(xi_t + T.dot(h_tm1, u_i)) - f_t = self.inner_activation(xf_t + T.dot(h_tm1, u_f)) - c_t = f_t * c_tm1 + i_t * self.activation(xc_t + T.dot(h_tm1, u_c)) - o_t = self.inner_activation(xo_t + T.dot(h_tm1, u_o)) + def _step(self, + xi_t, xf_t, xo_t, xc_t, mask_tm1, + h_tm1, c_tm1, + u_i, u_f, u_o, u_c): + h_mask_tm1 = mask_tm1 * h_tm1 + c_mask_tm1 = mask_tm1 * c_tm1 + + i_t = self.inner_activation(xi_t + T.dot(h_mask_tm1, u_i)) + f_t = self.inner_activation(xf_t + T.dot(h_mask_tm1, u_f)) + c_t = f_t * c_mask_tm1 + i_t * self.activation(xc_t + T.dot(h_mask_tm1, u_c)) + o_t = self.inner_activation(xo_t + T.dot(h_mask_tm1, u_o)) h_t = o_t * self.activation(c_t) return h_t, c_t - def get_output(self, train): - X = self.get_input(train) - X = X.dimshuffle((1,0,2)) + def get_output(self, train=False): + X = self.get_input(train) + padded_mask = self.get_padded_shuffled_mask(train, X, pad=1) + X = X.dimshuffle((1, 0, 2)) xi = T.dot(X, self.W_i) + self.b_i xf = T.dot(X, self.W_f) + self.b_f xc = T.dot(X, self.W_c) + self.b_c xo = T.dot(X, self.W_o) + self.b_o - + [outputs, memories], updates = theano.scan( - self._step, - sequences=[xi, xf, xo, xc], + self._step, + sequences=[xi, xf, xo, xc, padded_mask], outputs_info=[ T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1), T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1) - ], - non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c], - truncate_gradient=self.truncate_gradient - ) + ], + non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c], + truncate_gradient=self.truncate_gradient) + + if self.return_sequences: + return outputs.dimshuffle((1, 0, 2)) + return outputs[-1] + + def get_config(self): + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, 
+ "output_dim": self.output_dim, + "init": self.init.__name__, + "inner_init": self.inner_init.__name__, + "forget_bias_init": self.forget_bias_init.__name__, + "activation": self.activation.__name__, + "inner_activation": self.inner_activation.__name__, + "truncate_gradient": self.truncate_gradient, + "return_sequences": self.return_sequences} + + +class JZS1(Recurrent): + ''' + Evolved recurrent neural network architectures from the evaluation of thousands + of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015. + + This corresponds to the `MUT1` architecture described in the paper. + + Takes inputs with shape: + (nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim) + + and returns outputs with shape: + if not return_sequences: + (nb_samples, output_dim) + if return_sequences: + (nb_samples, max_sample_length, output_dim) + + References: + An Empirical Exploration of Recurrent Network Architectures + http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf + ''' + def __init__(self, input_dim, output_dim=128, + init='glorot_uniform', inner_init='orthogonal', + activation='tanh', inner_activation='sigmoid', + weights=None, truncate_gradient=-1, return_sequences=False): + + super(JZS1, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.truncate_gradient = truncate_gradient + self.return_sequences = return_sequences + + self.init = initializations.get(init) + self.inner_init = initializations.get(inner_init) + self.activation = activations.get(activation) + self.inner_activation = activations.get(inner_activation) + self.input = T.tensor3() + + self.W_z = self.init((self.input_dim, self.output_dim)) + self.b_z = shared_zeros((self.output_dim)) + + self.W_r = self.init((self.input_dim, self.output_dim)) + self.U_r = self.inner_init((self.output_dim, self.output_dim)) + self.b_r = shared_zeros((self.output_dim)) + + self.U_h = self.inner_init((self.output_dim, self.output_dim)) + self.b_h = shared_zeros((self.output_dim)) + + # P_h used to project X onto different dimension, using sparse random projections + if self.input_dim == self.output_dim: + self.Pmat = theano.shared(np.identity(self.output_dim, dtype=theano.config.floatX), name=None) + else: + P = np.random.binomial(1, 0.5, size=(self.input_dim, self.output_dim)).astype(theano.config.floatX) * 2 - 1 + P = 1 / np.sqrt(self.input_dim) * P + self.Pmat = theano.shared(P, name=None) + + self.params = [ + self.W_z, self.b_z, + self.W_r, self.U_r, self.b_r, + self.U_h, self.b_h, + ] + + if weights is not None: + self.set_weights(weights) + + def _step(self, + xz_t, xr_t, xh_t, mask_tm1, + h_tm1, + u_r, u_h): + h_mask_tm1 = mask_tm1 * h_tm1 + z = self.inner_activation(xz_t) + r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r)) + hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h)) + h_t = hh_t * z + h_mask_tm1 * (1 - z) + return h_t + + def get_output(self, train=False): + X = self.get_input(train) + padded_mask = self.get_padded_shuffled_mask(train, X, pad=1) + X = X.dimshuffle((1, 0, 2)) + + x_z = T.dot(X, self.W_z) + self.b_z + x_r = T.dot(X, self.W_r) + self.b_r + x_h = T.tanh(T.dot(X, self.Pmat)) + self.b_h + outputs, updates = theano.scan( + self._step, + sequences=[x_z, x_r, x_h, padded_mask], + outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1), + non_sequences=[self.U_r, self.U_h], + truncate_gradient=self.truncate_gradient) + if self.return_sequences: + return outputs.dimshuffle((1, 0, 
2)) + return outputs[-1] + + def get_config(self): + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "inner_init": self.inner_init.__name__, + "activation": self.activation.__name__, + "inner_activation": self.inner_activation.__name__, + "truncate_gradient": self.truncate_gradient, + "return_sequences": self.return_sequences} + + +class JZS2(Recurrent): + ''' + Evolved recurrent neural network architectures from the evaluation of thousands + of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015. + + This corresponds to the `MUT2` architecture described in the paper. + + Takes inputs with shape: + (nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim) + + and returns outputs with shape: + if not return_sequences: + (nb_samples, output_dim) + if return_sequences: + (nb_samples, max_sample_length, output_dim) + + References: + An Empirical Exploration of Recurrent Network Architectures + http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf + ''' + def __init__(self, input_dim, output_dim=128, + init='glorot_uniform', inner_init='orthogonal', + activation='tanh', inner_activation='sigmoid', + weights=None, truncate_gradient=-1, return_sequences=False): + + super(JZS2, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.truncate_gradient = truncate_gradient + self.return_sequences = return_sequences + + self.init = initializations.get(init) + self.inner_init = initializations.get(inner_init) + self.activation = activations.get(activation) + self.inner_activation = activations.get(inner_activation) + self.input = T.tensor3() + + self.W_z = self.init((self.input_dim, self.output_dim)) + self.U_z = self.inner_init((self.output_dim, self.output_dim)) + self.b_z = shared_zeros((self.output_dim)) + + self.U_r = self.inner_init((self.output_dim, self.output_dim)) + self.b_r = shared_zeros((self.output_dim)) + + self.W_h = self.init((self.input_dim, self.output_dim)) + self.U_h = self.inner_init((self.output_dim, self.output_dim)) + self.b_h = shared_zeros((self.output_dim)) + + # P_h used to project X onto different dimension, using sparse random projections + if self.input_dim == self.output_dim: + self.Pmat = theano.shared(np.identity(self.output_dim, dtype=theano.config.floatX), name=None) + else: + P = np.random.binomial(1, 0.5, size=(self.input_dim, self.output_dim)).astype(theano.config.floatX) * 2 - 1 + P = 1 / np.sqrt(self.input_dim) * P + self.Pmat = theano.shared(P, name=None) + + self.params = [ + self.W_z, self.U_z, self.b_z, + self.U_r, self.b_r, + self.W_h, self.U_h, self.b_h, + ] + + if weights is not None: + self.set_weights(weights) + + def _step(self, + xz_t, xr_t, xh_t, mask_tm1, + h_tm1, + u_z, u_r, u_h): + h_mask_tm1 = mask_tm1 * h_tm1 + z = self.inner_activation(xz_t + T.dot(h_mask_tm1, u_z)) + r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r)) + hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h)) + h_t = hh_t * z + h_mask_tm1 * (1 - z) + return h_t + + def get_output(self, train=False): + X = self.get_input(train) + padded_mask = self.get_padded_shuffled_mask(train, X, pad=1) + X = X.dimshuffle((1, 0, 2)) + + x_z = T.dot(X, self.W_z) + self.b_z + x_r = T.dot(X, self.Pmat) + self.b_r + x_h = T.dot(X, self.W_h) + self.b_h + outputs, updates = theano.scan( + self._step, + sequences=[x_z, x_r, x_h, padded_mask], + 
outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1), + non_sequences=[self.U_z, self.U_r, self.U_h], + truncate_gradient=self.truncate_gradient) if self.return_sequences: - return outputs.dimshuffle((1,0,2)) + return outputs.dimshuffle((1, 0, 2)) return outputs[-1] def get_config(self): - return {"name":self.__class__.__name__, - "input_dim":self.input_dim, - "output_dim":self.output_dim, - "init":self.init.__name__, - "inner_init":self.inner_init.__name__, - "activation":self.activation.__name__, - "inner_activation":self.inner_activation.__name__, - "truncate_gradient":self.truncate_gradient, - "return_sequences":self.return_sequences} - + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "inner_init": self.inner_init.__name__, + "activation": self.activation.__name__, + "inner_activation": self.inner_activation.__name__, + "truncate_gradient": self.truncate_gradient, + "return_sequences": self.return_sequences} + + +class JZS3(Recurrent): + ''' + Evolved recurrent neural network architectures from the evaluation of thousands + of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015. + + This corresponds to the `MUT3` architecture described in the paper. + Takes inputs with shape: + (nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim) + + and returns outputs with shape: + if not return_sequences: + (nb_samples, output_dim) + if return_sequences: + (nb_samples, max_sample_length, output_dim) + + References: + An Empirical Exploration of Recurrent Network Architectures + http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf + ''' + def __init__(self, input_dim, output_dim=128, + init='glorot_uniform', inner_init='orthogonal', + activation='tanh', inner_activation='sigmoid', + weights=None, truncate_gradient=-1, return_sequences=False): + + super(JZS3, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.truncate_gradient = truncate_gradient + self.return_sequences = return_sequences + + self.init = initializations.get(init) + self.inner_init = initializations.get(inner_init) + self.activation = activations.get(activation) + self.inner_activation = activations.get(inner_activation) + self.input = T.tensor3() + + self.W_z = self.init((self.input_dim, self.output_dim)) + self.U_z = self.inner_init((self.output_dim, self.output_dim)) + self.b_z = shared_zeros((self.output_dim)) + + self.W_r = self.init((self.input_dim, self.output_dim)) + self.U_r = self.inner_init((self.output_dim, self.output_dim)) + self.b_r = shared_zeros((self.output_dim)) + + self.W_h = self.init((self.input_dim, self.output_dim)) + self.U_h = self.inner_init((self.output_dim, self.output_dim)) + self.b_h = shared_zeros((self.output_dim)) + + self.params = [ + self.W_z, self.U_z, self.b_z, + self.W_r, self.U_r, self.b_r, + self.W_h, self.U_h, self.b_h, + ] + + if weights is not None: + self.set_weights(weights) + + def _step(self, + xz_t, xr_t, xh_t, mask_tm1, + h_tm1, + u_z, u_r, u_h): + h_mask_tm1 = mask_tm1 * h_tm1 + z = self.inner_activation(xz_t + T.dot(T.tanh(h_mask_tm1), u_z)) + r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r)) + hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h)) + h_t = hh_t * z + h_mask_tm1 * (1 - z) + return h_t + + def get_output(self, train=False): + X = self.get_input(train) + padded_mask = self.get_padded_shuffled_mask(train, X, pad=1) + X = 
X.dimshuffle((1, 0, 2)) + + x_z = T.dot(X, self.W_z) + self.b_z + x_r = T.dot(X, self.W_r) + self.b_r + x_h = T.dot(X, self.W_h) + self.b_h + outputs, updates = theano.scan( + self._step, + sequences=[x_z, x_r, x_h, padded_mask], + outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1), + non_sequences=[self.U_z, self.U_r, self.U_h], + truncate_gradient=self.truncate_gradient + ) + if self.return_sequences: + return outputs.dimshuffle((1, 0, 2)) + return outputs[-1] + + def get_config(self): + return {"name": self.__class__.__name__, + "input_dim": self.input_dim, + "output_dim": self.output_dim, + "init": self.init.__name__, + "inner_init": self.inner_init.__name__, + "activation": self.activation.__name__, + "inner_activation": self.inner_activation.__name__, + "truncate_gradient": self.truncate_gradient, + "return_sequences": self.return_sequences} diff --git a/keras/models.py b/keras/models.py index 1fc737401244..ba59103565be 100644 --- a/keras/models.py +++ b/keras/models.py @@ -3,32 +3,52 @@ import theano import theano.tensor as T import numpy as np -import warnings, time, copy +import warnings, time, copy, pprint +from six.moves import range +import six from . import optimizers from . import objectives +from . import regularizers +from . import constraints from . import callbacks as cbks +from .utils.layer_utils import container_from_config from .utils.generic_utils import Progbar, printv from .layers import containers -from six.moves import range + def standardize_y(y): if not hasattr(y, 'shape'): y = np.asarray(y) if len(y.shape) == 1: - y = np.reshape(y, (len(y), 1)) + y = np.expand_dims(y, 1) return y + +def batch_shuffle(index_array, batch_size): + batch_count = int(len(index_array)/batch_size) + # to reshape we need to be cleanly divisible by batch size + # we stash extra items and reappend them after shuffling + last_batch = index_array[batch_count*batch_size:] + index_array = index_array[:batch_count*batch_size] + index_array = index_array.reshape((batch_count, batch_size)) + np.random.shuffle(index_array) + index_array = index_array.flatten() + return np.append(index_array, last_batch) + + def make_batches(size, batch_size): nb_batch = int(np.ceil(size/float(batch_size))) return [(i*batch_size, min(size, (i+1)*batch_size)) for i in range(0, nb_batch)] + def standardize_X(X): if type(X) == list: return X else: return [X] + def slice_X(X, start=None, stop=None): if type(X) == list: if hasattr(start, '__len__'): @@ -41,31 +61,290 @@ def slice_X(X, start=None, stop=None): else: return X[start:stop] -def calculate_loss_weights(Y, sample_weight=None, class_weight=None): + +def weighted_objective(fn): + def weighted(y_true, y_pred, weights): + # it's important that 0 * Inf == 0, not NaN, so I need to mask first + masked_y_true = y_true[weights.nonzero()[:-1]] + masked_y_pred = y_pred[weights.nonzero()[:-1]] + masked_weights = weights[weights.nonzero()] + obj_output = fn(masked_y_true, masked_y_pred) + return (masked_weights.flatten() * obj_output.flatten()).mean() + return weighted + + +def standardize_weights(y, sample_weight=None, class_weight=None): if sample_weight is not None: - if isinstance(sample_weight, list): - w = np.array(sample_weight) - else: - w = sample_weight + return standardize_y(sample_weight) elif isinstance(class_weight, dict): - if Y.shape[1] > 1: - y_classes = Y.argmax(axis=1) - elif Y.shape[1] == 1: - y_classes = np.reshape(Y, Y.shape[0]) + if len(y.shape) > 2: + raise Exception('class_weight not supported for 3+ dimensional 
targets.') + if y.shape[1] > 1: + y_classes = y.argmax(axis=1) + elif y.shape[1] == 1: + y_classes = np.reshape(y, y.shape[0]) else: - y_classes = Y - w = np.array(list(map(lambda x: class_weight[x], y_classes))) + y_classes = y + return np.expand_dims(np.array(list(map(lambda x: class_weight[x], y_classes))), 1) + else: + return np.ones(y.shape[:-1] + (1,)) + + +def model_from_yaml(yaml_string): + ''' + Returns a model generated from a local yaml file, + which is either created by hand or from to_yaml method of Sequential or Graph + ''' + import yaml + config = yaml.load(yaml_string) + return model_from_config(config) + + +def model_from_json(json_string): + import json + config = json.loads(json_string) + return model_from_config(config) + + +def model_from_config(config): + model_name = config.get('name') + if model_name not in {'Graph', 'Sequential'}: + raise Exception('Unrecognized model:', model_name) + + # Create a container then set class to appropriate model + model = container_from_config(config) + if model_name == 'Graph': + model.__class__ = Graph + elif model_name == 'Sequential': + model.__class__ = Sequential + + if 'optimizer' in config: + # if it has an optimizer, the model is assumed to be compiled + loss = config.get('loss') + class_mode = config.get('class_mode') + theano_mode = config.get('theano_mode') + + optimizer_params = dict([(k, v) for k, v in config.get('optimizer').items()]) + optimizer_name = optimizer_params.pop('name') + optimizer = optimizers.get(optimizer_name, optimizer_params) + + if model_name == 'Sequential': + model.compile(loss=loss, optimizer=optimizer, class_mode=class_mode, theano_mode=theano_mode) + elif model_name == 'Graph': + model.compile(loss=loss, optimizer=optimizer, theano_mode=theano_mode) + + return model + + +def get_function_name(o): + if isinstance(o, six.string_types): + return o else: - w = np.ones((Y.shape[0])) - return w + return o.__name__ + class Model(object): + def _fit(self, f, ins, out_labels=[], batch_size=128, nb_epoch=100, verbose=1, callbacks=[], + validation_split=0., val_f=None, val_ins=None, shuffle=True, metrics=[]): + ''' + Abstract fit function for f(*ins). Assume that f returns a list, labelled by out_labels. 
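Concretely, f receives one batch worth of each array in ins and returns a list whose entries line up with out_labels, which is how batch_logs gets filled below. A self-contained toy of that contract (every name and number here is illustrative; in the real model f is a compiled theano function such as _train_with_acc):

import numpy as np

def toy_f(X_batch, y_batch, w_batch):
    # stands in for a compiled training function returning [loss, accuracy]
    preds = X_batch.mean(axis=1, keepdims=True)
    loss = float(np.mean(w_batch * (preds - y_batch) ** 2))
    acc = float(np.mean((preds > 0.5) == (y_batch > 0.5)))
    return [loss, acc]

ins = [np.random.rand(256, 10), np.random.rand(256, 1), np.ones((256, 1))]
out_labels = ['loss', 'acc']
batch_logs = dict(zip(out_labels, toy_f(*[x[:128] for x in ins])))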
+ ''' + do_validation = False + if val_f and val_ins: + do_validation = True + if verbose: + print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0]))) + else: + if 0 < validation_split < 1: + do_validation = True + split_at = int(len(ins[0]) * (1 - validation_split)) + (ins, val_ins) = (slice_X(ins, 0, split_at), slice_X(ins, split_at)) + if verbose: + print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0]))) + + nb_train_sample = len(ins[0]) + index_array = np.arange(nb_train_sample) + + history = cbks.History() + if verbose: + callbacks = [history, cbks.BaseLogger()] + callbacks + else: + callbacks = [history] + callbacks + callbacks = cbks.CallbackList(callbacks) + + callbacks._set_model(self) + callbacks._set_params({ + 'batch_size': batch_size, + 'nb_epoch': nb_epoch, + 'nb_sample': nb_train_sample, + 'verbose': verbose, + 'do_validation': do_validation, + 'metrics': metrics, + }) + callbacks.on_train_begin() + + self.stop_training = False + for epoch in range(nb_epoch): + callbacks.on_epoch_begin(epoch) + if shuffle == 'batch': + index_array = batch_shuffle(index_array, batch_size) + elif shuffle: + np.random.shuffle(index_array) + + batches = make_batches(nb_train_sample, batch_size) + for batch_index, (batch_start, batch_end) in enumerate(batches): + batch_ids = index_array[batch_start:batch_end] + try: + ins_batch = slice_X(ins, batch_ids) + except TypeError as err: + print('TypeError while preparing batch. \ + If using HDF5 input data, pass shuffle="batch".\n') + raise + + batch_logs = {} + batch_logs['batch'] = batch_index + batch_logs['size'] = len(batch_ids) + callbacks.on_batch_begin(batch_index, batch_logs) + outs = f(*ins_batch) + if type(outs) != list: + outs = [outs] + for l, o in zip(out_labels, outs): + batch_logs[l] = o + + callbacks.on_batch_end(batch_index, batch_logs) + + epoch_logs = {} + if batch_index == len(batches) - 1: # last batch + # validation + if do_validation: + # replace with self._evaluate + val_outs = self._test_loop(val_f, val_ins, batch_size=batch_size, verbose=0) + if type(val_outs) != list: + val_outs = [val_outs] + # same labels assumed + for l, o in zip(out_labels, val_outs): + epoch_logs['val_' + l] = o + + callbacks.on_epoch_end(epoch, epoch_logs) + if self.stop_training: + break + + callbacks.on_train_end() + return history + + def _predict_loop(self, f, ins, batch_size=128, verbose=0): + ''' + Abstract method to loop over some data in batches. + ''' + nb_sample = len(ins[0]) + outs = [] + if verbose == 1: + progbar = Progbar(target=nb_sample) + batches = make_batches(nb_sample, batch_size) + index_array = np.arange(nb_sample) + for batch_index, (batch_start, batch_end) in enumerate(batches): + batch_ids = index_array[batch_start:batch_end] + ins_batch = slice_X(ins, batch_ids) + + batch_outs = f(*ins_batch) + if type(batch_outs) != list: + batch_outs = [batch_outs] + if batch_index == 0: + for batch_out in batch_outs: + shape = (nb_sample,) + batch_out.shape[1:] + outs.append(np.zeros(shape)) + + for i, batch_out in enumerate(batch_outs): + outs[i][batch_start:batch_end] = batch_out + if verbose == 1: + progbar.update(batch_end) + return outs + + def _test_loop(self, f, ins, batch_size=128, verbose=0): + ''' + Abstract method to loop over some data in batches. 
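The loop below accumulates each batch's outputs weighted by batch size and divides by the total number of samples, so a short final batch does not skew the average. The arithmetic amounts to (numbers illustrative):

batch_losses = [0.9, 0.7, 0.8]     # loss returned by f for each batch
batch_sizes = [128, 128, 44]       # the last batch is smaller
total = sum(l * n for l, n in zip(batch_losses, batch_sizes))
mean_loss = total / float(sum(batch_sizes))   # weighted mean over all 300 samples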
+ ''' + nb_sample = len(ins[0]) + outs = [] + if verbose == 1: + progbar = Progbar(target=nb_sample) + batches = make_batches(nb_sample, batch_size) + index_array = np.arange(nb_sample) + for batch_index, (batch_start, batch_end) in enumerate(batches): + batch_ids = index_array[batch_start:batch_end] + ins_batch = slice_X(ins, batch_ids) + + batch_outs = f(*ins_batch) + if type(batch_outs) == list: + if batch_index == 0: + for batch_out in enumerate(batch_outs): + outs.append(0.) + for i, batch_out in enumerate(batch_outs): + outs[i] += batch_out * len(batch_ids) + else: + if batch_index == 0: + outs.append(0.) + outs[0] += batch_outs * len(batch_ids) + + if verbose == 1: + progbar.update(batch_end) + for i, out in enumerate(outs): + outs[i] /= nb_sample + return outs + + def get_config(self, verbose=0): + config = super(Model, self).get_config() + for p in ['class_mode', 'theano_mode']: + if hasattr(self, p): + config[p] = getattr(self, p) + if hasattr(self, 'optimizer'): + config['optimizer'] = self.optimizer.get_config() + if hasattr(self, 'loss'): + if type(self.loss) == dict: + config['loss'] = dict([(k, get_function_name(v)) for k, v in self.loss.items()]) + else: + config['loss'] = get_function_name(self.loss) + + if verbose: + pp = pprint.PrettyPrinter(indent=4) + pp.pprint(config) + return config + + def to_yaml(self): + # dump model configuration to yaml string + import yaml + config = self.get_config() + return yaml.dump(config) + + def to_json(self): + # dump model configuration to json string + import json + config = self.get_config() + return json.dump(config) + + +class Sequential(Model, containers.Sequential): + ''' + Inherits from Model the following methods: + - _fit + - _predict + - _evaluate + Inherits from containers.Sequential the following methods: + - __init__ + - add + - get_output + - get_input + - get_weights + - set_weights + ''' def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None): self.optimizer = optimizers.get(optimizer) + self.loss = objectives.get(loss) + weighted_loss = weighted_objective(objectives.get(loss)) - # input of model + # input of model self.X_train = self.get_input(train=True) self.X_test = self.get_input(train=False) @@ -75,8 +354,14 @@ def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None): # target of model self.y = T.zeros_like(self.y_train) - train_loss = self.loss(self.y, self.y_train) - test_score = self.loss(self.y, self.y_test) + self.weights = T.ones_like(self.y_train) + + train_loss = weighted_loss(self.y, self.y_train, self.weights) + test_loss = weighted_loss(self.y, self.y_test, self.weights) + + train_loss.name = 'train_loss' + test_loss.name = 'test_loss' + self.y.name = 'y' if class_mode == "categorical": train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1))) @@ -88,162 +373,101 @@ def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None): else: raise Exception("Invalid class mode:" + str(class_mode)) self.class_mode = class_mode + self.theano_mode = theano_mode - updates = self.optimizer.get_updates(self.params, self.regularizers, self.constraints, train_loss) + for r in self.regularizers: + train_loss = r(train_loss) + updates = self.optimizer.get_updates(self.params, self.constraints, train_loss) + updates += self.updates if type(self.X_train) == list: - train_ins = self.X_train + [self.y] - test_ins = self.X_test + [self.y] + train_ins = self.X_train + [self.y, self.weights] + test_ins = self.X_test + [self.y, 
self.weights] predict_ins = self.X_test else: - train_ins = [self.X_train, self.y] - test_ins = [self.X_test, self.y] + train_ins = [self.X_train, self.y, self.weights] + test_ins = [self.X_test, self.y, self.weights] predict_ins = [self.X_test] - self._train = theano.function(train_ins, train_loss, - updates=updates, allow_input_downcast=True, mode=theano_mode) - self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy], - updates=updates, allow_input_downcast=True, mode=theano_mode) - self._predict = theano.function(predict_ins, self.y_test, - allow_input_downcast=True, mode=theano_mode) - self._test = theano.function(test_ins, test_score, - allow_input_downcast=True, mode=theano_mode) - self._test_with_acc = theano.function(test_ins, [test_score, test_accuracy], - allow_input_downcast=True, mode=theano_mode) - - - def train(self, X, y, accuracy=False): + self._train = theano.function(train_ins, train_loss, updates=updates, + allow_input_downcast=True, mode=theano_mode) + self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy], updates=updates, + allow_input_downcast=True, mode=theano_mode) + self._predict = theano.function(predict_ins, self.y_test, + allow_input_downcast=True, mode=theano_mode) + self._test = theano.function(test_ins, test_loss, + allow_input_downcast=True, mode=theano_mode) + self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy], + allow_input_downcast=True, mode=theano_mode) + + def train_on_batch(self, X, y, accuracy=False, class_weight=None, sample_weight=None): X = standardize_X(X) y = standardize_y(y) + sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight) - ins = X + [y] + ins = X + [y, sample_weight] if accuracy: return self._train_with_acc(*ins) else: return self._train(*ins) - - def test(self, X, y, accuracy=False): + def test_on_batch(self, X, y, accuracy=False, sample_weight=None): X = standardize_X(X) y = standardize_y(y) - ins = X + [y] + sample_weight = standardize_weights(y, sample_weight=sample_weight) + + ins = X + [y, sample_weight] if accuracy: return self._test_with_acc(*ins) else: return self._test(*ins) + def predict_on_batch(self, X): + ins = standardize_X(X) + return self._predict(*ins) def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[], - validation_split=0., validation_data=None, shuffle=True, show_accuracy=False): + validation_split=0., validation_data=None, shuffle=True, show_accuracy=False, + class_weight=None, sample_weight=None): X = standardize_X(X) y = standardize_y(y) + sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight) - do_validation = False + val_f = None + val_ins = None + if validation_data or validation_split: + if show_accuracy: + val_f = self._test_with_acc + else: + val_f = self._test if validation_data: try: X_val, y_val = validation_data except: raise Exception("Invalid format for validation data; provide a tuple (X_val, y_val). \ X_val may be a numpy array or a list of numpy arrays depending on your model input.") - do_validation = True X_val = standardize_X(X_val) y_val = standardize_y(y_val) - if verbose: - print("Train on %d samples, validate on %d samples" % (len(y), len(y_val))) - else: - if 0 < validation_split < 1: - # If a validation split size is given (e.g. validation_split=0.2) - # then split X into smaller X and X_val, - # and split y into smaller y and y_val. 
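The rewritten fit above accepts class_weight and sample_weight alongside validation_split and show_accuracy; a hypothetical end-to-end use of those options (random data, layer sizes and weight values are placeholders, and Dense/Activation are assumed from keras.layers.core as elsewhere in this patch):

import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation

X_train = np.random.rand(1000, 20)
y_train = (np.random.rand(1000, 1) > 0.8).astype('float32')   # imbalanced binary labels

model = Sequential()
model.add(Dense(20, 1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')

model.fit(X_train, y_train,
          batch_size=128, nb_epoch=5,
          validation_split=0.1, show_accuracy=True,
          class_weight={0: 1.0, 1: 4.0})   # upweight the rarer positive class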
- do_validation = True - split_at = int(len(y) * (1 - validation_split)) - (X, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at)) - (y, y_val) = (y[0:split_at], y[split_at:]) - if verbose: - print("Train on %d samples, validate on %d samples" % (len(y), len(y_val))) - - index_array = np.arange(len(y)) - - if verbose: - callbacks = [cbks.BaseLogger()] + callbacks - callbacks = cbks.CallbackList([cbks.History()] + callbacks) - - callbacks._set_model(self) - callbacks._set_params({ - 'batch_size': batch_size, - 'nb_epoch': nb_epoch, - 'nb_sample': len(y), - 'verbose': verbose, - 'do_validation': do_validation, - 'show_accuracy': show_accuracy - }) - callbacks.on_train_begin() - - self.stop_training = False - for epoch in range(nb_epoch): - callbacks.on_epoch_begin(epoch) - if shuffle: - np.random.shuffle(index_array) - - batches = make_batches(len(y), batch_size) - for batch_index, (batch_start, batch_end) in enumerate(batches): - batch_ids = index_array[batch_start:batch_end] - X_batch = slice_X(X, batch_ids) - y_batch = y[batch_ids] - - batch_logs = {} - batch_logs['batch'] = batch_index - batch_logs['size'] = len(batch_ids) - callbacks.on_batch_begin(batch_index, batch_logs) - - ins = X_batch + [y_batch] - if show_accuracy: - loss, acc = self._train_with_acc(*ins) - batch_logs['accuracy'] = acc - else: - loss = self._train(*ins) - batch_logs['loss'] = loss - - callbacks.on_batch_end(batch_index, batch_logs) - - if batch_index == len(batches) - 1: # last batch - # validation - epoch_logs = {} - if do_validation: - if show_accuracy: - val_loss, val_acc = self.evaluate(X_val, y_val, batch_size=batch_size, \ - verbose=0, show_accuracy=True) - epoch_logs['val_accuracy'] = val_acc - else: - val_loss = self.evaluate(X_val, y_val, batch_size=batch_size, verbose=0) - epoch_logs['val_loss'] = val_loss + val_ins = X_val + [y_val, np.ones(y_val.shape[:-1] + (1,))] - callbacks.on_epoch_end(epoch, epoch_logs) - if self.stop_training: - break + if show_accuracy: + f = self._train_with_acc + out_labels = ['loss', 'acc'] + else: + f = self._train + out_labels = ['loss'] - callbacks.on_train_end() - return callbacks.callbacks[0] # return history + ins = X + [y, sample_weight] + metrics = ['loss', 'acc', 'val_loss', 'val_acc'] + return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch, + verbose=verbose, callbacks=callbacks, + validation_split=validation_split, val_f=val_f, val_ins=val_ins, + shuffle=shuffle, metrics=metrics) - def predict(self, X, batch_size=128, verbose=1): + def predict(self, X, batch_size=128, verbose=0): X = standardize_X(X) - batches = make_batches(len(X[0]), batch_size) - if verbose == 1: - progbar = Progbar(target=len(X[0])) - for batch_index, (batch_start, batch_end) in enumerate(batches): - X_batch = slice_X(X, batch_start, batch_end) - batch_preds = self._predict(*X_batch) - - if batch_index == 0: - shape = (len(X[0]),) + batch_preds.shape[1:] - preds = np.zeros(shape) - preds[batch_start:batch_end] = batch_preds - - if verbose == 1: - progbar.update(batch_end) - - return preds + return self._predict_loop(self._predict, X, batch_size, verbose)[0] def predict_proba(self, X, batch_size=128, verbose=1): preds = self.predict(X, batch_size, verbose) @@ -251,7 +475,6 @@ def predict_proba(self, X, batch_size=128, verbose=1): warnings.warn("Network returning invalid probability values.") return preds - def predict_classes(self, X, batch_size=128, verbose=1): proba = self.predict(X, batch_size=batch_size, verbose=verbose) if self.class_mode == 
"categorical": @@ -259,78 +482,21 @@ def predict_classes(self, X, batch_size=128, verbose=1): else: return (proba > 0.5).astype('int32') - - def evaluate(self, X, y, batch_size=128, show_accuracy=False, verbose=1): + def evaluate(self, X, y, batch_size=128, show_accuracy=False, verbose=1, sample_weight=None): X = standardize_X(X) y = standardize_y(y) + sample_weight = standardize_weights(y, sample_weight=sample_weight) + ins = X + [y, sample_weight] if show_accuracy: - tot_acc = 0. - tot_score = 0. - seen = 0 - - batches = make_batches(len(y), batch_size) - if verbose: - progbar = Progbar(target=len(y), verbose=verbose) - for batch_index, (batch_start, batch_end) in enumerate(batches): - X_batch = slice_X(X, batch_start, batch_end) - y_batch = y[batch_start:batch_end] - - ins = X_batch + [y_batch] - if show_accuracy: - loss, acc = self._test_with_acc(*ins) - tot_acc += acc * len(y_batch) - log_values = [('loss', loss), ('acc.', acc)] - else: - loss = self._test(*ins) - log_values = [('loss', loss)] - tot_score += loss * len(y_batch) - seen += len(y_batch) - - # logging - if verbose: - progbar.update(batch_end, log_values) - + f = self._test_with_acc + else: + f = self._test + outs = self._test_loop(f, ins, batch_size, verbose) if show_accuracy: - return tot_score / seen, tot_acc / seen + return outs else: - return tot_score / seen - - -class Sequential(Model, containers.Sequential): - ''' - Inherits from Model the following methods: - - compile - - train - - test - - evaluate - - fit - - predict - - predict_proba - - predict_classes - Inherits from containers.Sequential the following methods: - - add - - get_output - - get_input - - get_weights - - set_weights - ''' - def __init__(self): - self.layers = [] - self.params = [] # learnable - self.regularizers = [] # same size as params - self.constraints = [] # same size as params - - - def get_config(self, verbose=0): - layers = [] - for i, l in enumerate(self.layers): - config = l.get_config() - layers.append(config) - if verbose: - printv(layers) - return layers - + return outs[0] def save_weights(self, filepath, overwrite=False): # Save weights from all layers to HDF5 @@ -363,6 +529,10 @@ def save_weights(self, filepath, overwrite=False): f.close() def load_weights(self, filepath): + ''' + This method does not make use of Sequential.set_weights() + for backwards compatibility. + ''' # Loads weights from HDF5 file import h5py f = h5py.File(filepath) @@ -370,4 +540,145 @@ def load_weights(self, filepath): g = f['layer_{}'.format(k)] weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])] self.layers[k].set_weights(weights) - f.close() \ No newline at end of file + f.close() + + +class Graph(Model, containers.Graph): + def compile(self, optimizer, loss, theano_mode=None): + # loss is a dictionary mapping output name to loss functions + ys = [] + ys_train = [] + ys_test = [] + weights = [] + train_loss = 0. + test_loss = 0. 
+ for output_name in self.output_order: + loss_fn = loss[output_name] + output = self.outputs[output_name] + y_train = output.get_output(True) + y_test = output.get_output(False) + y = T.zeros_like(y_test) + ys.append(y) + ys_train.append(y_train) + ys_test.append(y_test) + + weight = T.ones_like(y_test) + weights.append(weight) + weighted_loss = weighted_objective(objectives.get(loss_fn)) + train_loss += weighted_loss(y, y_train, weight) + test_loss += weighted_loss(y, y_test, weight) + + train_loss.name = 'train_loss' + test_loss.name = 'test_loss' + + ins = [self.inputs[name].input for name in self.input_order] + train_ins = ins + ys + weights + test_ins = ins + ys + weights + + for r in self.regularizers: + train_loss = r(train_loss) + self.optimizer = optimizers.get(optimizer) + updates = self.optimizer.get_updates(self.params, self.constraints, train_loss) + updates += self.updates + self.theano_mode = theano_mode + self.loss = loss + + self._train = theano.function(train_ins, train_loss, updates=updates, + allow_input_downcast=True, mode=theano_mode) + self._test = theano.function(test_ins, test_loss, + allow_input_downcast=True, mode=theano_mode) + self._predict = theano.function(inputs=ins, outputs=ys_test, + allow_input_downcast=True, mode=theano_mode) + + def train_on_batch(self, data, class_weight={}, sample_weight={}): + # data is a dictionary mapping output and input names to arrays + sample_weight = [standardize_weights(data[name], + sample_weight=sample_weight.get(name), + class_weight=class_weight.get(name)) for name in self.output_order] + ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight + return self._train(*ins) + + def test_on_batch(self, data, sample_weight={}): + # data is a dictionary mapping input names to arrays + sample_weight = [standardize_weights(data[name]) for name in self.output_order] + + ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight + return self._test(*ins) + + def predict_on_batch(self, data): + # data is a dictionary mapping input names to arrays + ins = [data[name] for name in self.input_order] + return self._predict(*ins) + + def fit(self, data, batch_size=128, nb_epoch=100, verbose=1, callbacks=[], + validation_split=0., validation_data=None, shuffle=True, class_weight={}, sample_weight={}): + sample_weight = [standardize_weights(data[name], + sample_weight=sample_weight.get(name), + class_weight=class_weight.get(name)) for name in self.output_order] + ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight + + val_f = None + val_ins = None + if validation_data or validation_split: + val_f = self._test + if validation_data: + sample_weight = [standardize_weights(validation_data[name]) for name in self.output_order] + val_ins = [validation_data[name] for name in self.input_order] + [standardize_y(validation_data[name]) for name in self.output_order] + sample_weight + + f = self._train + out_labels = self.output_order + metrics = self.output_order + ['val_' + m for m in self.output_order] + history = self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch, + verbose=verbose, callbacks=callbacks, + validation_split=validation_split, val_f=val_f, val_ins=val_ins, + shuffle=shuffle, metrics=metrics) + return history + + def evaluate(self, data, batch_size=128, verbose=0, sample_weight={}): + sample_weight = 
[standardize_weights(data[name], sample_weight.get(name)) for name in self.output_order] + + ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight + outs = self._test_loop(self._test, ins, batch_size, verbose) + return outs[0] + + def predict(self, data, batch_size=128, verbose=0): + ins = [data[name] for name in self.input_order] + outs = self._predict_loop(self._predict, ins, batch_size, verbose) + return dict(zip(self.output_order, outs)) + + def save_weights(self, filepath, overwrite=False): + # Save weights from all layers to HDF5 + import h5py + import os.path + # if file exists and should not be overwritten + if not overwrite and os.path.isfile(filepath): + import sys + get_input = input + if sys.version_info[:2] <= (2, 7): + get_input = raw_input + overwrite = get_input('[WARNING] %s already exists - overwrite? [y/n]' % (filepath)) + while overwrite not in ['y', 'n']: + overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).') + if overwrite == 'n': + return + print('[TIP] Next time specify overwrite=True in save_weights!') + + f = h5py.File(filepath, 'w') + g = f.create_group('graph') + weights = self.get_weights() + g.attrs['nb_params'] = len(weights) + for n, param in enumerate(weights): + param_name = 'param_{}'.format(n) + param_dset = g.create_dataset(param_name, param.shape, dtype=param.dtype) + param_dset[:] = param + f.flush() + f.close() + + def load_weights(self, filepath): + # Loads weights from HDF5 file + import h5py + f = h5py.File(filepath) + g = f['graph'] + weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])] + self.set_weights(weights) + f.close() diff --git a/keras/objectives.py b/keras/objectives.py index 4388406b805e..85d3367bcf6c 100644 --- a/keras/objectives.py +++ b/keras/objectives.py @@ -4,38 +4,61 @@ import numpy as np from six.moves import range -epsilon = 1.0e-9 +if theano.config.floatX == 'float64': + epsilon = 1.0e-9 +else: + epsilon = 1.0e-7 + def mean_squared_error(y_true, y_pred): - return T.sqr(y_pred - y_true).mean() + return T.sqr(y_pred - y_true).mean(axis=-1) + def mean_absolute_error(y_true, y_pred): - return T.abs_(y_pred - y_true).mean() + return T.abs_(y_pred - y_true).mean(axis=-1) + + +def mean_absolute_percentage_error(y_true, y_pred): + return T.abs_((y_true - y_pred) / T.clip(T.abs_(y_true), epsilon, np.inf)).mean(axis=-1) * 100. + + +def mean_squared_logarithmic_error(y_true, y_pred): + return T.sqr(T.log(T.clip(y_pred, epsilon, np.inf) + 1.) - T.log(T.clip(y_true, epsilon, np.inf) + 1.)).mean(axis=-1) + def squared_hinge(y_true, y_pred): - return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean() + return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean(axis=-1) + def hinge(y_true, y_pred): - return T.maximum(1. - y_true * y_pred, 0.).mean() + return T.maximum(1. 
- y_true * y_pred, 0.).mean(axis=-1) + def categorical_crossentropy(y_true, y_pred): '''Expects a binary class matrix instead of a vector of scalar classes ''' y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon) # scale preds so that the class probas of each sample sum to 1 - y_pred /= y_pred.sum(axis=1, keepdims=True) + y_pred /= y_pred.sum(axis=-1, keepdims=True) cce = T.nnet.categorical_crossentropy(y_pred, y_true) - return cce.mean() + return cce + def binary_crossentropy(y_true, y_pred): y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon) - bce = T.nnet.binary_crossentropy(y_pred, y_true) - return bce.mean() + bce = T.nnet.binary_crossentropy(y_pred, y_true).mean(axis=-1) + return bce + + +def poisson_loss(y_true, y_pred): + return T.mean(y_pred - y_true * T.log(y_pred), axis=-1) # aliases mse = MSE = mean_squared_error mae = MAE = mean_absolute_error +mape = MAPE = mean_absolute_percentage_error +msle = MSLE = mean_squared_logarithmic_error from .utils.generic_utils import get_from_module def get(identifier): - return get_from_module(identifier, globals(), 'objective') \ No newline at end of file + return get_from_module(identifier, globals(), 'objective') diff --git a/keras/optimizers.py b/keras/optimizers.py index 59b61d4cec79..c52ad7194181 100644 --- a/keras/optimizers.py +++ b/keras/optimizers.py @@ -3,102 +3,129 @@ import theano.tensor as T import numpy as np -from .utils.theano_utils import shared_zeros, shared_scalar +from .utils.theano_utils import shared_zeros, shared_scalar, floatX from six.moves import zip + def clip_norm(g, c, n): if c > 0: - g = T.switch(T.ge(n, c), g*c/n, g) + g = T.switch(T.ge(n, c), g * c / n, g) return g + def kl_divergence(p, p_hat): - return p_hat - p + p*T.log(p/p_hat) + return p_hat - p + p * T.log(p / p_hat) + class Optimizer(object): - - def get_updates(self, params, grads): + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + self.updates = [] + + def get_state(self): + return [u[0].get_value() for u in self.updates] + + def set_state(self, value_list): + assert len(self.updates) == len(value_list) + for u, v in zip(self.updates, value_list): + u[0].set_value(floatX(v)) + + def get_updates(self, params, constraints, loss): raise NotImplementedError - def get_gradients(self, cost, params, regularizers): - grads = T.grad(cost, params) + def get_gradients(self, loss, params): + + grads = T.grad(loss, params) if hasattr(self, 'clipnorm') and self.clipnorm > 0: - norm = T.sqrt(sum([T.sum(g**2) for g in grads])) + norm = T.sqrt(sum([T.sum(g ** 2) for g in grads])) grads = [clip_norm(g, self.clipnorm, norm) for g in grads] - new_grads = [] - for p, g, r in zip(params, grads, regularizers): - g = r(g, p) - new_grads.append(g) + return grads - return new_grads + def get_config(self): + return {"name": self.__class__.__name__} class SGD(Optimizer): def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, *args, **kwargs): - self.__dict__.update(kwargs) + super(SGD, self).__init__(**kwargs) self.__dict__.update(locals()) self.iterations = shared_scalar(0) - def get_updates(self, params, regularizers, constraints, cost): - grads = self.get_gradients(cost, params, regularizers) + def get_updates(self, params, constraints, loss): + grads = self.get_gradients(loss, params) lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations)) - updates = [(self.iterations, self.iterations+1.)] + self.updates = [(self.iterations, self.iterations + 1.)] for p, g, c in zip(params, grads, constraints): - m = shared_zeros(p.get_value().shape) # momentum - v = 
self.momentum * m - lr * g # velocity - updates.append((m, v)) + m = shared_zeros(p.get_value().shape) # momentum + v = self.momentum * m - lr * g # velocity + self.updates.append((m, v)) if self.nesterov: new_p = p + self.momentum * v - lr * g else: new_p = p + v - updates.append((p, c(new_p))) # apply constraints - return updates + self.updates.append((p, c(new_p))) # apply constraints + return self.updates + def get_config(self): + return {"name": self.__class__.__name__, + "lr": self.lr, + "momentum": self.momentum, + "decay": self.decay, + "nesterov": self.nesterov} -class RMSprop(Optimizer): +class RMSprop(Optimizer): def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs): - self.__dict__.update(kwargs) + super(RMSprop, self).__init__(**kwargs) self.__dict__.update(locals()) - def get_updates(self, params, regularizers, constraints, cost): - grads = self.get_gradients(cost, params, regularizers) + def get_updates(self, params, constraints, loss): + grads = self.get_gradients(loss, params) accumulators = [shared_zeros(p.get_value().shape) for p in params] - updates = [] + self.updates = [] for p, g, a, c in zip(params, grads, accumulators, constraints): - new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator - updates.append((a, new_a)) + new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator + self.updates.append((a, new_a)) new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon) - updates.append((p, c(new_p))) # apply constraints - - return updates + self.updates.append((p, c(new_p))) # apply constraints + return self.updates + def get_config(self): + return {"name": self.__class__.__name__, + "lr": self.lr, + "rho": self.rho, + "epsilon": self.epsilon} -class Adagrad(Optimizer): +class Adagrad(Optimizer): def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs): - self.__dict__.update(kwargs) + super(Adagrad, self).__init__(**kwargs) self.__dict__.update(locals()) - def get_updates(self, params, regularizers, constraints, cost): - grads = self.get_gradients(cost, params, regularizers) + def get_updates(self, params, constraints, loss): + grads = self.get_gradients(loss, params) accumulators = [shared_zeros(p.get_value().shape) for p in params] - updates = [] + self.updates = [] for p, g, a, c in zip(params, grads, accumulators, constraints): - new_a = a + g ** 2 # update accumulator - updates.append((a, new_a)) - + new_a = a + g ** 2 # update accumulator + self.updates.append((a, new_a)) new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon) - updates.append((p, c(new_p))) # apply constraints - return updates + self.updates.append((p, c(new_p))) # apply constraints + return self.updates + + def get_config(self): + return {"name": self.__class__.__name__, + "lr": self.lr, + "epsilon": self.epsilon} class Adadelta(Optimizer): @@ -106,70 +133,74 @@ class Adadelta(Optimizer): Reference: http://arxiv.org/abs/1212.5701 ''' def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, *args, **kwargs): - self.__dict__.update(kwargs) + super(Adadelta, self).__init__(**kwargs) self.__dict__.update(locals()) - def get_updates(self, params, regularizers, constraints, cost): - grads = self.get_gradients(cost, params, regularizers) + def get_updates(self, params, constraints, loss): + grads = self.get_gradients(loss, params) accumulators = [shared_zeros(p.get_value().shape) for p in params] delta_accumulators = [shared_zeros(p.get_value().shape) for p in params] - updates = [] + self.updates = [] for p, g, a, d_a, c in zip(params, grads, accumulators, 
delta_accumulators, constraints): - new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator - updates.append((a, new_a)) + new_a = self.rho * a + (1 - self.rho) * g ** 2 # update accumulator + self.updates.append((a, new_a)) # use the new accumulator and the *old* delta_accumulator update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a + self.epsilon) new_p = p - self.lr * update - updates.append((p, c(new_p))) # apply constraints + self.updates.append((p, c(new_p))) # apply constraints # update delta_accumulator new_d_a = self.rho * d_a + (1 - self.rho) * update ** 2 - updates.append((d_a, new_d_a)) - return updates + self.updates.append((d_a, new_d_a)) + return self.updates + + def get_config(self): + return {"name": self.__class__.__name__, + "lr": self.lr, + "rho": self.rho, + "epsilon": self.epsilon} class Adam(Optimizer): ''' - Reference: http://arxiv.org/abs/1412.6980 - - Default parameters follow those provided in the original paper + Reference: http://arxiv.org/abs/1412.6980v8 - lambda is renamed kappa. + Default parameters follow those provided in the original paper. ''' - def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, kappa=1-1e-8, *args, **kwargs): - self.__dict__.update(kwargs) + def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, *args, **kwargs): + super(Adam, self).__init__(**kwargs) self.__dict__.update(locals()) self.iterations = shared_scalar(0) - def get_updates(self, params, regularizers, constraints, cost): - grads = self.get_gradients(cost, params, regularizers) - updates = [(self.iterations, self.iterations+1.)] - - i = self.iterations - beta_1_t = self.beta_1 * (self.kappa**i) + def get_updates(self, params, constraints, loss): + grads = self.get_gradients(loss, params) + self.updates = [(self.iterations, self.iterations+1.)] - # the update below seems missing from the paper, but is obviously required - beta_2_t = self.beta_2 * (self.kappa**i) + t = self.iterations + 1 + lr_t = self.lr * T.sqrt(1-self.beta_2**t)/(1-self.beta_1**t) for p, g, c in zip(params, grads, constraints): - m = theano.shared(p.get_value() * 0.) # zero init of moment - v = theano.shared(p.get_value() * 0.) # zero init of velocity + m = theano.shared(p.get_value() * 0.) # zero init of moment + v = theano.shared(p.get_value() * 0.) 
# zero init of velocity - m_t = (beta_1_t * m) + (1 - beta_1_t) * g - v_t = (beta_2_t * v) + (1 - beta_2_t) * (g**2) + m_t = (self.beta_1 * m) + (1 - self.beta_1) * g + v_t = (self.beta_2 * v) + (1 - self.beta_2) * (g**2) + p_t = p - lr_t * m_t / (T.sqrt(v_t) + self.epsilon) - m_b_t = m_t / (1 - beta_1_t) - v_b_t = v_t / (1 - beta_2_t) + self.updates.append((m, m_t)) + self.updates.append((v, v_t)) + self.updates.append((p, c(p_t))) # apply constraints + return self.updates - p_t = p - self.lr * m_b_t / (T.sqrt(v_b_t) + self.epsilon) - - updates.append((m, m_t)) - updates.append((v, v_t)) - updates.append((p, c(p_t))) # apply constraints - return updates + def get_config(self): + return {"name": self.__class__.__name__, + "lr": self.lr, + "beta_1": self.beta_1, + "beta_2": self.beta_2, + "epsilon": self.epsilon} # aliases sgd = SGD @@ -179,5 +210,5 @@ def get_updates(self, params, regularizers, constraints, cost): adam = Adam from .utils.generic_utils import get_from_module -def get(identifier): - return get_from_module(identifier, globals(), 'optimizer', instantiate=True) +def get(identifier, kwargs=None): + return get_from_module(identifier, globals(), 'optimizer', instantiate=True, kwargs=kwargs) diff --git a/keras/preprocessing/sequence.py b/keras/preprocessing/sequence.py index 197288c7561a..4becfaf1486b 100644 --- a/keras/preprocessing/sequence.py +++ b/keras/preprocessing/sequence.py @@ -4,15 +4,17 @@ import random from six.moves import range -def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre'): +def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.): """ Pad each sequence to the same length: the length of the longuest sequence. If maxlen is provided, any sequence longer - than maxlen is truncated to maxlen. + than maxlen is truncated to maxlen. Truncation happens off either the beginning (default) or + the end of the sequence. + + Supports post-padding and pre-padding (default). - Support post-padding and pre-padding (default). 
""" lengths = [len(s) for s in sequences] @@ -20,12 +22,21 @@ def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre'): if maxlen is None: maxlen = np.max(lengths) - x = np.zeros((nb_samples, maxlen)).astype(dtype) + x = (np.ones((nb_samples, maxlen)) * value).astype(dtype) for idx, s in enumerate(sequences): + if truncating == 'pre': + trunc = s[-maxlen:] + elif truncating == 'post': + trunc = s[:maxlen] + else: + raise ValueError("Truncating type '%s' not understood" % padding) + if padding == 'post': - x[idx, :lengths[idx]] = s[:maxlen] + x[idx, :len(trunc)] = trunc + elif padding == 'pre': + x[idx, -len(trunc):] = trunc else: - x[idx, -min(maxlen, lengths[idx]):] = s[:maxlen] + raise ValueError("Padding type '%s' not understood" % padding) return x diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py index bf47015f0c06..03a9f02c9f2a 100644 --- a/keras/preprocessing/text.py +++ b/keras/preprocessing/text.py @@ -32,7 +32,7 @@ def text_to_word_sequence(text, filters=base_filter(), lower=True, split=" "): def one_hot(text, n, filters=base_filter(), lower=True, split=" "): - seq = text_to_word_sequence(text) + seq = text_to_word_sequence(text, filters=filters, lower=lower, split=split) return [(abs(hash(w))%(n-1)+1) for w in seq] diff --git a/keras/regularizers.py b/keras/regularizers.py index f5a9becf4234..223e3096b842 100644 --- a/keras/regularizers.py +++ b/keras/regularizers.py @@ -1,26 +1,84 @@ from __future__ import absolute_import -import theano import theano.tensor as T -import numpy as np - -def l1(l=.01): - def l1wrap(g, p): - g += T.sgn(p) * l - return g - return l1wrap - -def l2(l=.01): - def l2wrap(g, p): - g += p * l - return g - return l2wrap - -def l1l2(l1=.01, l2=.01): - def l1l2wrap(g, p): - g += T.sgn(p) * l1 - g += p * l2 - return g - return l1l2wrap - -def identity(g, p): - return g \ No newline at end of file + + +class Regularizer(object): + def set_param(self, p): + self.p = p + + def set_layer(self, layer): + self.layer = layer + + def __call__(self, loss): + return loss + + def get_config(self): + return {"name": self.__class__.__name__} + + +class WeightRegularizer(Regularizer): + def __init__(self, l1=0., l2=0.): + self.l1 = l1 + self.l2 = l2 + + def set_param(self, p): + self.p = p + + def __call__(self, loss): + loss += T.sum(abs(self.p)) * self.l1 + loss += T.sum(self.p ** 2) * self.l2 + return loss + + def get_config(self): + return {"name": self.__class__.__name__, + "l1": self.l1, + "l2": self.l2} + + +class ActivityRegularizer(Regularizer): + def __init__(self, l1=0., l2=0.): + self.l1 = l1 + self.l2 = l2 + + def set_layer(self, layer): + self.layer = layer + + def __call__(self, loss): + loss += self.l1 * T.sum(T.mean(abs(self.layer.get_output(True)), axis=0)) + loss += self.l2 * T.sum(T.mean(self.layer.get_output(True) ** 2, axis=0)) + return loss + + def get_config(self): + return {"name": self.__class__.__name__, + "l1": self.l1, + "l2": self.l2} + + +def l1(l=0.01): + return WeightRegularizer(l1=l) + + +def l2(l=0.01): + return WeightRegularizer(l2=l) + + +def l1l2(l1=0.01, l2=0.01): + return WeightRegularizer(l1=l1, l2=l2) + + +def activity_l1(l=0.01): + return ActivityRegularizer(l1=l) + + +def activity_l2(l=0.01): + return ActivityRegularizer(l2=l) + + +def activity_l1l2(l1=0.01, l2=0.01): + return ActivityRegularizer(l1=l1, l2=l2) + +identity = Regularizer + +from .utils.generic_utils import get_from_module +def get(identifier, kwargs=None): + return get_from_module(identifier, globals(), 'regularizer', 
instantiate=True, kwargs=kwargs) diff --git a/keras/utils/dot_utils.py b/keras/utils/dot_utils.py deleted file mode 100644 index 5e6d516acfb5..000000000000 --- a/keras/utils/dot_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -import pydot -from keras.layers.core import Merge -from keras.models import Model -from collections import Counter - -class Grapher(object): - - def __init__(self): - self.names = {} - self.class_counts = Counter() - - def get_name(self, model): - """ - returns the name of the model instance. If model does not have a `name` attribute, then it will be assigned - a generic (and unique) identifier based on its class - """ - if hasattr(model, 'name'): - return model.name - clz = model.__class__.__name__ - if model not in self.names: - self.class_counts[clz] += 1 - self.names[model] = clz + str(self.class_counts[clz]) - return self.names[model] - - def add_edge(self, f, t, graph): - if f: graph.add_edge(pydot.Edge(f, t)) - return t - - def add_model(self, model, graph, parent=None): - """ - Recursively adds `model` and its components to the pydot graph - """ - this = self.get_name(model) - if isinstance(model, Model): - parent = self.add_edge(parent, this, graph) - for child in reversed(model.layers): - parent = self.add_model(child, graph, parent) - elif isinstance(model, Merge): - for child in model.models: - self.add_model(child, graph, this) - return self.add_edge(parent, this, graph) - else: - return self.add_edge(parent, this, graph) - - def plot(self, model, to_file): - """ - creates a graph visualizing the structure of `model` and writes it to `to_file` - """ - graph = pydot.Dot(graph_type='graph') - self.add_model(model, graph) - graph.write_png(to_file) diff --git a/keras/utils/generic_utils.py b/keras/utils/generic_utils.py index dbcbb9a7b92b..8dbbe4fadcfd 100644 --- a/keras/utils/generic_utils.py +++ b/keras/utils/generic_utils.py @@ -4,20 +4,25 @@ import sys import six -def get_from_module(identifier, module_params, module_name, instantiate=False): + +def get_from_module(identifier, module_params, module_name, instantiate=False, kwargs=None): if isinstance(identifier, six.string_types): res = module_params.get(identifier) if not res: raise Exception('Invalid ' + str(module_name) + ': ' + str(identifier)) - if instantiate: + if instantiate and not kwargs: return res() + elif instantiate and kwargs: + return res(**kwargs) else: return res return identifier + def make_tuple(*args): return args + def printv(v, prefix=''): if type(v) == dict: if 'name' in v: @@ -34,11 +39,12 @@ def printv(v, prefix=''): prefix += '...' for i, nv in enumerate(v): print(prefix + '#' + str(i)) - printv(nv, prefix) + printv(nv, prefix) else: prefix += '...' 
print(prefix + str(v)) + class Progbar(object): def __init__(self, target, width=30, verbose=1): ''' @@ -61,11 +67,11 @@ def update(self, current, values=[]): ''' for k, v in values: if k not in self.sum_values: - self.sum_values[k] = [v * (current-self.seen_so_far), current-self.seen_so_far] + self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far] self.unique_values.append(k) else: - self.sum_values[k][0] += v * (current-self.seen_so_far) - self.sum_values[k][1] += (current-self.seen_so_far) + self.sum_values[k][0] += v * (current - self.seen_so_far) + self.sum_values[k][1] += (current - self.seen_so_far) self.seen_so_far = current now = time.time() @@ -89,7 +95,7 @@ def update(self, current, values=[]): bar += ']' sys.stdout.write(bar) self.total_width = len(bar) - + if current: time_per_unit = (now - self.start) / current else: @@ -101,7 +107,7 @@ def update(self, current, values=[]): else: info += ' - %ds' % (now - self.start) for k in self.unique_values: - info += ' - %s: %.4f' % (k, self.sum_values[k][0]/ max(1, self.sum_values[k][1])) + info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1])) self.total_width += len(info) if prev_total_width > self.total_width: @@ -117,9 +123,8 @@ def update(self, current, values=[]): if current >= self.target: info = '%ds' % (now - self.start) for k in self.unique_values: - info += ' - %s: %.4f' % (k, self.sum_values[k][0]/ max(1, self.sum_values[k][1])) + info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1])) sys.stdout.write(info + "\n") - def add(self, n, values=[]): self.update(self.seen_so_far+n, values) diff --git a/keras/utils/io_utils.py b/keras/utils/io_utils.py index ceac7d15b3ba..c36abefb43a1 100644 --- a/keras/utils/io_utils.py +++ b/keras/utils/io_utils.py @@ -3,9 +3,10 @@ import numpy as np from collections import defaultdict -class HDF5Matrix: - - refs = defaultdict(int) + +class HDF5Matrix(): + def __init__(self): + self.refs = defaultdict(int) def __init__(self, datapath, dataset, start, end, normalizer=None): if datapath not in list(self.refs.keys()): @@ -17,7 +18,7 @@ def __init__(self, datapath, dataset, start, end, normalizer=None): self.end = end self.data = f[dataset] self.normalizer = normalizer - + def __len__(self): return self.end - self.start @@ -60,11 +61,12 @@ def save_array(array, name): ds[:] = array f.close() + def load_array(name): import tables f = tables.open_file(name) array = f.root.data - a=np.empty(shape=array.shape, dtype=array.dtype) - a[:]=array[:] + a = np.empty(shape=array.shape, dtype=array.dtype) + a[:] = array[:] f.close() return a diff --git a/keras/utils/layer_utils.py b/keras/utils/layer_utils.py new file mode 100644 index 000000000000..c8ee53f52d8b --- /dev/null +++ b/keras/utils/layer_utils.py @@ -0,0 +1,117 @@ +from __future__ import print_function +import inspect +import numpy as np +import theano +import copy + +from ..layers.advanced_activations import LeakyReLU, PReLU +from ..layers.core import Dense, Merge, Dropout, Activation, Reshape, Flatten, RepeatVector, Layer +from ..layers.core import ActivityRegularization, TimeDistributedDense, AutoEncoder, MaxoutDense +from ..layers.convolutional import Convolution1D, Convolution2D, MaxPooling1D, MaxPooling2D, ZeroPadding2D +from ..layers.embeddings import Embedding, WordContextProduct +from ..layers.noise import GaussianNoise, GaussianDropout +from ..layers.normalization import BatchNormalization +from ..layers.recurrent import SimpleRNN, SimpleDeepRNN, GRU, LSTM, 
JZS1, JZS2, JZS3 +from ..layers import containers +from .. import regularizers +from .. import constraints + + +def container_from_config(original_layer_dict): + layer_dict = copy.deepcopy(original_layer_dict) + name = layer_dict.get('name') + + if name == 'Merge': + mode = layer_dict.get('mode') + layers = layer_dict.get('layers') + layer_list = [] + for layer in layers: + init_layer = container_from_config(layer) + layer_list.append(init_layer) + merge_layer = Merge(layer_list, mode) + return merge_layer + + elif name == 'Sequential': + layers = layer_dict.get('layers') + layer_list = [] + for layer in layers: + init_layer = container_from_config(layer) + layer_list.append(init_layer) + seq_layer = containers.Sequential(layer_list) + return seq_layer + + elif name == 'Graph': + graph_layer = containers.Graph() + inputs = layer_dict.get('input_config') + + for input in inputs: + graph_layer.add_input(**input) + + nodes = layer_dict.get('node_config') + for node in nodes: + layer = container_from_config(layer_dict['nodes'].get(node['name'])) + node['layer'] = layer + graph_layer.add_node(**node) + + outputs = layer_dict.get('output_config') + for output in outputs: + graph_layer.add_output(**output) + return graph_layer + + else: + layer_dict.pop('name') + + for k, v in layer_dict.items(): + # For now, this can only happen for regularizers and constraints + if isinstance(v, dict): + vname = v.get('name') + v.pop('name') + if vname in [x for x, y in inspect.getmembers(constraints, predicate=inspect.isclass)]: + layer_dict[k] = constraints.get(vname, v) + if vname in [x for x, y in inspect.getmembers(regularizers, predicate=inspect.isclass)]: + layer_dict[k] = regularizers.get(vname, v) + + base_layer = get_layer(name, layer_dict) + return base_layer + + +def print_layer_shapes(model, input_shapes): + """ + Utility function to print the shape of the output at each layer of a Model + + Arguments: + model: instance of Model / Merge + input_shapes: dict (Graph), list of tuples (Merge) or tuple (Sequential) + """ + if model.__class__.__name__ in ['Sequential', 'Merge']: + # in this case input_shapes is a tuple, or a list [shape1, shape2] + if not isinstance(input_shapes[0], tuple): + input_shapes = [input_shapes] + + inputs = model.get_input(train=False) + if not isinstance(inputs, list): + inputs = [inputs] + input_dummy = [np.zeros(shape, dtype=np.float32) + for shape in input_shapes] + layers = model.layers + + elif model.__class__.__name__ == 'Graph': + # in this case input_shapes is a dictionary + inputs = [model.inputs[name].input + for name in model.input_order] + input_dummy = [np.zeros(input_shapes[name], dtype=np.float32) + for name in model.input_order] + layers = [model.nodes[c['name']] for c in model.node_config] + + print("input shapes : ", input_shapes) + for l in layers: + shape_f = theano.function(inputs, l.get_output(train=False).shape, + on_unused_input='ignore') + out_shape = tuple(shape_f(*input_dummy)) + config = l.get_config() + print('shape after %s: %s' % (config['name'], out_shape)) + + +from .generic_utils import get_from_module +def get_layer(identifier, kwargs=None): + return get_from_module(identifier, globals(), 'layer', instantiate=True, kwargs=kwargs) diff --git a/keras/utils/np_utils.py b/keras/utils/np_utils.py index c25165d88b3b..280ffa913019 100644 --- a/keras/utils/np_utils.py +++ b/keras/utils/np_utils.py @@ -4,6 +4,7 @@ from six.moves import range from six.moves import zip + def to_categorical(y, nb_classes=None): '''Convert class vector (integers from 0 
to nb_classes) to binary class matrix, for use with categorical_crossentropy @@ -16,9 +17,10 @@ def to_categorical(y, nb_classes=None): Y[i, y[i]] = 1. return Y + def normalize(a, axis=-1, order=2): l2 = np.atleast_1d(np.linalg.norm(a, order, axis)) - l2[l2==0] = 1 + l2[l2 == 0] = 1 return a / np.expand_dims(l2, axis) @@ -26,23 +28,27 @@ def binary_logloss(p, y): epsilon = 1e-15 p = sp.maximum(epsilon, p) p = sp.minimum(1-epsilon, p) - res = sum(y*sp.log(p) + sp.subtract(1,y)*sp.log(sp.subtract(1,p))) + res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p))) res *= -1.0/len(y) return res + def multiclass_logloss(P, Y): score = 0. npreds = [P[i][Y[i]-1] for i in range(len(Y))] - score = -(1./len(Y)) * np.sum(np.log(npreds)) + score = -(1. / len(Y)) * np.sum(np.log(npreds)) return score + def accuracy(p, y): - return np.mean([a==b for a, b in zip(p, y)]) + return np.mean([a == b for a, b in zip(p, y)]) + def probas_to_classes(y_pred): if len(y_pred.shape) > 1 and y_pred.shape[1] > 1: return categorical_probas_to_classes(y_pred) return np.array([1 if p > 0.5 else 0 for p in y_pred]) + def categorical_probas_to_classes(p): return np.argmax(p, axis=1) diff --git a/keras/utils/test_utils.py b/keras/utils/test_utils.py new file mode 100644 index 000000000000..a290c8d830f4 --- /dev/null +++ b/keras/utils/test_utils.py @@ -0,0 +1,27 @@ +import numpy as np + + +def get_test_data(nb_train=1000, nb_test=500, input_shape=(10,), output_shape=(2,), + classification=True, nb_class=2): + ''' + classification=True overrides output_shape + (i.e. output_shape is set to (1,)) and the output + consists in integers in [0, nb_class-1]. + + Otherwise: float output with shape output_shape. + ''' + nb_sample = nb_train + nb_test + if classification: + y = np.random.randint(0, nb_class, size=(nb_sample, 1)) + X = np.zeros((nb_sample,) + input_shape) + for i in range(nb_sample): + X[i] = np.random.normal(loc=y[i], scale=1.0, size=input_shape) + else: + y_loc = np.random.random((nb_sample,)) + X = np.zeros((nb_sample,) + input_shape) + y = np.zeros((nb_sample,) + output_shape) + for i in range(nb_sample): + X[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=input_shape) + y[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=output_shape) + + return (X[:nb_train], y[:nb_train]), (X[nb_train:], y[nb_train:]) diff --git a/keras/utils/theano_utils.py b/keras/utils/theano_utils.py index 5044f34cf930..f9dbe8262986 100644 --- a/keras/utils/theano_utils.py +++ b/keras/utils/theano_utils.py @@ -3,20 +3,36 @@ import theano import theano.tensor as T + def floatX(X): return np.asarray(X, dtype=theano.config.floatX) + def sharedX(X, dtype=theano.config.floatX, name=None): return theano.shared(np.asarray(X, dtype=dtype), name=name) + def shared_zeros(shape, dtype=theano.config.floatX, name=None): return sharedX(np.zeros(shape), dtype=dtype, name=name) + def shared_scalar(val=0., dtype=theano.config.floatX, name=None): return theano.shared(np.cast[dtype](val)) + def shared_ones(shape, dtype=theano.config.floatX, name=None): return sharedX(np.ones(shape), dtype=dtype, name=name) + def alloc_zeros_matrix(*dims): return T.alloc(np.cast[theano.config.floatX](0.), *dims) + + +def ndim_tensor(ndim): + if ndim == 2: + return T.matrix() + elif ndim == 3: + return T.tensor3() + elif ndim == 4: + return T.tensor4() + return T.matrix() diff --git a/setup.py b/setup.py index 55306f208a37..673540762b08 100644 --- a/setup.py +++ b/setup.py @@ -1,14 +1,14 @@ from setuptools import setup from setuptools import find_packages 
-setup(name = 'Keras', - version = '0.1.1', - description = 'Theano-based Deep Learning library', - author = 'Francois Chollet', - author_email = 'francois.chollet@gmail.com', - url = 'https://github.com/fchollet/keras', - download_url = 'https://github.com/fchollet/keras/tarball/0.1.1', - license = 'MIT', - install_requires = ['theano'], - packages = find_packages(), -) \ No newline at end of file + +setup(name='Keras', + version='0.1.2', + description='Theano-based Deep Learning library', + author='Francois Chollet', + author_email='francois.chollet@gmail.com', + url='https://github.com/fchollet/keras', + download_url='https://github.com/fchollet/keras/tarball/0.1.2', + license='MIT', + install_requires=['theano', 'pyyaml', 'h5py'], + packages=find_packages()) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/auto/keras/layers/test_core.py b/tests/auto/keras/layers/test_core.py new file mode 100644 index 000000000000..e8034c55c53a --- /dev/null +++ b/tests/auto/keras/layers/test_core.py @@ -0,0 +1,162 @@ +import unittest +import numpy as np +from numpy.testing import assert_allclose +import theano + +from keras.layers import core + + +class TestLayerBase(unittest.TestCase): + def test_input_output(self): + nb_samples = 10 + input_dim = 5 + layer = core.Layer() + + # As long as there is no input, an error should be raised. + for train in [True, False]: + self.assertRaises(AttributeError, layer.get_input, train) + self.assertRaises(AttributeError, layer.get_output, train) + + # Once an input is provided, it should be reachable through the + # appropriate getters + input = np.ones((nb_samples, input_dim)) + layer.input = theano.shared(value=input) + for train in [True, False]: + assert_allclose(layer.get_input(train).eval(), input) + assert_allclose(layer.get_output(train).eval(), input) + + def test_connections(self): + nb_samples = 10 + input_dim = 5 + layer1 = core.Layer() + layer2 = core.Layer() + + input = np.ones((nb_samples, input_dim)) + layer1.input = theano.shared(value=input) + + # As long as there is no previous layer, an error should be raised. + for train in [True, False]: + self.assertRaises(AttributeError, layer2.get_input, train) + + # After connecting, input of layer1 should be passed through + layer2.set_previous(layer1) + for train in [True, False]: + assert_allclose(layer2.get_input(train).eval(), input) + assert_allclose(layer2.get_output(train).eval(), input) + + +class TestConfigParams(unittest.TestCase): + """ + Test the constructor, config and params functions of all layers in core. 
+ """ + + def _runner(self, layer): + conf = layer.get_config() + assert (type(conf) == dict) + + param = layer.get_params() + # Typically a list or a tuple, but may be any iterable + assert hasattr(param, '__iter__') + + def test_base(self): + layer = core.Layer() + self._runner(layer) + + def test_masked(self): + layer = core.MaskedLayer() + self._runner(layer) + + def test_merge(self): + layer_1 = core.Layer() + layer_2 = core.Layer() + layer = core.Merge([layer_1, layer_2]) + self._runner(layer) + + def test_dropout(self): + layer = core.Dropout(0.5) + self._runner(layer) + + def test_activation(self): + layer = core.Activation('linear') + self._runner(layer) + + def test_reshape(self): + layer = core.Reshape(10, 10) + self._runner(layer) + + def test_flatten(self): + layer = core.Flatten() + self._runner(layer) + + def test_repeat_vector(self): + layer = core.RepeatVector(10) + self._runner(layer) + + def test_dense(self): + layer = core.Dense(10, 10) + self._runner(layer) + + def test_act_reg(self): + layer = core.ActivityRegularization(0.5, 0.5) + self._runner(layer) + + def test_time_dist_dense(self): + layer = core.TimeDistributedDense(10, 10) + self._runner(layer) + + def test_autoencoder(self): + layer_1 = core.Layer() + layer_2 = core.Layer() + + layer = core.AutoEncoder(layer_1, layer_2) + self._runner(layer) + + def test_maxout_dense(self): + layer = core.MaxoutDense(10, 10) + self._runner(layer) + + +class TestMasking(unittest.TestCase): + """Test the Masking class""" + + def test_sequences(self): + """Test masking sequences with zeroes as padding""" + # integer inputs, one per timestep, like embeddings + layer = core.Masking() + func = theano.function([layer.input], layer.get_output_mask()) + self.assertTrue(np.all( + # get mask for this input + func(np.array( + [[[1], [2], [3], [0]], + [[0], [4], [5], [0]]], dtype=np.int32)) == + # This is the expected output mask, one dimension less + np.array([[1, 1, 1, 0], [0, 1, 1, 0]]))) + + def test_non_zero(self): + """Test masking with non-zero mask value""" + layer = core.Masking(5) + func = theano.function([layer.input], layer.get_output_mask()) + self.assertTrue(np.all( + # get mask for this input, if not all the values are 5, shouldn't masked + func(np.array( + [[[1, 1], [2, 1], [3, 1], [5, 5]], + [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)) == + # This is the expected output mask, one dimension less + np.array([[1, 1, 1, 0], [1, 1, 1, 1]]))) + + def test_non_zero_output(self): + """Test output of masking layer with non-zero mask value""" + layer = core.Masking(5) + func = theano.function([layer.input], layer.get_output()) + self.assertTrue(np.all( + # get output for this input, replace padding with 0 + func(np.array( + [[[1, 1], [2, 1], [3, 1], [5, 5]], + [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)) == + # This is the expected output + np.array([[[1, 1], [2, 1], [3, 1], [0, 0]], + [[1, 5], [5, 0], [0, 0], [0, 0]]]))) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/auto/keras/layers/test_recurrent.py b/tests/auto/keras/layers/test_recurrent.py new file mode 100644 index 000000000000..bb2dfeee489a --- /dev/null +++ b/tests/auto/keras/layers/test_recurrent.py @@ -0,0 +1,59 @@ +import unittest +import numpy as np +import theano + +from keras.layers import recurrent + +nb_samples, timesteps, input_dim, output_dim = 3, 3, 10, 5 + + +def _runner(layer_class): + """ + All the recurrent layers share the same interface, so we can run through them with a single + function. 
+ """ + for weights in [None, [np.ones((input_dim, output_dim))]]: + for ret_seq in [True, False]: + layer = layer_class(input_dim, output_dim, return_sequences=ret_seq, weights=weights) + layer.input = theano.shared(value=np.ones((nb_samples, timesteps, input_dim))) + config = layer.get_config() + + for train in [True, False]: + out = layer.get_output(train).eval() + # Make sure the output has the desired shape + if ret_seq: + assert(out.shape == (nb_samples, timesteps, output_dim)) + else: + assert(out.shape == (nb_samples, output_dim)) + + mask = layer.get_output_mask(train) + + +class TestRNNS(unittest.TestCase): + """ + Test all the RNNs using a generic test runner function defined above. + """ + def test_simple(self): + _runner(recurrent.SimpleRNN) + + def test_simple_deep(self): + _runner(recurrent.SimpleDeepRNN) + + def test_gru(self): + _runner(recurrent.GRU) + + def test_lstm(self): + _runner(recurrent.LSTM) + + def test_jzs1(self): + _runner(recurrent.JZS1) + + def test_jzs2(self): + _runner(recurrent.JZS2) + + def test_jzs3(self): + _runner(recurrent.JZS3) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/auto/keras/test_activations.py b/tests/auto/keras/test_activations.py new file mode 100644 index 000000000000..60d908d8adc1 --- /dev/null +++ b/tests/auto/keras/test_activations.py @@ -0,0 +1,104 @@ +import math + +import keras +import theano +import theano.tensor as T + +import numpy + +def list_assert_equal(a, b, round_to=7): + ''' + This will do a pairwise, rounded equality test across two lists of + numbers. + ''' + pairs = zip(a, b) + for i, j in pairs: + assert round(i, round_to) == round(j, round_to) + +def get_standard_values(): + ''' + These are just a set of floats used for testing the activation + functions, and are useful in multiple tests. + ''' + + return [0,0.1,0.5,0.9,1.0] + +def test_softmax(): + + from keras.activations import softmax as s + + # Test using a reference implementation of softmax + def softmax(values): + m = max(values) + values = numpy.array(values) + e = numpy.exp(values - m) + dist = list(e / numpy.sum(e)) + + return dist + + x = T.vector() + exp = s(x) + f = theano.function([x], exp) + test_values=get_standard_values() + + result = f(test_values) + expected = softmax(test_values) + + print(str(result)) + print(str(expected)) + + list_assert_equal(result, expected) + +def test_relu(): + ''' + Relu implementation doesn't depend on the value being + a theano variable. Testing ints, floats and theano tensors. + ''' + + from keras.activations import relu as r + + assert r(5) == 5 + assert r(-5) == 0 + assert r(-0.1) == 0 + assert r(0.1) == 0.1 + + x = T.vector() + exp = r(x) + f = theano.function([x], exp) + + test_values = get_standard_values() + result = f(test_values) + + list_assert_equal(result, test_values) # because no negatives in test values + + +def test_tanh(): + + from keras.activations import tanh as t + test_values = get_standard_values() + + x = T.vector() + exp = t(x) + f = theano.function([x], exp) + + result = f(test_values) + expected = [math.tanh(v) for v in test_values] + + print(result) + print(expected) + + list_assert_equal(result, expected) + + +def test_linear(): + ''' + This function does no input validation, it just returns the thing + that was passed in. 
+ ''' + + from keras.activations import linear as l + + xs = [1, 5, True, None, 'foo'] + + for x in xs: + assert x == l(x) diff --git a/tests/auto/keras/test_constraints.py b/tests/auto/keras/test_constraints.py new file mode 100644 index 000000000000..dbfba0f73967 --- /dev/null +++ b/tests/auto/keras/test_constraints.py @@ -0,0 +1,69 @@ +import unittest +import numpy as np +from numpy.testing import assert_allclose +from theano import tensor as T + + +class TestConstraints(unittest.TestCase): + def setUp(self): + self.some_values = [0.1, 0.5, 3, 8, 1e-7] + np.random.seed(3537) + self.example_array = np.random.random((100, 100)) * 100. - 50. + self.example_array[0, 0] = 0. # 0 could possibly cause trouble + + def test_maxnorm(self): + from keras.constraints import maxnorm + + for m in self.some_values: + norm_instance = maxnorm(m) + normed = norm_instance(self.example_array) + assert (np.all(normed.eval() < m)) + + # a more explicit example + norm_instance = maxnorm(2.0) + x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T + x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0], [2.0, 0, 0], [2./np.sqrt(3), 2./np.sqrt(3), 2./np.sqrt(3)]]).T + x_normed_actual = norm_instance(x).eval() + assert_allclose(x_normed_actual, x_normed_target) + + def test_nonneg(self): + from keras.constraints import nonneg + + nonneg_instance = nonneg() + + normed = nonneg_instance(self.example_array) + assert (np.all(np.min(normed.eval(), axis=1) == 0.)) + + def test_identity(self): + from keras.constraints import identity + + identity_instance = identity() + + normed = identity_instance(self.example_array) + assert (np.all(normed == self.example_array)) + + def test_identity_oddballs(self): + """ + test the identity constraint on some more exotic input. + this does not need to pass for the desired real life behaviour, + but it should in the current implementation. + """ + from keras.constraints import identity + identity_instance = identity() + + oddball_examples = ["Hello", [1], -1, None] + assert(oddball_examples == identity_instance(oddball_examples)) + + def test_unitnorm(self): + from keras.constraints import unitnorm + unitnorm_instance = unitnorm() + + normalized = unitnorm_instance(self.example_array) + + norm_of_normalized = np.sqrt(np.sum(normalized.eval()**2, axis=1)) + difference = norm_of_normalized - 1. #in the unit norm constraint, it should be equal to 1. + largest_difference = np.max(np.abs(difference)) + self.assertAlmostEqual(largest_difference, 0.) 
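# Usage sketch, not part of the patch itself: a constraint such as unitnorm is
# attached to a layer's weights via W_constraint and is applied by the optimizer
# right after each parameter update (the c(new_p) step in keras/optimizers.py
# above). Shapes and data are illustrative; the pattern mirrors
# tests/auto/test_embeddings.py later in this diff.
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Activation
from keras.layers.embeddings import Embedding
from keras.constraints import unitnorm

model = Sequential()
# each row of the 3x2 embedding matrix is rescaled to unit L2 norm after every update
model.add(Embedding(3, 2, W_constraint=unitnorm()))
model.add(Flatten())
model.add(Dense(2, 1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
model.train_on_batch(np.array([[1], [2]], dtype='int32'),
                     np.array([[1], [0]], dtype='int32'))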
+ +if __name__ == '__main__': + unittest.main() diff --git a/tests/auto/keras/test_normalization.py b/tests/auto/keras/test_normalization.py new file mode 100644 index 000000000000..b84a2dcf24a5 --- /dev/null +++ b/tests/auto/keras/test_normalization.py @@ -0,0 +1,99 @@ +import unittest +import numpy as np +from numpy.testing import assert_allclose +from theano import tensor as T +from keras.layers import normalization +from keras.models import Sequential + + +class TestBatchNormalization(unittest.TestCase): + def setUp(self): + self.input_1 = np.arange(10) + self.input_2 = np.zeros(10) + self.input_3 = np.ones((10)) + + self.input_shapes = [np.ones((10, 10)), np.ones((10, 10, 10))] + + def test_setup(self): + norm_m0 = normalization.BatchNormalization((10, 10)) + norm_m1 = normalization.BatchNormalization((10, 10), mode=1) + + # mode 3 does not exist + self.assertRaises(Exception, normalization.BatchNormalization((10, 10), mode=3)) + + def test_mode_0(self): + model = Sequential() + norm_m0 = normalization.BatchNormalization((10,)) + model.add(norm_m0) + model.compile(loss='mse', optimizer='sgd') + + # centered on 5.0, variance 10.0 + X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10)) + model.fit(X, X, nb_epoch=5, verbose=0) + norm_m0.input = X + out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma + + self.assertAlmostEqual(out.mean().eval(), 0.0, places=1) + self.assertAlmostEqual(out.std().eval(), 1.0, places=1) + + def test_mode_1(self): + norm_m1 = normalization.BatchNormalization((10,), mode=1) + norm_m1.init_updates() + + for inp in [self.input_1, self.input_2, self.input_3]: + norm_m1.input = inp + out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma + self.assertAlmostEqual(out.mean().eval(), 0.0) + if inp.std() > 0.: + self.assertAlmostEqual(out.std().eval(), 1.0, places=2) + else: + self.assertAlmostEqual(out.std().eval(), 0.0, places=2) + + def test_shapes(self): + """ + Test batch normalization with various input shapes + """ + for inp in self.input_shapes: + norm_m0 = normalization.BatchNormalization(inp.shape, mode=0) + norm_m0.init_updates() + norm_m0.input = inp + out = (norm_m0.get_output(train=True) - norm_m0.beta) / norm_m0.gamma + + norm_m1 = normalization.BatchNormalization(inp.shape, mode=1) + norm_m1.input = inp + out = (norm_m1.get_output(train=True) - norm_m1.beta) / norm_m1.gamma + + def test_weight_init(self): + """ + Test weight initialization + """ + + norm_m1 = normalization.BatchNormalization((10,), mode=1, weights=[np.ones(10), np.ones(10)]) + norm_m1.init_updates() + + for inp in [self.input_1, self.input_2, self.input_3]: + norm_m1.input = inp + out = (norm_m1.get_output(train=True) - np.ones(10)) / 1. + self.assertAlmostEqual(out.mean().eval(), 0.0) + if inp.std() > 0.: + self.assertAlmostEqual(out.std().eval(), 1.0, places=2) + else: + self.assertAlmostEqual(out.std().eval(), 0.0, places=2) + + assert_allclose(norm_m1.gamma.eval(), np.ones(10)) + assert_allclose(norm_m1.beta.eval(), np.ones(10)) + + # Weights must be an iterable of gamma AND beta. 
+ self.assertRaises(Exception, normalization.BatchNormalization((10,)), weights=np.ones(10)) + + def test_config(self): + norm = normalization.BatchNormalization((10, 10), mode=1, epsilon=0.1) + conf = norm.get_config() + conf_target = {"input_shape": (10, 10), "name": normalization.BatchNormalization.__name__, + "epsilon": 0.1, "mode": 1} + + self.assertDictEqual(conf, conf_target) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/auto/test_datasets.py b/tests/auto/test_datasets.py new file mode 100644 index 000000000000..9f1676fe71a1 --- /dev/null +++ b/tests/auto/test_datasets.py @@ -0,0 +1,48 @@ +from __future__ import print_function +import unittest +from keras.datasets import cifar10, cifar100, reuters, imdb, mnist + + +class TestDatasets(unittest.TestCase): + def test_cifar(self): + print('cifar10') + (X_train, y_train), (X_test, y_test) = cifar10.load_data() + print(X_train.shape) + print(X_test.shape) + print(y_train.shape) + print(y_test.shape) + + print('cifar100 fine') + (X_train, y_train), (X_test, y_test) = cifar100.load_data('fine') + print(X_train.shape) + print(X_test.shape) + print(y_train.shape) + print(y_test.shape) + + print('cifar100 coarse') + (X_train, y_train), (X_test, y_test) = cifar100.load_data('coarse') + print(X_train.shape) + print(X_test.shape) + print(y_train.shape) + print(y_test.shape) + + def test_reuters(self): + print('reuters') + (X_train, y_train), (X_test, y_test) = reuters.load_data() + + def test_mnist(self): + print('mnist') + (X_train, y_train), (X_test, y_test) = mnist.load_data() + print(X_train.shape) + print(X_test.shape) + print(y_train.shape) + print(y_test.shape) + + def test_imdb(self): + print('imdb') + (X_train, y_train), (X_test, y_test) = imdb.load_data() + + +if __name__ == '__main__': + print('Test datasets') + unittest.main() diff --git a/tests/auto/test_embeddings.py b/tests/auto/test_embeddings.py index 7b8308eeb192..d193d850b49c 100644 --- a/tests/auto/test_embeddings.py +++ b/tests/auto/test_embeddings.py @@ -6,22 +6,22 @@ from theano import function from keras.constraints import unitnorm -class TestConcatenation(unittest.TestCase): +class TestEmbedding(unittest.TestCase): def setUp(self): self.X1 = np.array([[1], [2]], dtype='int32') self.W1 = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype='float32') def test_unitnorm_constraint(self): lookup = Sequential() - lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm)) + lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm())) lookup.add(Flatten()) lookup.add(Dense(2, 1)) lookup.add(Activation('sigmoid')) lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary') - lookup.train(self.X1, np.array([[1], [0]], dtype='int32')) + lookup.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32')) norm = np.linalg.norm(lookup.params[0].get_value(), axis=1) self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32'))) if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() diff --git a/tests/auto/test_graph_model.py b/tests/auto/test_graph_model.py new file mode 100644 index 000000000000..a7fce2e7a7bf --- /dev/null +++ b/tests/auto/test_graph_model.py @@ -0,0 +1,213 @@ +from __future__ import print_function +import unittest +import numpy as np +np.random.seed(1337) + +from keras.models import Graph, Sequential +from keras.layers import containers +from keras.layers.core import Dense, Activation +from keras.utils.test_utils import get_test_data + +X = 
np.random.random((100, 32)) +X2 = np.random.random((100, 32)) +y = np.random.random((100, 4)) +y2 = np.random.random((100,)) + +(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,), + classification=False, output_shape=(4,)) +(X2_train, y2_train), (X2_test, y2_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,), + classification=False, output_shape=(1,)) + + +class TestGraph(unittest.TestCase): + def test_1o_1i(self): + print('test a non-sequential graph with 1 input and 1 output') + graph = Graph() + graph.add_input(name='input1', ndim=2) + + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2', input='input1') + graph.add_node(Dense(16, 4), name='dense3', input='dense1') + + graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum') + graph.compile('rmsprop', {'output1': 'mse'}) + + history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10) + out = graph.predict({'input1': X_test}) + assert(type(out == dict)) + assert(len(out) == 1) + loss = graph.test_on_batch({'input1': X_test, 'output1': y_test}) + loss = graph.train_on_batch({'input1': X_test, 'output1': y_test}) + loss = graph.evaluate({'input1': X_test, 'output1': y_test}) + print(loss) + assert(loss < 2.5) + + def test_1o_1i_2(self): + print('test a more complex non-sequential graph with 1 input and 1 output') + graph = Graph() + graph.add_input(name='input1', ndim=2) + + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2-0', input='input1') + graph.add_node(Activation('relu'), name='dense2', input='dense2-0') + + graph.add_node(Dense(4, 16), name='dense3', input='dense2') + graph.add_node(Dense(16, 4), name='dense4', inputs=['dense1', 'dense3'], merge_mode='sum') + + graph.add_output(name='output1', inputs=['dense2', 'dense4'], merge_mode='sum') + graph.compile('rmsprop', {'output1': 'mse'}) + + history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10) + out = graph.predict({'input1': X_train}) + assert(type(out == dict)) + assert(len(out) == 1) + loss = graph.test_on_batch({'input1': X_test, 'output1': y_test}) + loss = graph.train_on_batch({'input1': X_test, 'output1': y_test}) + loss = graph.evaluate({'input1': X_test, 'output1': y_test}) + print(loss) + assert(loss < 2.5) + graph.get_config(verbose=1) + + def test_1o_2i(self): + print('test a non-sequential graph with 2 inputs and 1 output') + graph = Graph() + graph.add_input(name='input1', ndim=2) + graph.add_input(name='input2', ndim=2) + + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2', input='input2') + graph.add_node(Dense(16, 4), name='dense3', input='dense1') + + graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum') + graph.compile('rmsprop', {'output1': 'mse'}) + + history = graph.fit({'input1': X_train, 'input2': X2_train, 'output1': y_train}, nb_epoch=10) + out = graph.predict({'input1': X_test, 'input2': X2_test}) + assert(type(out == dict)) + assert(len(out) == 1) + loss = graph.test_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test}) + loss = graph.train_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test}) + loss = graph.evaluate({'input1': X_test, 'input2': X2_test, 'output1': y_test}) + print(loss) + assert(loss < 3.0) + graph.get_config(verbose=1) + + def test_2o_1i_weights(self): + print('test a non-sequential graph with 1 input 
and 2 outputs') + graph = Graph() + graph.add_input(name='input1', ndim=2) + + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2', input='input1') + graph.add_node(Dense(16, 1), name='dense3', input='dense1') + + graph.add_output(name='output1', input='dense2') + graph.add_output(name='output2', input='dense3') + graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'}) + + history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10) + out = graph.predict({'input1': X_test}) + assert(type(out == dict)) + assert(len(out) == 2) + loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test}) + loss = graph.train_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test}) + loss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test}) + print(loss) + assert(loss < 4.) + + print('test weight saving') + graph.save_weights('temp.h5', overwrite=True) + graph = Graph() + graph.add_input(name='input1', ndim=2) + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2', input='input1') + graph.add_node(Dense(16, 1), name='dense3', input='dense1') + graph.add_output(name='output1', input='dense2') + graph.add_output(name='output2', input='dense3') + graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'}) + graph.load_weights('temp.h5') + nloss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test}) + print(nloss) + assert(loss == nloss) + + def test_2o_1i_sample_weights(self): + print('test a non-sequential graph with 1 input and 2 outputs with sample weights') + graph = Graph() + graph.add_input(name='input1', ndim=2) + + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2', input='input1') + graph.add_node(Dense(16, 1), name='dense3', input='dense1') + + graph.add_output(name='output1', input='dense2') + graph.add_output(name='output2', input='dense3') + + weights1 = np.random.uniform(size=y_train.shape[0]) + weights2 = np.random.uniform(size=y2_train.shape[0]) + + graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'}) + + history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10, + sample_weight={'output1': weights1, 'output2': weights2}) + out = graph.predict({'input1': X_test}) + assert(type(out == dict)) + assert(len(out) == 2) + loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test}, + sample_weight={'output1': weights1, 'output2': weights2}) + loss = graph.train_on_batch({'input1': X_train, 'output1': y_train, 'output2': y2_train}, + sample_weight={'output1': weights1, 'output2': weights2}) + loss = graph.evaluate({'input1': X_train, 'output1': y_train, 'output2': y2_train}, + sample_weight={'output1': weights1, 'output2': weights2}) + print(loss) + + def test_recursive(self): + print('test layer-like API') + + graph = containers.Graph() + graph.add_input(name='input1', ndim=2) + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2', input='input1') + graph.add_node(Dense(16, 4), name='dense3', input='dense1') + graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum') + + seq = Sequential() + seq.add(Dense(32, 32, name='first_seq_dense')) + seq.add(graph) + seq.add(Dense(4, 4, name='last_seq_dense')) + + seq.compile('rmsprop', 'mse') + + history = seq.fit(X_train, y_train, 
batch_size=10, nb_epoch=10) + loss = seq.evaluate(X_test, y_test) + print(loss) + assert(loss < 2.5) + + loss = seq.evaluate(X_test, y_test, show_accuracy=True) + pred = seq.predict(X_test) + seq.get_config(verbose=1) + + def test_create_output(self): + print('test create_output argument') + graph = Graph() + graph.add_input(name='input1', ndim=2) + + graph.add_node(Dense(32, 16), name='dense1', input='input1') + graph.add_node(Dense(32, 4), name='dense2', input='input1') + graph.add_node(Dense(16, 4), name='dense3', input='dense1') + graph.add_node(Dense(4, 4), name='output1', inputs=['dense2', 'dense3'], merge_mode='sum', create_output=True) + graph.compile('rmsprop', {'output1': 'mse'}) + + history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10) + out = graph.predict({'input1': X_test}) + assert(type(out == dict)) + assert(len(out) == 1) + loss = graph.test_on_batch({'input1': X_test, 'output1': y_test}) + loss = graph.train_on_batch({'input1': X_test, 'output1': y_test}) + loss = graph.evaluate({'input1': X_test, 'output1': y_test}) + print(loss) + assert(loss < 2.5) + + +if __name__ == '__main__': + print('Test graph model') + unittest.main() diff --git a/tests/auto/test_loss_weighting.py b/tests/auto/test_loss_weighting.py new file mode 100644 index 000000000000..01867730dcbf --- /dev/null +++ b/tests/auto/test_loss_weighting.py @@ -0,0 +1,129 @@ +from __future__ import absolute_import +from __future__ import print_function + +import numpy as np +np.random.seed(1336) # for reproducibility + +from keras.datasets import mnist +from keras.models import Sequential, Graph +from keras.layers.core import Dense, Activation +from keras.utils import np_utils +import unittest + +nb_classes = 10 +batch_size = 128 +nb_epoch = 5 +weighted_class = 9 +standard_weight = 1 +high_weight = 5 +max_train_samples = 5000 +max_test_samples = 1000 + +# the data, shuffled and split between tran and test sets +(X_train, y_train), (X_test, y_test) = mnist.load_data() +X_train = X_train.reshape(60000, 784)[:max_train_samples] +X_test = X_test.reshape(10000, 784)[:max_test_samples] +X_train = X_train.astype("float32") / 255 +X_test = X_test.astype("float32") / 255 + +# convert class vectors to binary class matrices +y_train = y_train[:max_train_samples] +y_test = y_test[:max_test_samples] +Y_train = np_utils.to_categorical(y_train, nb_classes) +Y_test = np_utils.to_categorical(y_test, nb_classes) +test_ids = np.where(y_test == np.array(weighted_class))[0] + +class_weight = dict([(i, standard_weight) for i in range(nb_classes)]) +class_weight[weighted_class] = high_weight + +sample_weight = np.ones((y_train.shape[0])) * standard_weight +sample_weight[y_train == weighted_class] = high_weight + + +def create_sequential_model(): + model = Sequential() + model.add(Dense(784, 50)) + model.add(Activation('relu')) + model.add(Dense(50, 10)) + model.add(Activation('softmax')) + return model + + +def create_graph_model(): + model = Graph() + model.add_input(name='input') + model.add_node(Dense(784, 50, activation='relu'), name='d1', input='input') + model.add_node(Dense(50, 10, activation='softmax'), name='d2', input='d1') + model.add_output(name='output', input='d2') + return model + + +def _test_weights_sequential(model, class_weight=None, sample_weight=None): + model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, + class_weight=class_weight, sample_weight=sample_weight) + model.train_on_batch(X_train[:32], Y_train[:32], + class_weight=class_weight, 
sample_weight=sample_weight[:32] if sample_weight is not None else None) + model.test_on_batch(X_train[:32], Y_train[:32], + sample_weight=sample_weight[:32] if sample_weight is not None else None) + score = model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) + return score + + +def _test_weights_graph(model, class_weight=None, sample_weight=None): + model.fit({'input': X_train, 'output': Y_train}, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, + class_weight={'output': class_weight}, sample_weight={'output': sample_weight}) + model.train_on_batch({'input': X_train[:32], 'output': Y_train[:32]}, + class_weight={'output': class_weight}, sample_weight={'output': sample_weight[:32] if sample_weight is not None else None}) + model.test_on_batch({'input': X_train[:32], 'output': Y_train[:32]}, + sample_weight={'output': sample_weight[:32] if sample_weight is not None else None}) + score = model.evaluate({'input': X_test[test_ids, :], 'output': Y_test[test_ids, :]}, verbose=0) + return score + + +class TestLossWeighting(unittest.TestCase): + def test_sequential(self): + for loss in ['mae', 'mse', 'categorical_crossentropy']: + print('loss:', loss) + print('sequential') + # no weights: reference point + model = create_sequential_model() + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + standard_score = _test_weights_sequential(model) + # test class_weight + model = create_sequential_model() + model.compile(loss=loss, optimizer='rmsprop') + score = _test_weights_sequential(model, class_weight=class_weight) + print('score:', score, ' vs.', standard_score) + self.assertTrue(score < standard_score) + # test sample_weight + model = create_sequential_model() + model.compile(loss=loss, optimizer='rmsprop') + score = _test_weights_sequential(model, sample_weight=sample_weight) + print('score:', score, ' vs.', standard_score) + self.assertTrue(score < standard_score) + + def test_graph(self): + for loss in ['mae', 'mse', 'categorical_crossentropy']: + print('loss:', loss) + print('graph') + # no weights: reference point + model = create_graph_model() + model.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop') + standard_score = _test_weights_graph(model) + # test class_weight + model = create_graph_model() + model.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop') + score = _test_weights_graph(model, class_weight=class_weight) + print('score:', score, ' vs.', standard_score) + self.assertTrue(score < standard_score) + # test sample_weight + model = create_graph_model() + model.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop') + score = _test_weights_graph(model, sample_weight=sample_weight) + print('score:', score, ' vs.', standard_score) + self.assertTrue(score < standard_score) + + +if __name__ == '__main__': + print('Test class_weight and sample_weight') + unittest.main() diff --git a/tests/auto/test_lossweights.py b/tests/auto/test_lossweights.py deleted file mode 100644 index caff3ddb5211..000000000000 --- a/tests/auto/test_lossweights.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import absolute_import -from __future__ import print_function -from keras.datasets import mnist -from keras.models import Sequential -from keras.layers.core import Dense, Activation -from keras.utils import np_utils -import numpy as np -import unittest - -nb_classes = 10 -batch_size = 128 -nb_epoch = 5 -weighted_class = 9 -standard_weight = 1 -high_weight = 5 -max_train_samples = 5000 -max_test_samples = 
1000 - -np.random.seed(1337) # for reproducibility - -# the data, shuffled and split between tran and test sets -(X_train, y_train), (X_test, y_test) = mnist.load_data() -X_train = X_train.reshape(60000, 784)[:max_train_samples] -X_test = X_test.reshape(10000, 784)[:max_test_samples] -X_train = X_train.astype("float32") / 255 -X_test = X_test.astype("float32") / 255 - -# convert class vectors to binary class matrices -y_train = y_train[:max_train_samples] -y_test = y_test[:max_test_samples] -Y_train = np_utils.to_categorical(y_train, nb_classes) -Y_test = np_utils.to_categorical(y_test, nb_classes) -test_ids = np.where(y_test == np.array(weighted_class))[0] - -def create_model(): - model = Sequential() - model.add(Dense(784, 50)) - model.add(Activation('relu')) - model.add(Dense(50, 10)) - model.add(Activation('softmax')) - return model - -def test_weights(model, class_weight=None, sample_weight=None): - model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, \ - class_weight=class_weight, sample_weight=sample_weight) - score = model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) - return score - -class TestConcatenation(unittest.TestCase): - - def test_loss_weighting(self): - class_weight = dict([(i, standard_weight) for i in range(nb_classes)]) - class_weight[weighted_class] = high_weight - - sample_weight = np.ones((y_train.shape[0])) * standard_weight - sample_weight[y_train == weighted_class] = high_weight - - for loss in ['mae', 'mse', 'categorical_crossentropy']: - print('loss:', loss) - # no weights: reference point - model = create_model() - model.compile(loss='categorical_crossentropy', optimizer='rmsprop') - standard_score = test_weights(model) - # test class_weight - model = create_model() - model.compile(loss=loss, optimizer='rmsprop') - score = test_weights(model, class_weight=class_weight) - print('score:', score, ' vs.', standard_score) - self.assertTrue(score < standard_score) - # test sample_weight - model = create_model() - model.compile(loss=loss, optimizer='rmsprop') - score = test_weights(model, sample_weight=sample_weight) - print('score:', score, ' vs.', standard_score) - self.assertTrue(score < standard_score) - -if __name__ == '__main__': - print('Test class_weight and sample_weight') - unittest.main() \ No newline at end of file diff --git a/tests/auto/test_optimizers.py b/tests/auto/test_optimizers.py new file mode 100644 index 000000000000..118a59595f5e --- /dev/null +++ b/tests/auto/test_optimizers.py @@ -0,0 +1,59 @@ +from __future__ import print_function +import numpy as np +np.random.seed(1337) + +from keras.utils.test_utils import get_test_data +from keras.optimizers import SGD, RMSprop, Adagrad, Adadelta, Adam +from keras.models import Sequential +from keras.layers.core import Dense, Activation +from keras.utils.np_utils import to_categorical +import unittest + + +(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(10,), + classification=True, nb_class=2) +y_train = to_categorical(y_train) +y_test = to_categorical(y_test) + + +def get_model(input_dim, nb_hidden, output_dim): + model = Sequential() + model.add(Dense(input_dim, nb_hidden)) + model.add(Activation('relu')) + model.add(Dense(nb_hidden, output_dim)) + model.add(Activation('softmax')) + return model + + +def _test_optimizer(optimizer, target=0.9): + model = get_model(X_train.shape[1], 10, y_train.shape[1]) + model.compile(loss='categorical_crossentropy', optimizer=optimizer) + history = model.fit(X_train, y_train, 
nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2) + return history.history['val_acc'][-1] > target + + +class TestOptimizers(unittest.TestCase): + def test_sgd(self): + print('test SGD') + sgd = SGD(lr=0.01, momentum=0.9, nesterov=True) + self.assertTrue(_test_optimizer(sgd)) + + def test_rmsprop(self): + print('test RMSprop') + self.assertTrue(_test_optimizer(RMSprop())) + + def test_adagrad(self): + print('test Adagrad') + self.assertTrue(_test_optimizer(Adagrad())) + + def test_adadelta(self): + print('test Adadelta') + self.assertTrue(_test_optimizer(Adadelta())) + + def test_adam(self): + print('test Adam') + self.assertTrue(_test_optimizer(Adam())) + +if __name__ == '__main__': + print('Test optimizers') + unittest.main() diff --git a/tests/auto/test_regularizers.py b/tests/auto/test_regularizers.py new file mode 100644 index 000000000000..b4e1374556a2 --- /dev/null +++ b/tests/auto/test_regularizers.py @@ -0,0 +1,62 @@ +import unittest +import numpy as np +np.random.seed(1337) # for reproducibility + +from keras.models import Sequential +from keras.layers.core import Merge, Dense, Activation, Flatten, ActivityRegularization +from keras.layers.embeddings import Embedding +from keras.datasets import mnist +from keras.utils import np_utils +from keras import regularizers + +nb_classes = 10 +batch_size = 128 +nb_epoch = 5 +weighted_class = 9 +standard_weight = 1 +high_weight = 5 +max_train_samples = 5000 +max_test_samples = 1000 + +# the data, shuffled and split between tran and test sets +(X_train, y_train), (X_test, y_test) = mnist.load_data() +X_train = X_train.reshape(60000, 784)[:max_train_samples] +X_test = X_test.reshape(10000, 784)[:max_test_samples] +X_train = X_train.astype("float32") / 255 +X_test = X_test.astype("float32") / 255 + +# convert class vectors to binary class matrices +y_train = y_train[:max_train_samples] +y_test = y_test[:max_test_samples] +Y_train = np_utils.to_categorical(y_train, nb_classes) +Y_test = np_utils.to_categorical(y_test, nb_classes) +test_ids = np.where(y_test == np.array(weighted_class))[0] + + +def create_model(weight_reg=None, activity_reg=None): + model = Sequential() + model.add(Dense(784, 50)) + model.add(Activation('relu')) + model.add(Dense(50, 10, W_regularizer=weight_reg, activity_regularizer=activity_reg)) + model.add(Activation('softmax')) + return model + + +class TestRegularizers(unittest.TestCase): + def test_W_reg(self): + for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]: + model = create_model(weight_reg=reg) + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) + model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) + + def test_A_reg(self): + for reg in [regularizers.activity_l1(), regularizers.activity_l2()]: + model = create_model(activity_reg=reg) + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) + model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0) + +if __name__ == '__main__': + print('Test weight and activity regularizers') + unittest.main() diff --git a/tests/auto/test_sequential_model.py b/tests/auto/test_sequential_model.py new file mode 100644 index 000000000000..c66346cc43e4 --- /dev/null +++ b/tests/auto/test_sequential_model.py @@ -0,0 +1,274 @@ +from __future__ import absolute_import +from 
__future__ import print_function +import unittest +import numpy as np +np.random.seed(1337) + +from keras.models import Sequential +from keras.layers.core import Dense, Activation, Merge +from keras.utils import np_utils +from keras.utils.test_utils import get_test_data + +input_dim = 32 +nb_hidden = 16 +nb_class = 4 +batch_size = 64 +nb_epoch = 1 + +train_samples = 5000 +test_samples = 1000 + +(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples, nb_test=test_samples, input_shape=(input_dim,), + classification=True, nb_class=4) +y_test = np_utils.to_categorical(y_test) +y_train = np_utils.to_categorical(y_train) +print(X_train.shape) +print(y_train.shape) + + +class TestSequential(unittest.TestCase): + def test_sequential(self): + print('Test sequential') + model = Sequential() + model.add(Dense(input_dim, nb_hidden)) + model.add(Activation('relu')) + model.add(Dense(nb_hidden, nb_class)) + model.add(Activation('softmax')) + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test)) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test)) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False) + + model.train_on_batch(X_train[:32], y_train[:32]) + + loss = model.evaluate(X_train, y_train, verbose=0) + print('loss:', loss) + if loss > 0.6: + raise Exception('Score too low, learning issue.') + preds = model.predict(X_test, verbose=0) + classes = model.predict_classes(X_test, verbose=0) + probas = model.predict_proba(X_test, verbose=0) + print(model.get_config(verbose=1)) + + print('test weight saving') + model.save_weights('temp.h5', overwrite=True) + model = Sequential() + model.add(Dense(input_dim, nb_hidden)) + model.add(Activation('relu')) + model.add(Dense(nb_hidden, nb_class)) + model.add(Activation('softmax')) + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + model.load_weights('temp.h5') + + nloss = model.evaluate(X_train, y_train, verbose=0) + print(nloss) + assert(loss == nloss) + + def test_merge_sum(self): + print('Test merge: sum') + left = Sequential() + left.add(Dense(input_dim, nb_hidden)) + left.add(Activation('relu')) + + right = Sequential() + right.add(Dense(input_dim, nb_hidden)) + right.add(Activation('relu')) + + model = Sequential() + model.add(Merge([left, right], mode='sum')) + + model.add(Dense(nb_hidden, nb_class)) + model.add(Activation('softmax')) + + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test)) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test)) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) + model.fit([X_train, X_train], y_train, 
batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) + + loss = model.evaluate([X_train, X_train], y_train, verbose=0) + print('loss:', loss) + if loss > 0.7: + raise Exception('Score too low, learning issue.') + preds = model.predict([X_test, X_test], verbose=0) + classes = model.predict_classes([X_test, X_test], verbose=0) + probas = model.predict_proba([X_test, X_test], verbose=0) + print(model.get_config(verbose=1)) + + print('test weight saving') + model.save_weights('temp.h5', overwrite=True) + left = Sequential() + left.add(Dense(input_dim, nb_hidden)) + left.add(Activation('relu')) + right = Sequential() + right.add(Dense(input_dim, nb_hidden)) + right.add(Activation('relu')) + model = Sequential() + model.add(Merge([left, right], mode='sum')) + model.add(Dense(nb_hidden, nb_class)) + model.add(Activation('softmax')) + model.load_weights('temp.h5') + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + + nloss = model.evaluate([X_train, X_train], y_train, verbose=0) + print(nloss) + assert(loss == nloss) + + def test_merge_concat(self): + print('Test merge: concat') + left = Sequential() + left.add(Dense(input_dim, nb_hidden)) + left.add(Activation('relu')) + + right = Sequential() + right.add(Dense(input_dim, nb_hidden)) + right.add(Activation('relu')) + + model = Sequential() + model.add(Merge([left, right], mode='concat')) + + model.add(Dense(nb_hidden * 2, nb_class)) + model.add(Activation('softmax')) + + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test)) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test)) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) + model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) + + loss = model.evaluate([X_train, X_train], y_train, verbose=0) + print('loss:', loss) + if loss > 0.6: + raise Exception('Score too low, learning issue.') + preds = model.predict([X_test, X_test], verbose=0) + classes = model.predict_classes([X_test, X_test], verbose=0) + probas = model.predict_proba([X_test, X_test], verbose=0) + print(model.get_config(verbose=1)) + + print('test weight saving') + model.save_weights('temp.h5', overwrite=True) + left = Sequential() + left.add(Dense(input_dim, nb_hidden)) + left.add(Activation('relu')) + + right = Sequential() + right.add(Dense(input_dim, nb_hidden)) + right.add(Activation('relu')) + + model = Sequential() + model.add(Merge([left, right], mode='concat')) + + model.add(Dense(nb_hidden * 2, nb_class)) + model.add(Activation('softmax')) + + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + model.load_weights('temp.h5') + + nloss = model.evaluate([X_train, X_train], y_train, verbose=0) + print(nloss) + 
assert(loss == nloss) + + def test_merge_recursivity(self): + print('Test merge recursivity') + + left = Sequential() + left.add(Dense(input_dim, nb_hidden)) + left.add(Activation('relu')) + + right = Sequential() + right.add(Dense(input_dim, nb_hidden)) + right.add(Activation('relu')) + + righter = Sequential() + righter.add(Dense(input_dim, nb_hidden)) + righter.add(Activation('relu')) + + intermediate = Sequential() + intermediate.add(Merge([left, right], mode='sum')) + intermediate.add(Dense(nb_hidden, nb_hidden)) + intermediate.add(Activation('relu')) + + model = Sequential() + model.add(Merge([intermediate, righter], mode='sum')) + + model.add(Dense(nb_hidden, nb_class)) + model.add(Activation('softmax')) + + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + + model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test)) + model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test)) + model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) + model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) + model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) + model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) + + loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0) + print('loss:', loss) + if loss > 0.6: + raise Exception('Score too low, learning issue.') + preds = model.predict([X_test, X_test, X_test], verbose=0) + classes = model.predict_classes([X_test, X_test, X_test], verbose=0) + probas = model.predict_proba([X_test, X_test, X_test], verbose=0) + print(model.get_config(verbose=1)) + + model.save_weights('temp.h5', overwrite=True) + model.load_weights('temp.h5') + + nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0) + print(nloss) + assert(loss == nloss) + + def test_merge_overlap(self): + print('Test merge overlap') + left = Sequential() + left.add(Dense(input_dim, nb_hidden)) + left.add(Activation('relu')) + + model = Sequential() + model.add(Merge([left, left], mode='sum')) + + model.add(Dense(nb_hidden, nb_class)) + model.add(Activation('softmax')) + + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test)) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test)) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) + model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False) + + model.train_on_batch(X_train[:32], y_train[:32]) + + loss = model.evaluate(X_train, y_train, verbose=0) + print('loss:', loss) + if loss > 0.6: + raise Exception('Score too low, learning issue.') + preds = 
model.predict(X_test, verbose=0) + classes = model.predict_classes(X_test, verbose=0) + probas = model.predict_proba(X_test, verbose=0) + print(model.get_config(verbose=1)) + + model.save_weights('temp.h5', overwrite=True) + model.load_weights('temp.h5') + + nloss = model.evaluate(X_train, y_train, verbose=0) + print(nloss) + assert(loss == nloss) + + +if __name__ == '__main__': + print('Test Sequential model') + unittest.main() diff --git a/tests/auto/test_tasks.py b/tests/auto/test_tasks.py new file mode 100644 index 000000000000..4159b1544c26 --- /dev/null +++ b/tests/auto/test_tasks.py @@ -0,0 +1,131 @@ +from __future__ import print_function +import numpy as np +np.random.seed(1337) + +from keras.utils.test_utils import get_test_data +from keras.models import Sequential +from keras.layers.core import Dense, Activation, TimeDistributedDense, Flatten +from keras.layers.recurrent import GRU +from keras.layers.convolutional import Convolution2D +from keras.utils.np_utils import to_categorical +import unittest + + +class TestRegularizers(unittest.TestCase): + def test_vector_clf(self): + nb_hidden = 10 + + print('vector classification data:') + (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(10,), + classification=True, nb_class=2) + print('X_train:', X_train.shape) + print('X_test:', X_test.shape) + print('y_train:', y_train.shape) + print('y_test:', y_test.shape) + + y_train = to_categorical(y_train) + y_test = to_categorical(y_test) + + model = Sequential() + model.add(Dense(X_train.shape[-1], nb_hidden)) + model.add(Activation('relu')) + model.add(Dense(nb_hidden, y_train.shape[-1])) + model.add(Activation('softmax')) + model.compile(loss='categorical_crossentropy', optimizer='rmsprop') + history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2) + print(history.history) + self.assertTrue(history.history['val_acc'][-1] > 0.9) + + def test_vector_reg(self): + nb_hidden = 10 + print('vector regression data:') + (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(10,), output_shape=(2,), + classification=False) + print('X_train:', X_train.shape) + print('X_test:', X_test.shape) + print('y_train:', y_train.shape) + print('y_test:', y_test.shape) + + model = Sequential() + model.add(Dense(X_train.shape[-1], nb_hidden)) + model.add(Activation('tanh')) + model.add(Dense(nb_hidden, y_train.shape[-1])) + model.compile(loss='hinge', optimizer='adagrad') + history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2) + self.assertTrue(history.history['val_loss'][-1] < 0.9) + + def test_temporal_clf(self): + print('temporal classification data:') + (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5,10), + classification=True, nb_class=2) + print('X_train:', X_train.shape) + print('X_test:', X_test.shape) + print('y_train:', y_train.shape) + print('y_test:', y_test.shape) + + y_train = to_categorical(y_train) + y_test = to_categorical(y_test) + + model = Sequential() + model.add(GRU(X_train.shape[-1], y_train.shape[-1])) + model.add(Activation('softmax')) + model.compile(loss='categorical_crossentropy', optimizer='adadelta') + history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2) + self.assertTrue(history.history['val_acc'][-1] > 0.9) + + def test_temporal_reg(self): + 
print('temporal regression data:') + (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5, 10), output_shape=(2,), + classification=False) + print('X_train:', X_train.shape) + print('X_test:', X_test.shape) + print('y_train:', y_train.shape) + print('y_test:', y_test.shape) + + model = Sequential() + model.add(GRU(X_train.shape[-1], y_train.shape[-1])) + model.compile(loss='hinge', optimizer='adam') + history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2) + self.assertTrue(history.history['val_loss'][-1] < 0.8) + + def test_seq_to_seq(self): + print('sequence to sequence data:') + (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(5, 10), output_shape=(5, 10), + classification=False) + print('X_train:', X_train.shape) + print('X_test:', X_test.shape) + print('y_train:', y_train.shape) + print('y_test:', y_test.shape) + + model = Sequential() + model.add(TimeDistributedDense(X_train.shape[-1], y_train.shape[-1])) + model.compile(loss='hinge', optimizer='rmsprop') + history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), verbose=2) + self.assertTrue(history.history['val_loss'][-1] < 0.75) + + def test_img_clf(self): + print('image classification data:') + (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(3, 32, 32), + classification=True, nb_class=2) + print('X_train:', X_train.shape) + print('X_test:', X_test.shape) + print('y_train:', y_train.shape) + print('y_test:', y_test.shape) + + y_train = to_categorical(y_train) + y_test = to_categorical(y_test) + + model = Sequential() + model.add(Convolution2D(32, 3, 32, 32)) + model.add(Activation('sigmoid')) + model.add(Flatten()) + model.add(Dense(32, y_test.shape[-1])) + model.add(Activation('softmax')) + model.compile(loss='categorical_crossentropy', optimizer='sgd') + history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16, validation_data=(X_test, y_test), show_accuracy=True, verbose=2) + self.assertTrue(history.history['val_acc'][-1] > 0.9) + + +if __name__ == '__main__': + print('Test different types of classification and regression tasks') + unittest.main() diff --git a/tests/manual/__init__.py b/tests/manual/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/manual/check_autoencoder.py b/tests/manual/check_autoencoder.py index 21e2f183f694..0d221adea267 100644 --- a/tests/manual/check_autoencoder.py +++ b/tests/manual/check_autoencoder.py @@ -2,7 +2,7 @@ from __future__ import print_function from keras.datasets import mnist from keras.models import Sequential -from keras.layers.core import DenoisingAutoEncoder, AutoEncoder, Dense, Activation, TimeDistributedDense, Flatten +from keras.layers.core import AutoEncoder, Dense, Activation, TimeDistributedDense, Flatten from keras.layers.recurrent import LSTM from keras.layers.embeddings import Embedding from keras.layers.core import Layer @@ -58,97 +58,73 @@ ########################## def build_lstm_autoencoder(autoencoder, X_train, X_test): - X_train = X_train[:, np.newaxis, :] - X_test = X_test[:, np.newaxis, :] - print("Modified X_train: ", X_train.shape) - print("Modified X_test: ", X_test.shape) - - # The TimeDistributedDense isn't really necessary, however you need a lot of GPU memory to do 784x394-394x784 - autoencoder.add(TimeDistributedDense(input_dim, 16)) - autoencoder.add(AutoEncoder(encoder=LSTM(16, 8, 
activation=activation, return_sequences=True), - decoder=LSTM(8, input_dim, activation=activation, return_sequences=True), - output_reconstruction=False, tie_weights=True)) - return autoencoder, X_train, X_test + X_train = X_train[:, np.newaxis, :] + X_test = X_test[:, np.newaxis, :] + print("Modified X_train: ", X_train.shape) + print("Modified X_test: ", X_test.shape) -def build_deep_classical_autoencoder(autoencoder): - encoder = containers.Sequential([Dense(input_dim, hidden_dim, activation=activation), Dense(hidden_dim, hidden_dim/2, activation=activation)]) - decoder = containers.Sequential([Dense(hidden_dim/2, hidden_dim, activation=activation), Dense(hidden_dim, input_dim, activation=activation)]) - autoencoder.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False, tie_weights=True)) - return autoencoder - -def build_denoising_autoencoder(autoencoder): - # You need another layer before a denoising autoencoder - # This is similar to the dropout layers, etc.. - autoencoder.add(Dense(input_dim, input_dim)) - autoencoder.add(DenoisingAutoEncoder(encoder=Dense(input_dim, hidden_dim, activation=activation), - decoder=Dense(hidden_dim, input_dim, activation=activation), - output_reconstruction=False, tie_weights=True, corruption_level=0.3)) - return autoencoder - -def build_deep_denoising_autoencoder(autoencoder): - encoder = containers.Sequential([Dense(input_dim, hidden_dim, activation=activation), Dense(hidden_dim, hidden_dim/2, activation=activation)]) - decoder = containers.Sequential([Dense(hidden_dim/2, hidden_dim, activation=activation), Dense(hidden_dim, input_dim, activation=activation)]) - autoencoder.add(Dense(input_dim, input_dim)) - autoencoder.add(DenoisingAutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False, tie_weights=True)) - return autoencoder + # The TimeDistributedDense isn't really necessary, however you need a lot of GPU memory to do 784x394-394x784 + autoencoder.add(TimeDistributedDense(input_dim, 16)) + autoencoder.add(AutoEncoder(encoder=LSTM(16, 8, activation=activation, return_sequences=True), + decoder=LSTM(8, input_dim, activation=activation, return_sequences=True), + output_reconstruction=False)) + return autoencoder, X_train, X_test +def build_deep_classical_autoencoder(autoencoder): + encoder = containers.Sequential([Dense(input_dim, hidden_dim, activation=activation), Dense(hidden_dim, hidden_dim/2, activation=activation)]) + decoder = containers.Sequential([Dense(hidden_dim/2, hidden_dim, activation=activation), Dense(hidden_dim, input_dim, activation=activation)]) + autoencoder.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=False)) + return autoencoder # Try different things here: 'lstm' or 'classical' or 'denoising' # or 'deep_denoising' -for autoencoder_type in ['classical', 'denoising', 'deep_denoising', 'lstm']: - print(autoencoder_type) - print('-'*40) - # Build our autoencoder model - autoencoder = Sequential() - if autoencoder_type == 'lstm': - print("Training LSTM AutoEncoder") - autoencoder, X_train, X_test = build_lstm_autoencoder(autoencoder, X_train, X_test) - elif autoencoder_type == 'denoising': - print("Training Denoising AutoEncoder") - autoencoder = build_denoising_autoencoder(autoencoder) - elif autoencoder_type == 'deep_denoising': - print ("Training Deep Denoising AutoEncoder") - autoencoder = build_deep_denoising_autoencoder(autoencoder) - elif autoencoder_type == 'classical': - print("Training Classical AutoEncoder") - autoencoder = 
build_deep_classical_autoencoder(autoencoder) - else: - print("Error: unknown autoencoder type!") - exit(-1) - - autoencoder.get_config(verbose=1) - autoencoder.compile(loss='mean_squared_error', optimizer='adam') - # Do NOT use validation data with return output_reconstruction=True - autoencoder.fit(X_train, X_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1) - - # Do an inference pass - prefilter_train = autoencoder.predict(X_train, verbose=0) - prefilter_test = autoencoder.predict(X_test, verbose=0) - print("prefilter_train: ", prefilter_train.shape) - print("prefilter_test: ", prefilter_test.shape) - - # Classify results from Autoencoder - print("Building classical fully connected layer for classification") - model = Sequential() - if autoencoder_type == 'lstm': - model.add(TimeDistributedDense(8, nb_classes, activation=activation)) - model.add(Flatten()) - elif autoencoder_type == 'classical': - model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation)) - else: - model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation)) - - model.add(Activation('softmax')) - - model.get_config(verbose=1) - model.compile(loss='categorical_crossentropy', optimizer='adam') - model.fit(prefilter_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(prefilter_test, Y_test)) - - score = model.evaluate(prefilter_test, Y_test, verbose=0, show_accuracy=True) - print('\nscore:', score) - - print('Loss change:', (score[0] - classical_score[0])/classical_score[0], '%') - print('Accuracy change:', (score[1] - classical_score[1])/classical_score[1], '%') - +for autoencoder_type in ['classical', 'lstm']: + print(autoencoder_type) + print('-'*40) + # Build our autoencoder model + autoencoder = Sequential() + if autoencoder_type == 'lstm': + print("Training LSTM AutoEncoder") + autoencoder, X_train, X_test = build_lstm_autoencoder(autoencoder, X_train, X_test) + elif autoencoder_type == 'classical': + print("Training Classical AutoEncoder") + autoencoder = build_deep_classical_autoencoder(autoencoder) + else: + print("Error: unknown autoencoder type!") + exit(-1) + + autoencoder.get_config(verbose=1) + autoencoder.compile(loss='mean_squared_error', optimizer='adam') + # Do NOT use validation data with return output_reconstruction=True + autoencoder.fit(X_train, X_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1) + + # Do an inference pass + prefilter_train = autoencoder.predict(X_train, verbose=0) + prefilter_test = autoencoder.predict(X_test, verbose=0) + print("prefilter_train: ", prefilter_train.shape) + print("prefilter_test: ", prefilter_test.shape) + + # Classify results from Autoencoder + print("Building classical fully connected layer for classification") + model = Sequential() + if autoencoder_type == 'lstm': + model.add(TimeDistributedDense(8, nb_classes, activation=activation)) + model.add(Flatten()) + elif autoencoder_type == 'classical': + model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation)) + else: + model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation)) + + model.add(Activation('softmax')) + + model.get_config(verbose=1) + model.compile(loss='categorical_crossentropy', optimizer='adam') + model.fit(prefilter_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(prefilter_test, Y_test)) + + score = model.evaluate(prefilter_test, Y_test, verbose=0, 
show_accuracy=True) + print('\nscore:', score) + + print('Loss change:', (score[0] - classical_score[0])/classical_score[0], '%') + print('Accuracy change:', (score[1] - classical_score[1])/classical_score[1], '%') diff --git a/tests/manual/check_constraints.py b/tests/manual/check_constraints.py index 6f08cfaa52f5..cb658bca55b2 100644 --- a/tests/manual/check_constraints.py +++ b/tests/manual/check_constraints.py @@ -35,7 +35,7 @@ model.add(Dense(784, 20, W_constraint=maxnorm(1))) model.add(Activation('relu')) model.add(Dropout(0.1)) -model.add(Dense(20, 20, W_constraint=nonneg)) +model.add(Dense(20, 20, W_constraint=nonneg())) model.add(Activation('relu')) model.add(Dropout(0.1)) model.add(Dense(20, 10, W_constraint=maxnorm(1))) diff --git a/tests/manual/check_dot_utils.py b/tests/manual/check_dot_utils.py deleted file mode 100644 index 357c89f39b78..000000000000 --- a/tests/manual/check_dot_utils.py +++ /dev/null @@ -1,27 +0,0 @@ -from keras.utils.dot_utils import Grapher - -from keras.models import Sequential -from keras.layers.core import Dense, Activation, Merge, Flatten -from keras.layers.embeddings import Embedding -from keras.layers.recurrent import GRU - -ent_lookup = Sequential() -ent_lookup.add(Embedding(10, 2)) -ent_lookup.add(Flatten()) - -rel_lookup = Sequential() -rel_lookup.add(Embedding(20, 2)) -rel_lookup.add(Flatten()) - -word_sequence = Sequential() -word_sequence.add(Embedding(10, 5)) -word_sequence.add(GRU(5, 2)) - -model = Sequential() -model.add(Merge([word_sequence, ent_lookup, rel_lookup], mode='concat')) -model.add(Activation('relu')) -model.add(Dense(6, 2)) -model.add(Activation('softmax')) - -g = Grapher() -g.plot(model, 'mymodel.png') \ No newline at end of file diff --git a/tests/manual/check_masked_recurrent.py b/tests/manual/check_masked_recurrent.py new file mode 100644 index 000000000000..6fab7a8d464f --- /dev/null +++ b/tests/manual/check_masked_recurrent.py @@ -0,0 +1,130 @@ +# Dummy test data as input to RNN. This input is 3 timesteps long where the third timestep always matches the +# first. Without masking it should be able to learn it, with masking it should fail. + +import numpy as np +from keras.utils.theano_utils import sharedX +from keras.models import Sequential +from keras.layers.core import Dense, Activation, Merge, Dropout, TimeDistributedDense +from keras.layers.embeddings import Embedding +from keras.layers.recurrent import SimpleRNN, SimpleDeepRNN, LSTM, GRU +import theano + +theano.config.exception_verbosity = 'high' + +# (nb_samples, timesteps, dimensions) +X = np.random.random_integers(1, 4, size=(500000, 15)) + +print("About to compile the first model") +model = Sequential() +model.add(Embedding(5, 4, mask_zero=True)) +model.add(TimeDistributedDense(4, 4)) # obviously this is redundant. Just testing. 
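+# The masked Embedding above feeds a stack of recurrent layer types; the checks further down rely on its zero-mask propagating through every layer in this stack.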
+model.add(SimpleRNN(4, 4, activation='relu', return_sequences=True)) +model.add(Dropout(0.5)) +model.add(SimpleDeepRNN(4, 4, depth=2, activation='relu')) +model.add(Dropout(0.5)) +model.add(Dense(4, 4, activation='softmax')) +model.compile(loss='categorical_crossentropy', + optimizer='rmsprop', theano_mode=theano.compile.mode.FAST_RUN) +print("Compiled model") + +W = model.get_weights() # We'll save these so we can reset it later + +X[:, : 10] = 0 +Xmask0 = X.copy() +Xmask0[:, 10] = 0 + +Xmask12 = X.copy() +Xmask12[:, 11] = 0 +Xmask12[:, 12] = 0 + +X0_onehot = np.zeros((X.shape[0], 4)) +X1_onehot = np.zeros((X.shape[0], 4)) +for i, row in enumerate(X): + X0_onehot[i, row[10] - 1] = 1 + X1_onehot[i, row[11] - 1] = 1 + +# Uniform score: 4 options = ln(4) nats (2 bits) +# we should not do better than this when we mask out the part of the input +# that gives us the correct answer +uniform_score = np.log(4) +batch_size=512 + +# Train it to guess 0th dim +model.fit(X, X0_onehot, nb_epoch=1, batch_size=batch_size) +score = model.evaluate(X, X0_onehot, batch_size=batch_size) +if score > uniform_score * 0.9: + raise Exception('Failed to learn to copy timestep 0, score %f' % score) + + +model.set_weights(W) + +# Train without showing it the 0th dim to learn 1st dim +model.fit(X[: , 1:], X1_onehot, nb_epoch=1, batch_size=batch_size) +score = model.evaluate(X[:, 1:], X1_onehot, batch_size=batch_size) +if score > uniform_score * 0.9: + raise Exception('Failed to learn to copy timestep 1, score %f' % score) + +model.set_weights(W) + +# Train to guess 0th dim when 0th dim has been masked (should fail) +model.fit(Xmask0, X0_onehot, nb_epoch=1, batch_size=batch_size) +score = model.evaluate(Xmask0, X0_onehot, batch_size=batch_size) +if score < uniform_score * 0.9: + raise Exception('Somehow learned to copy timestep 0 despite mask, score %f' % score) + +model.set_weights(W) + +# Train to guess 1st dim when 0th dim has been masked (should succeed) +model.fit(Xmask0, X1_onehot, nb_epoch=1, batch_size=batch_size) +score = model.evaluate(Xmask0, X1_onehot, batch_size=batch_size) +if score > uniform_score * 0.9: + raise Exception('Failed to learn to copy timestep 1 in masked model, score %f' % score) + +model.set_weights(W) + +# Finally, make sure the mask is actually blocking input, mask out timesteps 1 and 2, and see if +# it can learn timestep 0 (should fail) +model.fit(Xmask12, X0_onehot, nb_epoch=1, batch_size=batch_size) + +score = model.evaluate(Xmask12, X0_onehot, batch_size=batch_size) +if score < uniform_score * 0.9: + raise Exception('Somehow learned to copy timestep 0 despite masking 1, score %f' % score) + +# Another testing approach, just initialize models and make sure that prepending zeros doesn't affect +# their output +print("About to compile the second model") +model2 = Sequential() +model2.add(Embedding(5, 4, mask_zero=True)) +model2.add(TimeDistributedDense(4, 4)) +model2.add(Activation('time_distributed_softmax')) +model2.add(LSTM(4, 4, return_sequences=True)) +model2.add(Activation('tanh')) +model2.add(GRU(4, 4, activation='softmax', return_sequences=True)) +model2.add(SimpleDeepRNN(4, 4, depth=2, activation='relu', return_sequences=True)) +model2.add(SimpleRNN(4, 4, activation='relu', return_sequences=True)) +model2.compile(loss='categorical_crossentropy', + optimizer='rmsprop', theano_mode=theano.compile.mode.FAST_RUN) +print("Compiled model2") + +X2 = np.random.random_integers(1, 4, size=(2, 5)) +y2 = np.random.random((X2.shape[0], X2.shape[1], 4)) + +ref = model2.predict(X2) +ref_eval 
= model2.evaluate(X2, y2) +mask = np.ones((y2.shape[0], y2.shape[1], 1)) + +for pre_zeros in range(1, 10): + padded_X2 = np.concatenate((np.zeros((X2.shape[0], pre_zeros)), X2), axis=1) + padded_mask = np.concatenate((np.zeros((mask.shape[0], pre_zeros, mask.shape[2])), mask), axis=1) + padded_y2 = np.concatenate((np.zeros((y2.shape[0], pre_zeros, y2.shape[2])), y2), axis=1) + + pred = model2.predict(padded_X2) + if not np.allclose(ref[:, -1, :], pred[:, -1, :]): + raise Exception("Different result after left-padding %d zeros. Ref: %s, Pred: %s" % (pre_zeros, ref, pred)) + + pad_eval = model2.evaluate(padded_X2, padded_y2, weights=padded_mask) + if not np.allclose([pad_eval], [ref_eval]): + raise Exception("Got dissimilar categorical_crossentropy after left-padding %d zeros. Ref: %f, Pred %f" %\ (pre_zeros, ref_eval, pad_eval)) + + diff --git a/tests/manual/check_model_utils.py b/tests/manual/check_model_utils.py new file mode 100644 index 000000000000..a5b042b866d5 --- /dev/null +++ b/tests/manual/check_model_utils.py @@ -0,0 +1,44 @@ +from __future__ import absolute_import +from __future__ import print_function +from keras.models import Sequential, Graph +from keras.layers.core import Layer, Activation, Dense, Flatten, Reshape, Merge +from keras.layers.convolutional import Convolution2D, MaxPooling2D +import keras.utils.layer_utils as layer_utils + +print('-- Sequential model') +left = Sequential() +left.add(Convolution2D(32, 1, 3, 3, border_mode='valid')) +left.add(MaxPooling2D(poolsize=(2, 2))) +left.add(Flatten()) +left.add(Dense(32 * 13 * 13, 50)) +left.add(Activation('relu')) + +right = Sequential() +right.add(Dense(784, 30)) +right.add(Activation('relu')) + +model = Sequential() +model.add(Merge([left, right], mode='concat')) + +model.add(Dense(80, 10)) +model.add(Activation('softmax')) + +layer_utils.print_layer_shapes(model, [(1, 1, 28, 28), (1, 784)]) + +print('-- Graph model') +graph = Graph() +graph.add_input(name='input1', ndim=2) +graph.add_input(name='input2', ndim=4) +graph.add_node(Dense(32, 16), name='dense1', input='input1') +graph.add_node(Dense(16, 4), name='dense3', input='dense1') + +graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2') +graph.add_node(Flatten(), name='flatten1', input='conv1') +graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1') + +graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum') +graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat') + +layer_utils.print_layer_shapes(graph, {'input1': (1, 32), 'input2': (1, 1, 28, 28)}) + +print('Test script complete') diff --git a/tests/manual/check_models.py b/tests/manual/check_models.py deleted file mode 100644 index 13d13bf199d1..000000000000 --- a/tests/manual/check_models.py +++ /dev/null @@ -1,213 +0,0 @@ -from __future__ import absolute_import -from __future__ import print_function -from keras.datasets import mnist -from keras.models import Sequential -from keras.layers.core import Dense, Activation, Merge -from keras.utils import np_utils -import numpy as np - -nb_classes = 10 -batch_size = 128 -nb_epoch = 1 - -max_train_samples = 5000 -max_test_samples = 1000 - -np.random.seed(1337) # for reproducibility - -# the data, shuffled and split between tran and test sets -(X_train, y_train), (X_test, y_test) = mnist.load_data() - -X_train = X_train.reshape(60000,784)[:max_train_samples] -X_test = X_test.reshape(10000,784)[:max_test_samples] -X_train = X_train.astype("float32") -X_test = X_test.astype("float32") -X_train
/= 255 -X_test /= 255 - -# convert class vectors to binary class matrices -Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples] -Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples] - -######################### -# sequential model test # -######################### -print('Test sequential') -model = Sequential() -model.add(Dense(784, 50)) -model.add(Activation('relu')) -model.add(Dense(50, 10)) -model.add(Activation('softmax')) - -model.compile(loss='categorical_crossentropy', optimizer='rmsprop') -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test)) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test)) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) - -score = model.evaluate(X_train, Y_train, verbose=0) -print('score:', score) -if score < 0.25: - raise Exception('Score too low, learning issue.') -preds = model.predict(X_test, verbose=0) -classes = model.predict_classes(X_test, verbose=0) - -model.get_config(verbose=1) - -################### -# merge test: sum # -################### -print('Test merge: sum') -left = Sequential() -left.add(Dense(784, 50)) -left.add(Activation('relu')) - -right = Sequential() -right.add(Dense(784, 50)) -right.add(Activation('relu')) - -model = Sequential() -model.add(Merge([left, right], mode='sum')) - -model.add(Dense(50, 10)) -model.add(Activation('softmax')) - -model.compile(loss='categorical_crossentropy', optimizer='rmsprop') - -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], Y_test)) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], Y_test)) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) - -score = model.evaluate([X_train, X_train], Y_train, verbose=0) -print('score:', score) -if score < 0.22: - raise Exception('Score too low, learning issue.') -preds = model.predict([X_test, X_test], verbose=0) -classes = model.predict_classes([X_test, X_test], verbose=0) - -model.get_config(verbose=1) - -################### -# merge test: concat # -################### -print('Test merge: concat') -left = Sequential() -left.add(Dense(784, 50)) -left.add(Activation('relu')) - -right = Sequential() -right.add(Dense(784, 50)) -right.add(Activation('relu')) - -model = Sequential() -model.add(Merge([left, right], mode='concat')) - -model.add(Dense(50*2, 10)) -model.add(Activation('softmax')) - -model.compile(loss='categorical_crossentropy', optimizer='rmsprop') - -model.fit([X_train, 
X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], Y_test)) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], Y_test)) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) -model.fit([X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) - -score = model.evaluate([X_train, X_train], Y_train, verbose=0) -print('score:', score) -if score < 0.22: - raise Exception('Score too low, learning issue.') -preds = model.predict([X_test, X_test], verbose=0) -classes = model.predict_classes([X_test, X_test], verbose=0) - -model.get_config(verbose=1) - -########################## -# test merge recursivity # -########################## -print('Test merge recursivity') - -left = Sequential() -left.add(Dense(784, 50)) -left.add(Activation('relu')) - -right = Sequential() -right.add(Dense(784, 50)) -right.add(Activation('relu')) - -righter = Sequential() -righter.add(Dense(784, 50)) -righter.add(Activation('relu')) - -intermediate = Sequential() -intermediate.add(Merge([left, right], mode='sum')) -intermediate.add(Dense(50, 50)) -intermediate.add(Activation('relu')) - -model = Sequential() -model.add(Merge([intermediate, righter], mode='sum')) - -model.add(Dense(50, 10)) -model.add(Activation('softmax')) - -model.compile(loss='categorical_crossentropy', optimizer='rmsprop') - -model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], Y_test)) -model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], Y_test)) -model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) -model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) -model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) -model.fit([X_train, X_train, X_train], Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) - -score = model.evaluate([X_train, X_train, X_train], Y_train, verbose=0) -print('score:', score) -if score < 0.19: - raise Exception('Score too low, learning issue.') -preds = model.predict([X_test, X_test, X_test], verbose=0) -classes = model.predict_classes([X_test, X_test, X_test], verbose=0) - -model.get_config(verbose=1) - -model.save_weights('temp.h5') -model.load_weights('temp.h5') - -score = model.evaluate([X_train, X_train, X_train], Y_train, verbose=0) -print('score:', score) - -###################### -# test merge overlap # -###################### -print('Test merge overlap') -left = Sequential() -left.add(Dense(784, 50)) -left.add(Activation('relu')) - -model = Sequential() -model.add(Merge([left, left], mode='sum')) - -model.add(Dense(50, 10)) -model.add(Activation('softmax')) - -model.compile(loss='categorical_crossentropy', optimizer='rmsprop') - 
-model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=(X_test, Y_test)) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test)) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) -model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False) - -score = model.evaluate(X_train, Y_train, verbose=0) -print('score:', score) -if score < 0.22: - raise Exception('Score too low, learning issue.') -preds = model.predict(X_test, verbose=0) -classes = model.predict_classes(X_test, verbose=0) - -model.get_config(verbose=1) diff --git a/tests/manual/check_yaml.py b/tests/manual/check_yaml.py new file mode 100644 index 000000000000..bdc24bfa2493 --- /dev/null +++ b/tests/manual/check_yaml.py @@ -0,0 +1,101 @@ +from __future__ import absolute_import +from __future__ import print_function +import numpy as np + +from keras.utils.test_utils import get_test_data +from keras.preprocessing import sequence +from keras.optimizers import SGD, RMSprop, Adagrad +from keras.utils import np_utils +from keras.models import Sequential, Graph +from keras.layers.core import Dense, Dropout, Activation, Merge +from keras.layers.embeddings import Embedding +from keras.layers.recurrent import LSTM, GRU +from keras.datasets import imdb +from keras.models import model_from_yaml + +''' +This is essentially the IMDB test. Deserialized models should yield +the same config as the original one. 
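+Reloading a Graph model from YAML and restoring its weights from HDF5 should likewise reproduce the original predictions (checked at the end of this script).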
+''' + +max_features = 10000 +maxlen = 100 +batch_size = 32 + +(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features, test_split=0.2) + +X_train = sequence.pad_sequences(X_train, maxlen=maxlen) +X_test = sequence.pad_sequences(X_test, maxlen=maxlen) + +model = Sequential() +model.add(Embedding(max_features, 128)) +model.add(LSTM(128, 128)) +model.add(Dropout(0.5)) +model.add(Dense(128, 1, W_regularizer='identity', b_constraint='maxnorm')) +model.add(Activation('sigmoid')) + +model.get_config(verbose=1) + +##################################### +# save model w/o parameters to yaml # +##################################### + +yaml_no_params = model.to_yaml() + +no_param_model = model_from_yaml(yaml_no_params) +no_param_model.get_config(verbose=1) + +###################################### +# save multi-branch sequential model # +###################################### + +seq = Sequential() +seq.add(Merge([model, model], mode='sum')) +seq.get_config(verbose=1) +merge_yaml = seq.to_yaml() +merge_model = model_from_yaml(merge_yaml) + +large_model = Sequential() +large_model.add(Merge([seq,model], mode='concat')) +large_model.get_config(verbose=1) +large_model.to_yaml() + +#################### +# save graph model # +#################### + +X = np.random.random((100, 32)) +X2 = np.random.random((100, 32)) +y = np.random.random((100, 4)) +y2 = np.random.random((100,)) + +(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,), + classification=False, output_shape=(4,)) + +graph = Graph() + +graph.add_input(name='input1', ndim=2) + +graph.add_node(Dense(32, 16), name='dense1', input='input1') +graph.add_node(Dense(32, 4), name='dense2', input='input1') +graph.add_node(Dense(16, 4), name='dense3', input='dense1') + +graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum') +graph.compile('rmsprop', {'output1': 'mse'}) + +graph.get_config(verbose=1) + +history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10) +original_pred = graph.predict({'input1': X_test}) + +graph_yaml = graph.to_yaml() +graph.save_weights('temp.h5', overwrite=True) + +reloaded_graph = model_from_yaml(graph_yaml) +reloaded_graph.load_weights('temp.h5') +reloaded_graph.get_config(verbose=1) + +reloaded_graph.compile('rmsprop', {'output1': 'mse'}) +new_pred = reloaded_graph.predict({'input1': X_test}) + +assert(np.sum(new_pred['output1'] - original_pred['output1']) == 0)
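For reference, a minimal sketch of the Graph workflow these tests exercise; the shapes, node names, and the two-epoch fit are illustrative, but every call mirrors one used in tests/auto/test_graph_model.py above:

import numpy as np
from keras.models import Graph
from keras.layers.core import Dense

X = np.random.random((100, 32))
y = np.random.random((100, 4))

graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')

graph.compile('rmsprop', {'output1': 'mse'})        # loss is given per output name
graph.fit({'input1': X, 'output1': y}, nb_epoch=2)  # data is passed as a name -> array dict
out = graph.predict({'input1': X})                  # returns a dict keyed by output name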