diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index 5c8cee90d..000000000
--- a/.gitattributes
+++ /dev/null
@@ -1,23 +0,0 @@
-# Set the default behavior, in case people don't have core.autocrlf set.
-* text=auto
-
-# Explicitly declare text files you want to always be normalized and converted
-# to native line endings on checkout.
-*.c text
-*.cpp text
-*.cc text
-*.h text
-*.hpp text
-*.hh text
-*.mk text
-*.md text
-
-# Declare files that will always have CRLF line endings on checkout.
-*.sln text eol=crlf
-*.vcxproj text eol=crlf
-
-# Denote all files that are truly binary and should not be modified.
-*.png binary
-*.jpg binary
-*.pdf binary
-*.ipynb binary
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 35566b3f0..000000000
--- a/.gitignore
+++ /dev/null
@@ -1,289 +0,0 @@
-## Ignore Visual Studio temporary files, build results, and
-## files generated by popular Visual Studio add-ons.
-##
-## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
-
-# User-specific files
-*.suo
-*.user
-*.userosscache
-*.sln.docstates
-
-# User-specific files (MonoDevelop/Xamarin Studio)
-*.userprefs
-
-# Build results
-[Dd]ebug/
-[Dd]ebugPublic/
-[Rr]elease/
-[Rr]eleases/
-[Bb]uild/
-x64/
-x86/
-bld/
-[Bb]in/
-[Oo]bj/
-[Ll]og/
-
-# Visual Studio 2015 cache/options directory
-.vs/
-# Uncomment if you have tasks that create the project's static files in wwwroot
-#wwwroot/
-
-# MSTest test Results
-[Tt]est[Rr]esult*/
-[Bb]uild[Ll]og.*
-
-# NUNIT
-*.VisualState.xml
-TestResult.xml
-
-# Build Results of an ATL Project
-[Dd]ebugPS/
-[Rr]eleasePS/
-dlldata.c
-
-# .NET Core
-project.lock.json
-project.fragment.lock.json
-artifacts/
-**/Properties/launchSettings.json
-
-*_i.c
-*_p.c
-*_i.h
-*.ilk
-*.meta
-*.obj
-*.pch
-*.pdb
-*.pgc
-*.pgd
-*.rsp
-*.sbr
-*.tlb
-*.tli
-*.tlh
-*.tmp
-*.tmp_proj
-*.log
-*.vspscc
-*.vssscc
-.builds
-*.pidb
-*.svclog
-*.scc
-
-# Chutzpah Test files
-_Chutzpah*
-
-# Visual C++ cache files
-ipch/
-*.aps
-*.ncb
-*.opendb
-*.opensdf
-*.sdf
-*.cachefile
-*.VC.db
-*.VC.VC.opendb
-
-# Visual Studio profiler
-*.psess
-*.vsp
-*.vspx
-*.sap
-
-# TFS 2012 Local Workspace
-$tf/
-
-# Guidance Automation Toolkit
-*.gpState
-
-# ReSharper is a .NET coding add-in
-_ReSharper*/
-*.[Rr]e[Ss]harper
-*.DotSettings.user
-
-# JustCode is a .NET coding add-in
-.JustCode
-
-# TeamCity is a build add-in
-_TeamCity*
-
-# DotCover is a Code Coverage Tool
-*.dotCover
-
-# Visual Studio code coverage results
-*.coverage
-*.coveragexml
-
-# NCrunch
-_NCrunch_*
-.*crunch*.local.xml
-nCrunchTemp_*
-
-# MightyMoose
-*.mm.*
-AutoTest.Net/
-
-# Web workbench (sass)
-.sass-cache/
-
-# Installshield output folder
-[Ee]xpress/
-
-# DocProject is a documentation generator add-in
-DocProject/buildhelp/
-DocProject/Help/*.HxT
-DocProject/Help/*.HxC
-DocProject/Help/*.hhc
-DocProject/Help/*.hhk
-DocProject/Help/*.hhp
-DocProject/Help/Html2
-DocProject/Help/html
-
-# Click-Once directory
-publish/
-
-# Publish Web Output
-*.[Pp]ublish.xml
-*.azurePubxml
-# TODO: Comment the next line if you want to checkin your web deploy settings
-# but database connection strings (with potential passwords) will be unencrypted
-*.pubxml
-*.publishproj
-
-# Microsoft Azure Web App publish settings. Comment the next line if you want to
-# checkin your Azure Web App publish settings, but sensitive information contained
-# in these scripts will be unencrypted
-PublishScripts/
-
-# NuGet Packages
-*.nupkg
-# The packages folder can be ignored because of Package Restore
-**/packages/*
-# except build/, which is used as an MSBuild target.
-!**/packages/build/
-# Uncomment if necessary however generally it will be regenerated when needed
-#!**/packages/repositories.config
-# NuGet v3's project.json files produces more ignorable files
-*.nuget.props
-*.nuget.targets
-
-# Microsoft Azure Build Output
-csx/
-*.build.csdef
-
-# Microsoft Azure Emulator
-ecf/
-rcf/
-
-# Windows Store app package directories and files
-AppPackages/
-BundleArtifacts/
-Package.StoreAssociation.xml
-_pkginfo.txt
-
-# Visual Studio cache files
-# files ending in .cache can be ignored
-*.[Cc]ache
-# but keep track of directories ending in .cache
-!*.[Cc]ache/
-
-# Others
-ClientBin/
-~$*
-*~
-*.dbmdl
-*.dbproj.schemaview
-*.jfm
-*.pfx
-*.publishsettings
-orleans.codegen.cs
-
-# Since there are multiple workflows, uncomment next line to ignore bower_components
-# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
-#bower_components/
-
-# RIA/Silverlight projects
-Generated_Code/
-
-# Backup & report files from converting an old project file
-# to a newer Visual Studio version. Backup files are not needed,
-# because we have git ;-)
-_UpgradeReport_Files/
-Backup*/
-UpgradeLog*.XML
-UpgradeLog*.htm
-
-# SQL Server files
-*.mdf
-*.ldf
-*.ndf
-
-# Business Intelligence projects
-*.rdl.data
-*.bim.layout
-*.bim_*.settings
-
-# Microsoft Fakes
-FakesAssemblies/
-
-# GhostDoc plugin setting file
-*.GhostDoc.xml
-
-# Node.js Tools for Visual Studio
-.ntvs_analysis.dat
-node_modules/
-
-# Typescript v1 declaration files
-typings/
-
-# Visual Studio 6 build log
-*.plg
-
-# Visual Studio 6 workspace options file
-*.opt
-
-# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
-*.vbw
-
-# Visual Studio LightSwitch build output
-**/*.HTMLClient/GeneratedArtifacts
-**/*.DesktopClient/GeneratedArtifacts
-**/*.DesktopClient/ModelManifest.xml
-**/*.Server/GeneratedArtifacts
-**/*.Server/ModelManifest.xml
-_Pvt_Extensions
-
-# Paket dependency manager
-.paket/paket.exe
-paket-files/
-
-# FAKE - F# Make
-.fake/
-
-# JetBrains Rider
-.idea/
-*.sln.iml
-
-# CodeRush
-.cr/
-
-# Python Tools for Visual Studio (PTVS)
-__pycache__/
-*.pyc
-
-# Cake - Uncomment if you are using it
-# tools/**
-# !tools/packages.config
-
-# Telerik's JustMock configuration file
-*.jmconfig
-
-# BizTalk build output
-*.btp.cs
-*.btm.cs
-*.odx.cs
-*.xsd.cs
diff --git a/Algorithms/Bonsai.md b/Algorithms/Bonsai.md
new file mode 100644
index 000000000..cd8fad1f8
--- /dev/null
+++ b/Algorithms/Bonsai.md
@@ -0,0 +1,47 @@
+---
+layout: default
+title: Bonsai - Strong, Shallow and Sparse Non-linear Tree
+---
+
+Bonsai is a new tree model for supervised learning tasks such as binary and
+multi-class classification, regression, ranking, etc. Bonsai learns a single,
+shallow, sparse tree with powerful predictors at internal and leaf nodes. This
+allows Bonsai to achieve state-of-the-art prediction accuracies while making
+predictions efficiently in microseconds to milliseconds (depending on processor
+speed) using models that fit in a few KB of memory. Bonsai can be trained in
+the cloud or on your laptop, but can then make predictions locally on tiny
+resource-constrained devices without needing cloud connectivity.
+
+Bonsai has been deployed successfully on microcontrollers tinier than a grain
+of rice such as the ARM Cortex M0 with just 2 KB RAM. Bonsai can also make
+predictions accurately and efficiently on the tiniest of IoT boards such as the
+Arduino Pro Mini based on an 8 bit Atmel ATmega328P microcontroller operating
+at 8 MHz without any floating point support in hardware, with 2 KB RAM and 32
+KB read-only flash memory. Bonsai can also fit in the L1 cache of processors
+found in mobiles, tablets, laptops, and servers for low-latency applications.
+
+Bonsai is most useful for IoT scenarios where it is not advantageous to
+transmit sensor readings (or features) to the cloud and predictions need to be
+made locally on-device due to:
+
+- Poor bandwidth or lack of connectivity
+- Low-latency requirements where predictions need to be made very quickly and
+ there isn’t enough time to transmit data to the cloud and get back a prediction
+- Concerns about privacy and security where the data should not leave the
+ device
+- Low-energy requirements where data cannot be transmitted to the cloud so as
+ to enhance battery life
+
+
+Bonsai can also be useful for switching to a smaller, cheaper and more
+energy-efficient form factor such as from a Raspberry Pi 3 to an Arduino Pro
+Mini. Finally, Bonsai also generalizes to other resource-constrained scenarios
+beyond the Internet of Things and can be used on laptops, servers and the cloud
+for low-latency applications and to bring down energy consumption and operating
+costs.
+
+Please see our [ICML 2017 paper](/Microsoft/EdgeML/wiki/files/BonsaiPaper.pdf)
+for more details about the model and algorithm and our [Getting
+Started](/Microsoft/EdgeML/wiki/GettingStarted) section for instructions on how
+to use Bonsai.
+
diff --git a/Algorithms/EMI-RNN.md b/Algorithms/EMI-RNN.md
new file mode 100644
index 000000000..8abb6a4d1
--- /dev/null
+++ b/Algorithms/EMI-RNN.md
@@ -0,0 +1,19 @@
+---
+layout: default
+title: Early Multi-Instance Recurrent Neural Network
+---
+
+Deploying sequential data classification modules on tiny devices is challenging as
+predictions over sliding windows of data need to be invoked continuously at a
+high frequency. Each of these predictors themselves are expensive as they
+evaluate large models over long windows of data. In this paper, we address this
+challenge by exploiting the following two observations about classification
+tasks arising in typical IoT related applications: (a) the "signature" of a
+particular class (e.g. an audio keyword) typically occupies a small fraction of
+the overall data, and (b) class signatures tend to be discernible early-on in the
+data. We propose a method that exploits these observations by using a multiple
+instance learning formulation along with an early prediction technique to learn
+a model that can achieve better accuracy compared to baseline models, while
+reducing the computation by a large fraction. For instance, on an audio keyword
+detection benchmark our model improves standard LSTM model’s accuracy by up to
+1.5% while decreasing the computation cost by more than 60%.
diff --git a/Algorithms/FastGRNN.md b/Algorithms/FastGRNN.md
new file mode 100644
index 000000000..b74e1df0e
--- /dev/null
+++ b/Algorithms/FastGRNN.md
@@ -0,0 +1,78 @@
+---
+layout: default
+title: Fast(G)RNN - Fast, Accurate, Stable and Tiny (Gated) Recurrent Neural Network
+---
+
+*FastRNN* and *FastGRNN*, two RNN architectures (cells), together called
+*FastCells*, are developed to address the twin RNN limitations of inaccurate
+training and inefficient prediction. FastRNN provably stabilizes the RNN
+training which usually suffers from the famous vanishing and exploding gradient
+problems. FastGRNN learns low-rank, sparse and quantized weight matrices whilst
+having elegant and provably stable Gated RNN update equations. This allows
+FastGRNN to achieve state-of-the-art prediction accuracies while making
+predictions in microseconds to milliseconds (depending on processor speed)
+using models that fit in a few KB of memory. Fast(G)RNN can be trained in the
+cloud or on your laptop, but can then make predictions locally on tiny
+resource-constrained devices without needing cloud connectivity.
+
+FastGRNN is up to **45x** smaller and faster (inference on edge-devices) than
+state-of-the-art RNN architectures like LSTM/GRU whilst maintaining accuracies
+on various benchmark datasets and FastRNN has provably stable training and
+better performance when compared to Unitary architectures proposed to tackle
+the unstable training.
+
+### FastRNN
+
+
+
+### FastGRNN
+
+
+
+FastGRNN has been deployed successfully on microcontrollers tinier than a grain
+of rice such as the ARM Cortex M0+ with just 2 KB RAM. FastGRNN can also make
+predictions accurately and efficiently on the tiniest of IoT boards such as the
+Arduino MKR1000 based on a 32-bit low power ARM Cortex M0+ microcontroller
+without any floating point support in hardware, with 32 KB RAM and 256 KB
+read-only flash memory. FastGRNN can also fit in the L1 cache of processors
+found in mobiles, tablets, laptops, and servers for low-latency applications.
+
+Most of the IoT sensor readings are multi-modal and have inherent/latent
+temporal dependence. Using RNNs will eliminate the expensive and time-consuming
+feature extraction and engineering, thereby incorporating that as part of the
+model. RNNs are shown to be the state-of-the-art in various
+Time-series/Temporal tasks over the last few years.
+
+FastGRNN is most useful for IoT scenarios where it is not advantageous to
+transmit sensor readings (or features) to the cloud and predictions need to be
+made locally on-device due to:
+
+- Poor bandwidth or lack of connectivity
+- Low-latency requirements where predictions need to be made very quickly and
+ there isn’t enough time to transmit data to the cloud and get back a
+prediction
+- Concerns about privacy and security where the data should not leave the
+ device
+- Low-energy requirements where data cannot be transmitted to the cloud so as
+ to enhance battery life
+
+
+FastGRNN can also be useful for switching to a smaller, cheaper and more
+energy-efficient form factor such as from a Raspberry Pi 3 to an Arduino Pro
+Mini. Finally, Fast(G)RNN also generalizes to other resource-constrained
+scenarios beyond the Internet of Things and can be used on laptops, servers and
+the cloud for low-latency applications and to bring down energy consumption and
+operating costs.
+
+Apart from the resource-constrained scenarios, Fast(G)RNN is proved to be a
+smaller yet powerful replacement of expensive LSTM/GRU in various benchmark
+tasks like Sentiment Classification, Language Modelling, and Image
+Classification. This shows that the architectures have a wider impact and reach
+spanning NLP, Image and Time-series tasks.
+
+Please see our [NIPS 2018
+paper](/Microsoft/EdgeML/wiki/files/FastGRNNPaper.pdf) for more details about
+the model and algorithm and our [Getting
+Started](/Microsoft/EdgeML/wiki/GettingStarted) section for instructions on how
+to use Fast(G)RNN.
+
diff --git a/Algorithms/ProtoNN.md b/Algorithms/ProtoNN.md
new file mode 100644
index 000000000..c883c969a
--- /dev/null
+++ b/Algorithms/ProtoNN.md
@@ -0,0 +1,19 @@
+---
+layout: default
+title: ProtoNN - Compressed Accurate K-Nearest Neighbour
+---
+
+ProtoNN is a multi-class classification algorithm inspired by k-Nearest
+Neighbor (KNN) but has several orders of magnitude lower storage and prediction complexity.
+ProtoNN models can be deployed even on devices with puny storage and
+computational power (e.g. an Arduino UNO with 2kB RAM) to get excellent
+prediction accuracy. ProtoNN derives its strength from three key ideas: a)
+learning a small number of prototypes to represent the entire training set, b)
+sparse low dimensional projection of data, c) joint discriminative learning of
+the projection and prototypes with explicit model
+size constraint.
+
+$$
+\vec{h}_{t} = \tanh({W}_h x_t + {U}_h h_{t-1} + b_h) \\
+h_{t} = \tanh({W}_h x_t + {U}_h h_{t-1} + b_h)
+$$
diff --git a/Algorithms/index.md b/Algorithms/index.md
new file mode 100644
index 000000000..d2c68b92f
--- /dev/null
+++ b/Algorithms/index.md
@@ -0,0 +1,147 @@
+---
+layout: default
+title: Algorithms and Tools
+permalink: /Algorithms
+---
+
+The algorithms that are part of EdgeML are written in Tensorflow and PyTorch for Python.
+They are hosted on [GitHub](https://github.com/Microsoft/EdgeML/).
+Additionally, the repository also provides fast and scalable C++
+implementations of Bonsai and ProtoNN. The common usecases are as follows:
+
+- **Bonsai** or **ProtoNN**: Can be used for traditional machine learning tasks with pre-computed features like gesture recognition ([Gesturepod](https://microsoft.github.io/EdgeML/Projects/GesturePod/instructable.html)), activity detection, image classification. They can also be used to replace bulky traditional classifiers like fully connected layers, RBF-SVMs etc., in ML pipelines.
+- **EMI-RNN** & **FastGRNN**: These complementary techniques can be applied on time-series classification tasks which require the models to learn new feature representations such as wakeword detection ([Key-word spotting](https://microsoft.github.io/EdgeML/Projects/WakeWord/instructable.html)), sentiment classification, activity recognition. FastGRNN can be used as a cheaper alternative to LSTM and GRU in deep learning pipelines while EMI-RNN provides a framework for computational savings using multi-instance learning.
+- **SeeDot**:
+
+A very brief introduction of these algorithms and tools is provided below.
+
+1. **Bonsai**: *Bonsai* is a shallow and strong non-linear tree based classifier which is designed to solve traditional ML problem with 2KB sized models.
+Bonsai has logarithmic prediction complexity and can be trained end-to-end with deep learning models.
+
+ [Paper @ ICML 2017]
+ [Bibtex]
+ [Poster]
+ [Cpp code]
+ [Tensorflow example]
+ [PyTorch example]
+ [Blog]
+
+2. **ProtoNN**: *ProtoNN* is a prototype based k-nearest neighbors (kNN) classifier which is designed to solve traditional ML problem with 2KB sized models.
+ProtoNN can be trained end-to-end with deep learning models and has been used for deployment in GesturePod.
+
+ [Paper @ ICML 2017]
+ [Bibtex]
+ [Poster]
+ [Cpp code]
+ [Tensorflow example]
+ [PyTorch example]
+ [Blog]
+
+3. **EMI-RNN**: Training routine to recover critical signature from time series data for faster and accurate RNN predictions. EMI-RNN helps in speeding-up RNN inference up to 72x when compared to traditional implementations.
+
+ [Paper @ NeurIPS 2018]
+ [Bibtex]
+ [Poster]
+ [Tensorflow example]
+ [PyTorch example]
+ [Video]
+
+4. **FastRNN** & **FastGRNN**: Fast, Accurate, Stable and Tiny (Gated) RNN Cells which can be used instead of LSTM and GRU. FastGRNN can be up to 35x smaller and faster than LSTM and GRU for time series classification problems with model sizes of less than 10KB.
+
+ [Paper @ NeurIPS 2018]
+ [Bibtex]
+ [Poster]
+ [Tensorflow example]
+ [PyTorch example]
+ [Video]
+ [Blog]
+
+5. **SeeDot**: Floating-point to fixed-point quantization tool including a new language and compiler.
+
+ [Paper @ PLDI 2019]
+ [Bibtex]
+ [Code]
+ [Video]
+
+
+All the above algorithms and tools are aimed at enabling machine learning inference on the edge devices which form the back-bone for the Internet of Things (IoT).
+
+
+
+
+
+
+
diff --git a/CMakeLists.txt b/CMakeLists.txt
deleted file mode 100644
index 75cbd3a0d..000000000
--- a/CMakeLists.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-cmake_minimum_required(VERSION 3.3)
-
-set(CMAKE_CXX_STANDARD 11)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
-
-project(EdgeML)
-
-if(NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE Release)
-endif()
-
-# Turn on ability to create folders to organize projects
-set_property(GLOBAL PROPERTY USE_FOLDERS ON)
-
-set(CMAKE_POSITION_INDEPENDENT_CODE ON)
-
-# Set global compilation flags
-if(NOT WIN32 AND NOT CYGWIN)
- add_compile_options("-fvisibility-inlines-hidden")
-endif()
-
-# Set Visual Studio-specific options
-if(MSVC)
- add_compile_options(/MP) #multi process build
-endif()
-
-#define variables for mkl include directories
-#set your MKL_ROOT here
-
-if(MSVC)
- set(MKL_ROOT "C:/Program Files (x86)/IntelSWTools/compilers_and_libraries/windows/mkl")
- set(MKL_INCLUDE_DIR ${MKL_ROOT}/include)
- include_directories(${MKL_INCLUDE_DIR})
- link_directories(${MKL_ROOT}/lib/intel64_win)
- link_directories(${MKL_ROOT}/../compiler/lib/intel64_win)
-ENDIF(MSVC)
-
-IF(CMAKE_COMPILER_IS_GNUCC)
- set(MKL_ROOT "/opt/intel/mkl/")
- set(MKL_INCLUDE_DIR ${MKL_ROOT}/include)
- include_directories(${MKL_INCLUDE_DIR})
- link_directories(${MKL_ROOT}/lib/intel64_lin)
- link_directories(${MKL_ROOT}/../compiler/lib/intel64_lin)
-ENDIF(CMAKE_COMPILER_IS_GNUCC)
-
-# add debug definitions to compiler flags
-#set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DLIGHT_LOGGER") #-DLOGGER #-DTIMER -DCONCISE #-DSTDERR_ONSCREEN #-DLIGHT_LOGGER -DVERBOSE #-DDUMP #-DVERIFY
-set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DLIGHT_LOGGER -DSTDERR_ONSCREEN -DVERBOSE -DDUMP -DVERIFY") #-DLOGGER #-DTIMER -DCONCISE #-DSTDERR_ONSCREEN #-DLIGHT_LOGGER -DVERBOSE #-DDUMP #-DVERIFY
-
-set(CONFIG_FLAGS "-DSINGLE") #-DXML -DZERO_BASED_IO
-
-# mkl flags
-set(MKL_EIGEN_FLAGS "-DEIGEN_USE_BLAS -DMKL_ILP64")
-
-# add
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CONFIG_FLAGS} ${MKL_EIGEN_FLAGS}")
-
-IF(CMAKE_COMPILER_IS_GNUCC)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcilkplus -DCILK")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -DLINUX")
- set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -p -g")
-ENDIF (CMAKE_COMPILER_IS_GNUCC)
-
-if(MSVC)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Ox -DWINDOWS")
-endif()
-
-MESSAGE(STATUS "CMAKE_CXX_FLAGS:" ${CMAKE_CXX_FLAGS})
-MESSAGE(STATUS "CMAKE_CXX_FLAGS_DEBUG:" ${CMAKE_CXX_FLAGS_DEBUG})
-
-# Include project directories
-add_subdirectory(src)
-add_subdirectory(drivers)
diff --git a/License.txt b/License.txt
deleted file mode 100644
index ff03f27bb..000000000
--- a/License.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Edge Machine Learning
-
-Copyright (c) Microsoft Corporation
-
-All rights reserved.
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software
-and associated documentation files (the ""Software""), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
-and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
-LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
-THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Makefile b/Makefile
deleted file mode 100644
index c8c1c0551..000000000
--- a/Makefile
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT license.
-
-
-include config.mk
-
-SOURCE_DIR=src
-DRIVER_DIR=drivers
-
-COMMON_DIR=$(SOURCE_DIR)/common
-PROTONN_DIR=$(SOURCE_DIR)/ProtoNN
-BONSAI_DIR=$(SOURCE_DIR)/Bonsai
-
-IFLAGS = -I eigen/ -I$(MKL_ROOT)/include \
- -I$(COMMON_DIR) -I$(PROTONN_DIR) -I$(BONSAI_DIR)
-
-all: ProtoNNTrain ProtoNNPredict BonsaiTrain BonsaiPredict #ProtoNNIngestTest BonsaiIngestTest
-
-libcommon.so: $(COMMON_INCLUDES)
- $(MAKE) -C $(SOURCE_DIR)/common
-
-libProtoNN.so: $(PROTONN_INCLUDES)
- $(MAKE) -C $(SOURCE_DIR)/ProtoNN
-
-libBonsai.so: $(BONSAI_INCLUDES)
- $(MAKE) -C $(SOURCE_DIR)/Bonsai
-
-ProtoNNTrainDriver.o:
- $(MAKE) -C $(DRIVER_DIR)/ProtoNN/trainer
-
-ProtoNNPredictDriver.o:
- $(MAKE) -C $(DRIVER_DIR)/ProtoNN/predictor
-
-BonsaiTrainDriver.o:
- $(MAKE) -C $(DRIVER_DIR)/Bonsai/trainer
-
-BonsaiPredictDriver.o:
- $(MAKE) -C $(DRIVER_DIR)/Bonsai/predictor
-
-#ProtoNNIngestTest.o BonsaiIngestTest.o:
-
-ProtoNNTrain: ProtoNNTrainDriver.o libcommon.so libProtoNN.so
- $(CC) -o $@ $^ $(CFLAGS) $(MKL_PAR_LDFLAGS) $(CILK_LDFLAGS)
-
-ProtoNNPredict: ProtoNNPredictDriver.o libcommon.so libProtoNN.so
- $(CC) -o $@ $^ $(CFLAGS) $(MKL_PAR_LDFLAGS) $(CILK_LDFLAGS)
-
-#ProtoNNIngestTest: ProtoNNIngestTest.o libcommon.so libProtoNN.so
-# $(CC) -o $@ $^ $(CFLAGS) $(MKL_PAR_LDFLAGS) $(CILK_LDFLAGS)
-
-#Bonsai: BonsaiLocalDriver.o libcommon.so libBonsai.so
-# $(CC) -o $@ $^ $(CFLAGS) $(MKL_SEQ_LDFLAGS) $(CILK_LDFLAGS)
-
-BonsaiTrain: BonsaiTrainDriver.o libcommon.so libBonsai.so
- $(CC) -o $@ $^ $(CFLAGS) $(MKL_SEQ_LDFLAGS) $(CILK_LDFLAGS)
-
-BonsaiPredict: BonsaiPredictDriver.o libcommon.so libBonsai.so
- $(CC) -o $@ $^ $(CFLAGS) $(MKL_SEQ_LDFLAGS) $(CILK_LDFLAGS)
-
-#BonsaiIngestTest: BonsaiIngestTest.o libcommon.so libBonsai.so
-# $(CC) -o $@ $^ $(CFLAGS) $(MKL_PAR_LDFLAGS) $(CILK_LDFLAGS)
-
-
-.PHONY: clean cleanest
-
-clean:
- rm -f *.o
- $(MAKE) -C $(SOURCE_DIR)/common clean
- $(MAKE) -C $(SOURCE_DIR)/ProtoNN clean
- $(MAKE) -C $(SOURCE_DIR)/Bonsai clean
- $(MAKE) -C $(DRIVER_DIR)/ProtoNN/trainer clean
- $(MAKE) -C $(DRIVER_DIR)/ProtoNN/predictor clean
- $(MAKE) -C $(DRIVER_DIR)/Bonsai/trainer clean
- $(MAKE) -C $(DRIVER_DIR)/Bonsai/predictor clean
-
-cleanest: clean
- rm -f ProtoNN ProtoNNPredict ProtoNNIngestTest BonsaiIngestTest Bonsai
- $(MAKE) -C $(SOURCE_DIR)/common cleanest
- $(MAKE) -C $(SOURCE_DIR)/ProtoNN cleanest
- $(MAKE) -C $(SOURCE_DIR)/Bonsai cleanest
- $(MAKE) -C $(DRIVER_DIR)/ProtoNN/trainer cleanest
- $(MAKE) -C $(DRIVER_DIR)/ProtoNN/predictor cleanest
- $(MAKE) -C $(DRIVER_DIR)/Bonsai/trainer cleanest
- $(MAKE) -C $(DRIVER_DIR)/Bonsai/predictor cleanest
diff --git a/People.md b/People.md
new file mode 100755
index 000000000..568e37c0f
--- /dev/null
+++ b/People.md
@@ -0,0 +1,236 @@
+---
+layout: default
+title: People
+permalink: /People
+---
+
+
+ Vivek Seshadri
+ Senior Researcher, Microsoft Research
+
+
+
+
+
+
+
+
+
+
+
+
+## Former Members
+
+
+- [Arun Sai Suggala](http://www.cs.cmu.edu/~asuggala/) _(Intern at MSRI from Carnegie Mellon University)_
+- [Ashish Kumar](https://ashishkumar1993.github.io/) _(Research Fellow at MSRI, now a PhD student at UC Berkeley)_
+- [Ankit Goyal](https://www.linkedin.com/in/ankit-goyal-5baaa287) _(Intern at MSRI from University of Michigan, Ann Arbor)_
+- [Bhargavi Paranjape](https://www.linkedin.com/in/bhargaviparanjape) _(Research Fellow at MSRI, now Master's student at Carnegie Mellon University)_
+- [Chirag Gupta](https://aigen.github.io/) _(Research Fellow at MSRI, now a PhD student at Carnegie Mellon University)_
+- [Chirag Pabbaraju](https://in.linkedin.com/in/chirag-pabbaraju-277a4ba5) _(Research Fellow at MSRI, now a Master's student at Carnegie Mellon University)_
+
+- [Kush Bhatia](http://people.eecs.berkeley.edu/~kush/) _(Research Fellow at MSRI, now a PhD student at UC Berkeley)_
+- [Manish Singh](https://in.linkedin.com/in/manish-singh-ab4358b6) _(B.Tech student at IIT Delhi, now a PhD student at MIT)_
+- [Raghavendra Udupa](https://www.linkedin.com/in/raghavendra-udupa-6a943a2/) _(Researcher at MSRI, now Principal Researcher at Wadhwani AI)_
+- [Saurabh Goyal](https://in.linkedin.com/in/saurabh-goyal-6561162b) _(Master's student at IIT Delhi, now at IBM Research)_
+
+
+- [Suvadeep Hajra](https://www.linkedin.com/in/suvadeep-hajra-4a441159/) _(Intern at MSRI from IIT Bombay)_
+- [Vivek Gupta](https://vgupta123.github.io/) _(Research Fellow at MSRI, now a PhD student at University of Utah)_
+- [Yeshwanth Cherapanamjeri](https://yeshwanth94.github.io/) _(Research Fellow at MSRI, now a PhD student at UC Berkeley)_
+
diff --git a/Projects/GesturePod/index.md b/Projects/GesturePod/index.md
new file mode 100644
index 000000000..50fc555f6
--- /dev/null
+++ b/Projects/GesturePod/index.md
@@ -0,0 +1,21 @@
+---
+layout: default
+title: GesturePod
+---
+
+*GesturePod* is a plug-and-play, gesture recognition device
+that is designed to be clamped onto any white-cane used by
+persons with visual impairments. Once clamped onto the cane
+firmly, simple and natural gestures performed on the cane
+can be used to interact with various devices, for instance
+a mobile phone.
+
+
+
+
diff --git a/Projects/GesturePod/instructable.md b/Projects/GesturePod/instructable.md
new file mode 100755
index 000000000..f2e685933
--- /dev/null
+++ b/Projects/GesturePod/instructable.md
@@ -0,0 +1,180 @@
+---
+layout: instructable
+title: Building a GesturePod from Scratch
+titleimage: /img/Projects/GesturePod-original.jpg
+---
+
+
+## Introduction
+
+Do you want to make a simple pod that can detect natural gestures? Do you want
+to deploy Machine Learning / Artificial Intelligence on micro controllers and
+other edge devices? We show you how to build *GesturePod* - a plug and play
+device that recognizes gestures in real-time. This instructable demonstrates
+how you can use GesturePod to convert any white cane into an interactive cane.
+GesturePod enables easy access to smartphones and other home devices for people
+who use a cane, e.g., people with visual impairments and the elderly. Through this
+GesturePod, you can control devices by performing gestures on the cane. For
+example, you can answer an incoming call with a double swipe.
+
+The design of the pod and the schematics for the electronic subsystem is shared
+here[yet to hyperlink]. The algorithm is open sourced under MIT license
+[here](https://github.com/microsoft/EdgeML/blob/master/License.txt) and the
+training data for the 5 gestures - described in our [UIST'19 publication](https://github.com/microsoft/EdgeML/blob/master/docs/publications/GesturePod-UIST19.pdf)
+\- is available
+[here](https://www.microsoft.com/en-us/research/uploads/prod/2018/05/dataTR_v1.tar-5b058a4590168.gz)
+
+In this Instructable we will interface an Inertial Measurement Unit(IMU)
+MPU6050 with a MKR1000 (ARM Cortex M0+ class microcontroller) and deploy a
+pre-trained model that detects 5 gestures - Double Tap, Right Twist, Left
+Twist, Twirl and Double Swipe. These gestures are then communicated to a
+smartphone over Bluetooth Low Energy(BLE).
+
+Don't have the time to build hardware - no worries! Try the
+[simulation](https://github.com/microsoft/EdgeML/tree/master/applications/GesturePod/onComputer)
+on your computer!
+
+In part 2 of this tutorial, you will see how you can train and develop a ML
+model to detect your own gestures.
+
+Pictures speak a thousand words and videos even more! Here are three short video tutorials to help you:
+1. [Video: Raw set-up](https://drive.google.com/file/d/13BYBcYroHY_c4nu1DdYQr4B14SrqJtRL/view?usp=sharing)
+2. [Video: Stand-alone GesturePod](https://drive.google.com/file/d/1JnsX-Kr7Nm77E2ReGSzVNo0cjskrdD8r/view?usp=sharing)
+3. [Video: Integrating GesturePod with cane](https://drive.google.com/file/d/14WoAWmu-wqKG7axfIvB3j3IzGcxNTYqB/view?usp=sharing)
+
+### Components and Tools
+
+#### Electronics
+
+1. [Arduino MKR1000](https://store.arduino.cc/usa/arduino-mkr1000)
+2. MPU6050
+3. HM-10 BLE module
+4. 3.3V 700mAh LiPo Battery (Optional) with JST 2.0 PH 2Pin Connector
+
+Alternatively, you can use HC-05 Bluetooth module in place of the HM-10. Keep
+in mind that for deployment of a system that is running on battery, traditional
+Bluetooth consumes more power than Bluetooth Low Energy (BLE).
+
+#### Mechanical Casing
+
+1. The above electronics will be housed in a casing that can be 3D printed from the design files provided [here](https://drive.google.com/drive/folders/12WCF7AjGkNXSlXDj9md3rvfvUyPVR4sQ?usp=sharing).
+2. Metallic clamps to robustly clamp the pod to the cane.
+3. Screws to hold the casing together.
+4. Toggle Switch - To turn the system on/off.
+
+This pod is then clamped onto a white-cane as shown in the video. You can also
+make do without the Pod casing, and perhaps tape the system to any stick, or
+pipe. As long as the MPU6050 axis alignment is consistent, you should be good
+to go.
+
+#### Software
+
+Get the latest [Arduino IDE](https://www.arduino.cc/en/Main/Software). This
+instructable has been tested with Arduino version 1.8.5 on Windows 10. A good
+tutorial to get the MKR1000 up and running can be found
+[here](https://www.arduino.cc/en/Guide/MKR1000). We recommend running the
+``blink`` example to verify the setup.
+
+Tools:
+1. Solder Gun
+2. Screws
+3. Star Screw Driver
+4. Insulation Tape
+
+### Step 2: Connections
+
+
+
+We provide video instructions for two types of setup: a) raw set-up, and b) a
+stand-alone full-fledged GesturePod. Instructions for the raw set-up is
+described in the [first video](https://drive.google.com/file/d/13BYBcYroHY_c4nu1DdYQr4B14SrqJtRL/view?usp=sharing). The full fledged pod builds upon the raw set-up and is
+described in the [second video](https://drive.google.com/file/d/1JnsX-Kr7Nm77E2ReGSzVNo0cjskrdD8r/view?usp=sharing).
+
+* The raw set-up will enable you to implement the full Machine Learning
+ Algorithm without any loss in functionality. The connections are described
+ below:
+ ```
+ MKR1000 ----------------> HM10
+ VCC ----------------> VCC
+ GND ----------------> GND
+ 0 (DO) ----------------> RX
+ 1 (D1) ----------------> TX
+
+ MKR1000 ----------------> MPU6050
+ VCC ----------------> VCC
+ GND ----------------> GND
+ SDA (D11) ----------------> SDA
+ SCL (D12) ----------------> SCL
+ ```
+
+* The full fledged pod integrates the raw-set up along with a battery and
+ switch - thereby, helping use the system without any connections to a power
+ source, while conserving the battery when the system is turned off.
+
+We recommend running the ``testMPU.ino`` example to verify MPU6050 connection.
+
+## Step 3: Components housing
+
+
+
+After ensuring data can be polled from the MPU, you can now encapsulate the
+electronics into the casing that can be 3D printed using files provided
+[here](https://drive.google.com/drive/folders/12WCF7AjGkNXSlXDj9md3rvfvUyPVR4sQ?usp=sharing). Ensure you have the following 3 parts:
+* Pod Base
+* Pod Cap
+* Clamp Support
+
+First, the MPU is housed in the cavity in the pod base. The MKR1000 is then
+placed on top of the MPU6050 on the columnar supports. Finally, the HM-10 BLE
+module is suspended between the projections in the pod cap. The cap and the
+base are then joined together and fastened with wedge joints. Further, there is
+support provided for screws to further bolster mechanical support. After
+attaching the clamp to the pod, you can now clamp the system to the Interactive
+cane as shown.
+
+*Note:* Take care to align the MPU to the axis of the pod, as shown in the video.
+
+## Step 4: Burn the code onto MKR1000 and connect to phone
+
+You are now just a step away from implementing gesture recognition on edge
+device..!
+
+Download the code / clone the repository from
+[onMKR1000.ino](https://github.com/microsoft/EdgeML/tree/master/applications/GesturePod/onMKR1000).
+Build and upload the code using Arduino IDE. Remember to select MKR1000 as the
+Board. Open your Serial monitor and set the *BAUD* rate to 115200. You can
+now notice the predicted classes. Perform the gestures as demonstrated in
+the [Integrating with cane](https://drive.google.com/file/d/14WoAWmu-wqKG7axfIvB3j3IzGcxNTYqB/view?usp=sharing)
+video and the corresponding gestures will be predicted.
+
+The gestures detected are also transmitted over BLE. You can use the [nRF Connect
+app](https://play.google.com/store/apps/details?id=no.nordicsemi.android.mcp&hl=en_IN)
+to connect over BLE, and receive the gestures on your phone. To use the
+gestures detected to trigger corresponding actions on the phone, you can
+download and install the "Interactive Cane" app from [Coming Soon..!]. Remember
+to give all necessary permissions and turn the Bluetooth and location services on.
+
+*Note:* If you are using BLE then it is necessary to have a phone that supports BLE.
+
+## What Next?
+
+This tutorial focused on building the GesturePod, and deploying a pre-trained
+machine learning model to recognize gestures. The next tutorial will teach you
+\- how to train, and deploy a machine learning model to recognize your own
+gestures. Know enough to get started already? Head over to [here](https://github.com/microsoft/EdgeML/tree/master/applications/GesturePod/training) to start training a new model to recognize your custom gestures.
+
+## The Things Network Conference - A shorter tutorial
+
+[Paul Foster](https://github.com/PaulDFoster) created a [shorter _(and better !?)_ tutorial](https://github.com/PaulDFoster/GesturePod) for the hands on workshop at The Things Network Conference, UK (2019). An Adafruit Trinket M0 is used instead of the MKR1000. Further, this basic GesturePod indicates gestures recognised _(respecting the British-English spelling, considering the event was held in the UK :D)_ by using the RGB LED on the Adafruit Trinket M0, rather than via Bluetooth as in the full GesturePod implementation. The tutorial also guides you on how to train a new model for custom gestures.
+
+---
+
+Did you make this project? Share it with us! We welcome feed-back, comments,
+and suggestions - please let us know what you think at edgeml@microsoft.com.
+
+
+
+
+
+
+
diff --git a/Projects/WakeWord/index.md b/Projects/WakeWord/index.md
new file mode 100644
index 000000000..431c84abf
--- /dev/null
+++ b/Projects/WakeWord/index.md
@@ -0,0 +1,6 @@
+---
+layout: default
+title: "Wake-Word: 'Hey Cortana!'"
+---
+
+*Coming soon...!*
diff --git a/Projects/WakeWord/instructable.md b/Projects/WakeWord/instructable.md
new file mode 100755
index 000000000..735ef5854
--- /dev/null
+++ b/Projects/WakeWord/instructable.md
@@ -0,0 +1,6 @@
+---
+layout: default
+title: Wake-Word
+---
+
+*Coming Soon*
diff --git a/Projects/index.md b/Projects/index.md
new file mode 100644
index 000000000..c4636c470
--- /dev/null
+++ b/Projects/index.md
@@ -0,0 +1,51 @@
+---
+layout: projects
+---
+
+
+
+ GesturePod is a plug-and-play, gesture recognition device that is
+    designed to be clamped onto any white-cane used by persons with visual
+    impairments. Once clamped onto the cane firmly, simple and natural
+ gestures performed on the cane can be used to interact with various
+ devices, for instance a mobile phone.
+
+ EdgeML enables small, fast and accurate classifiers based on LSTM and
+ ProtoNN for real-time keyword spotting on Raspberry Pi3 and Pi0. Our
+ latest set of works, (EMI-RNN and Shallow RNNs) makes keyword spotting
+ possible on even smaller devices; as small as a MXChip with a Cortex M4.
+
+
+
+
+
diff --git a/Publications.md b/Publications.md
new file mode 100755
index 000000000..0cec3521e
--- /dev/null
+++ b/Publications.md
@@ -0,0 +1,91 @@
+---
+layout: default
+title: Publications
+permalink: /Publications
+---
+
+• **GesturePod: Enabling On-device Gesture-based Interaction for White Cane Users**
+_Shishir G. Patil, Don Kurian Dennis, Chirag Pabbaraju, Nadeem Shaheer, Harsha Vardhan Simhadri, Vivek Seshadri, Manik Varma and Prateek Jain_
+_ACM User Interface Software and Technology Symposium (UIST), 2019_
+
+ [Bibtex]
+ [Code]
+ [Video]
+
+
+• **Compiling KB-Sized Machine Learning Models to Tiny IoT Devices**
+_Sridhar Gopinath, Nikhil Ghanathe, Vivek Seshadri and Rahul Sharma_
+_Programming Language Design and Implementation (PLDI), 2019_
+
+ [PDF]
+ [Bibtex]
+ [Code]
+ [Video]
+
+
+• **FastGRNN: A Fast, Accurate, Stable and Tiny Kilobyte Sized Gated Recurrent Neural Network**
+_Aditya Kusupati, Manish Singh, Kush Bhatia, Ashish Kumar, Prateek Jain and Manik Varma_
+_Neural Information Processing Systems (NeurIPS), 2018_
+
+ [PDF]
+ [Bibtex]
+ [Poster]
+ [Tensorflow example]
+ [PyTorch example]
+ [Video]
+ [Blog]
+
+
+• **Multiple Instance Learning for Efficient Sequential Data
+Classification for Resource Constrained Devices**
+_Don Kurian Dennis, Chirag Pabbaraju, Harsha Vardhan Simhadri and Prateek Jain_
+_Neural Information Processing Systems (NeurIPS), 2018_
+
+ [PDF]
+ [Bibtex]
+ [Poster]
+ [Tensorflow example]
+ [PyTorch example]
+ [Video]
+
+
+• **ProtoNN: Compressed and Accurate kNN for Resource-scarce Devices**
+_Chirag Gupta, Arun Sai Suggala, Ankit Goyal, Harsha Vardhan Simhadri, Bhargavi
+Paranjape, Ashish Kumar, Saurabh Goyal, Raghavendra Udupa, Manik Varma and Prateek
+Jain_
+_International Conference on Machine Learning (ICML), 2017_
+
+ [PDF]
+ [Bibtex]
+ [Poster]
+ [Cpp code]
+ [Tensorflow example]
+ [PyTorch example]
+ [Blog]
+
+
+• **Resource-efficient Machine Learning in 2 KB RAM for the Internet of Things**
+_Ashish Kumar, Saurabh Goyal and Manik Varma_
+_International Conference on Machine Learning (ICML), 2017_
+
+ [PDF]
+ [Bibtex]
+ [Poster]
+ [Cpp code]
+ [Tensorflow example]
+ [PyTorch example]
+ [Blog]
+
+
+
+• **Thresholding based Efficient Outlier Robust PCA.**
+_Yeshwanth Cherapanamjeri, Prateek Jain and Praneeth Netrapalli_
+_Conference on Learning Theory (COLT), 2017_
+
+ [PDF]
+ [Bibtex]
+ [Poster]
+ [Matlab ZIP]
+ [More Code]
+ [Slides]
+
diff --git a/README.md b/README.md
deleted file mode 100644
index 96723b4e9..000000000
--- a/README.md
+++ /dev/null
@@ -1,115 +0,0 @@
-## Edge Machine Learning
-
-This repository provides code for machine learning algorithms for edge devices developed at [Microsoft Research India](https://www.microsoft.com/en-us/research/project/resource-efficient-ml-for-the-edge-and-endpoint-iot-devices/).
-
-Machine learning models for edge devices need to have a small footprint in terms of storage, prediction latency and energy. One example of a ubiquitous real-world application where such models are desirable is resource-scarce devices and sensors in the Internet of Things (IoT) setting. Making real-time predictions locally on IoT devices without connecting to the cloud requires models that fit in a few kilobytes.
-
-This repository contains two such algorithms **Bonsai** and **ProtoNN** that shine in this setting. These algorithms can train models for classical supervised learning problems with memory requirements that are orders of magnitude lower than other modern ML algorithms. The trained models can be loaded onto edge devices such as IoT devices/sensors, and used to make fast and accurate predictions completely offline.
-
-For details, please see our [wiki page](https://github.com/Microsoft/EdgeML/wiki/) and our ICML'17 publications on [Bonsai](publications/Bonsai.pdf) and [ProtoNN](publications/ProtoNN.pdf) algorithms.
-
-Initial Code Contributors: [Chirag Gupta](https://aigen.github.io/), [Aditya Kusupati](https://adityakusupati.github.io/), [Ashish Kumar](https://ashishkumar1993.github.io/), and [Harsha Simhadri](http://harsha-simhadri.org).
-
-We welcome contributions, comments and criticism. For questions, please [email Harsha](mailto:harshasi@microsoft.com).
-
-[People](https://github.com/Microsoft/EdgeML/wiki/People/) who have contributed to this [project](https://www.microsoft.com/en-us/research/project/resource-efficient-ml-for-the-edge-and-endpoint-iot-devices/).
-
-### Requirements
-* Linux:
- * gcc version 5.4. Other gcc versions above 5.0 could also work.
- * We developed the code on Ubuntu 16.04LTS. Other linux versions could also work.
- * You can either use the Makefile in the root, or cmake via the build directory (see below).
-
-* Windows 10:
- * Visual Studio 2015. Use cmake (see below).
- * For Anniversary Update or later, one can use the Windows Subsystem for Linux, and the instructions for Linux build.
-
-* On both Linux and Windows 10, you need an implementation of BLAS, sparseBLAS and vector math calls.
- We link with the implementation provided by the [Intel(R) Math Kernel Library](https://software.intel.com/en-us/mkl).
- Please download later versions (2017v3+) of MKL as far as possible.
- The code can be made to work with other math libraries with a few modifications.
-
-### Building using Makefile
-
-After cloning this repository, set compiler and flags appropriately in `config.mk`. Then execute the following in bash:
-
-```bash
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH::
-make -Bj
-```
-Typically, MKL_PATH = /opt/intel/mkl/lib/intel64_lin/, and EDGEML_ROOT is '.'.
-
-This will build four executables _BonsaiTrain_, _BonsaiPredict_, _ProtoNNTrain_ and _ProtoNNPredict_ in .
-Sample data to try these executables is not included in this repository, but instructions to do so are given below.
-
-### Building using CMake
-
-For Linux, in the directory:
-
-```bash
-mkdir build
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:
-cd build
-cmake ..
-make -Bj
-```
-
-For Windows 10, in the directory, modify `CMakeLists.txt` file to change by changing the
-line
-```set(MKL_ROOT "")```
-
-Then, generate Visual Studio 2015 solution using:
-
-```mkdir build
-cd build
-cmake -G "Visual Studio 14 2015 Win64" -DCMAKE_BUILD_TYPE=Release ..
-```
-Finally, open `EdgeML.sln` in VS2015, build and run.
-
-For both Linux and Windows10, cmake builds will generate four executables _BonsaiTrain_, _BonsaiPredict_, _ProtoNNTrain_ and _ProtoNNPredict_ in .
-
-### Download a sample dataset
-Follow the bash commands given below to download a sample dataset, USPS10, to the root of the repository. Bonsai and ProtoNN come with sample scripts to run on the usps10 dataset. EDGEML_ROOT is defined in the previous section.
-
-```bash
-cd
-mkdir usps10
-cd usps10
-wget http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2
-wget http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2
-bzip2 -d usps.bz2
-bzip2 -d usps.t.bz2
-mv usps train.txt
-mv usps.t test.txt
-mkdir ProtoNNResults
-cd
-```
-This will create a sample train and test dataset, on which
-you can train and test Bonsai and ProtoNN algorithms. As specified, we create an output folder for ProtoNN. Bonsai on the other hand creates its own output folder.
-For instructions to actually run the algorithms, see [Bonsai Readme](docs/README_BONSAI_OSS.md) and [ProtoNN Readme](docs/README_PROTONN_OSS.ipynb).
-
-### Makefile flags
-You could change the behavior of the code by setting these flags in `config.mk` and rebuilding with `make -Bj` when building with the default Makefile in . When building with CMake, change these flags in `CMakeLists.txt` in . All these flags can be set for both ProtoNN and Bonsai.
-The following are supported currently by both ProtoNN and Bonsai.
-
- SINGLE/DOUBLE: Single/Double precision floating-point. Single is most often sufficient. Double might help with reproducibility.
- ZERO_BASED_IO: Read datasets with 0-based labels and indices instead of the default 1-based.
- TIMER: Timer logs. Print running time of various calls.
- CONCISE: To be used with TIMER to limit the information printed to those deltas above a threshold.
-
-The following currently only change the behavior of ProtoNN, but one can write corresponding code for Bonsai.
-
- LOGGER: Debugging logs. Currently prints min, max and norm of matrices.
- LIGHT_LOGGER: Less verbose version of LOGGER. Can be used to track call flow.
- XML: Enable training with large sparse datasets with many labels. This is in beta.
- VERBOSE: Print additional informative output to stdout.
- DUMP: Dump models after each optimization iteration instead of just in the end.
- VERIFY: Legacy verification code for comparison with Matlab version.
-
-Additionally, there is one of two flags that has to be set in the Makefile:
-
- MKL_PAR_LDFLAGS: Linking with parallel version of MKL.
- MKL_SEQ_LDFLAGS: Linking with sequential version of MKL.
-
-### Microsoft Open Source Code of Conduct
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/ThirdPartyNotice.txt b/ThirdPartyNotice.txt
deleted file mode 100644
index d7fdb0d87..000000000
--- a/ThirdPartyNotice.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-This Microsoft Edge Machine Learning project is based on or incorporates materials and code from the projects listed below (collectively, “Third Party Code”).
-
-(c) The Eigen Project. Benoît Jacob (founder), Gaël Guennebaud (guru) and other contributors.
-Mozilla Public License, version 2.0. https://opensource.org/licenses/MPL-2.0
diff --git a/_Blog/Bonsai-Blog.md b/_Blog/Bonsai-Blog.md
new file mode 100644
index 000000000..9265cdaf3
--- /dev/null
+++ b/_Blog/Bonsai-Blog.md
@@ -0,0 +1,175 @@
+---
+layout: blogpost
+title: "Bonsai: Strong, Shallow and Sparse Non-linear Tree Based Classifier"
+postdate: 04 September, 2018
+author: Aditya Kusupati
+---
+
+### TL;DR
+Bonsai is a new tree model for supervised learning tasks such as binary and
+multi-class classification, regression, ranking, etc. Bonsai learns a single,
+shallow, sparse tree with powerful predictors at internal and leaf nodes. This
+allows Bonsai to achieve state-of-the-art prediction accuracies while making
+predictions efficiently in microseconds to milliseconds (depending on processor
+speed) using models that fit in a few KB of memory. Bonsai can be trained in
+the cloud or on your laptop, but can then make predictions locally on tiny
+resource-constrained devices without needing cloud connectivity.
+
+Bonsai has been deployed successfully on microcontrollers tinier than a grain
+of rice such as the ARM Cortex M0 with just 2 KB RAM. Bonsai can also make
+predictions accurately and efficiently on the tiniest of IoT boards such as the
+Arduino Pro Mini based on an 8 bit Atmel ATmega328P microcontroller operating
+at 8 MHz without any floating point support in hardware, with 2 KB RAM and 32
+KB read-only flash memory. Bonsai can also fit in the L1 cache of processors
+found in mobiles, tablets, laptops, and servers for low-latency applications.
+
+
+Bonsai can also be useful for switching to a smaller, cheaper and more
+energy-efficient form factor such as from a Raspberry Pi 3 to an Arduino Pro
+Mini. Finally, Bonsai also generalizes to other resource-constrained scenarios
+beyond the Internet of Things and can be used on laptops, servers and the cloud
+for low-latency applications and to bring down energy consumption and operating
+costs. [Bonsai end-to-end script](https://github.com/Microsoft/EdgeML/tree/master/tf/examples/Bonsai).
+
+### Introduction, Motivation, and Ideas:
+##### Tree algorithms:
+Tree algorithms are general and can be used for classification, regression, ranking and other problems commonly found in the IoT setting. Even more importantly, they are ideally suited to IoT applications as they can achieve good prediction accuracies with prediction times and energies that are logarithmic in the number of training points. Unfortunately, they do not directly fit on tiny IoT devices as their space complexity is linear rather than logarithmic.
+In particular, learning shallow trees, or aggressively pruning deep trees or large ensembles, to fit in just a
+few KB often leads to poor prediction accuracy.
+
+Given the capabilities of Tree Algorithms in general, Bonsai targets three key points to make them
+work in the IoT scenario.
+
+##### The main components:
+There are *3* vital ideas in Bonsai:
+
+- Bonsai learns a single, shallow, sparse tree so as to reduce model size but with powerful nodes for accurate prediction.
+- Both internal and leaf nodes in Bonsai together make non-linear predictions. They contribute to the final prediction similar to an ensemble.
+- Bonsai learns a sparse matrix which projects all data points into a low-dimensional space in which the tree is learned.
+
+##### Stronger nodes in the Tree:
+Both leaf and internal nodes of Bonsai use two *1-vs-all* classifiers combined in a non-linear fashion to make the nodes much
+more powerful than those of regular tree-based algorithms.
+
+Assume that $$\hat{\v x}$$ is the input for a given node. Each node has two matrices (*1-vs-all* classifiers) $$\v W \ \& \ \v V$$.
+The prediction ($$\v p$$) at each node is ($$\sigma$$ is a scalar and acts as the sigmoid sharpness parameter for $$\tanh$$):
+
+$$
+\v p = \v W \hat{\v x}\odot \tanh(\sigma\v V \hat{\v x})
+$$
+
+
+One can find any optimal non-linear combination if deemed right for the task. Hence this is tunable.
+
+##### Contribution of internal and leaf nodes to prediction:
+Bonsai’s overall prediction for a point is the sum of the individual node predictions along the path traversed by the point. Path-based prediction allows Bonsai to accurately learn non-linear decision boundaries while sharing parameters along paths to further reduce model size.
+
+If $$\v p_{i}$$ is the prediction from the $$i^{th}$$ node of Bonsai. *0* indexed at the root and the children follow the numbering of *2i+1* and *2i+2*. As Bonsai is a balanced binary tree, every path to a leaf node has the same number of
+nodes, i.e., the depth of the tree.
+
+The final prediction of Bonsai comes from the aggregate from internal nodes and leaf node in the path taken by the point.
+
+Final prediction $$y(\v x)$$ is given by:
+
+
+$$I_i(\v x)$$ is the indicator function which states if the node lies in the path of the data point or not. $$k$$ is the total number of nodes in the Bonsai tree.
+
+The indicator function is simulated by simple branching hyperplane at each internal node. Each internal node has branching hyperplane $$\v \theta_{i}$$.
+At each internal node the point passes through, it chooses the next node/child based on the sign of the scalar, i.e., $$\v \theta_{i}^\top \hat{\v x}$$.
+Hence the path is determined by following the data point using the branching hyper-planes at each internal node.
+
+##### Sparse projection matrix to work in low-dimensional space:
+Bonsai learns a sparse matrix ($$\v Z$$) which projects all data points into a low-dimensional space in which the tree is learned.
+This allows Bonsai to fit in a few KB of flash. Furthermore, the sparse projection is implemented in a streaming
+fashion thereby allowing Bonsai to tackle IoT applications where even a single feature vector might not fit in 2 KB of RAM.
+
+So the $$\hat{\v x}$$ being discussed till now is actually generated using the sparse projection matrix $$\v Z$$. The projection dimension is
+generally very small when compared to the actual dimensionality of $$\v x$$, thereby helping to learn parameter matrices in a very low-dimensional space,
+which in turn helps lower compute and model size.
+
+$$
+\hat{\v x} = \v Z \v x
+$$
+
+
+### Bonsai Training:
+Rather than learning the Bonsai tree node by node in a greedy fashion, all nodes are learned jointly, along with the sparse projection matrix, so as to optimally allocate memory budgets to each node while maximizing prediction accuracy. Even though the Bonsai in the current formulation is non-differentiable (non-backpropable)
+it can be made back propagation friendly or differentiable using an annealed Indicator function which starts as a soft indicator function and finally converges to the aforementioned hard indicator ie., $$\v \theta_{i}^\top \hat{\v x}$$. This can be modeled using
+
+
+
+For any node $$i$$ apart from the root node, the parent node is indexed by $$(\lceil{\frac{i}{2}}\rceil-1)$$. So this formulation depends on
+$$\sigma_{I}$$ value for the softness/hardness of the indicator/branching function. Essentially, this formulation suggests it is a
+probabilistic weight given to each node depending on the path the point takes. $$\sigma_{I}$$ is a scalar which is updated heuristically over training routine finally culminating at a very high positive value ensuring a hard indicator function. This is empirical and can be set anyway one wishes it to converge to hard branching function.
+
+After making Bonsai differentiable, we employ a *3* phase joint training routine for all the parameters to ensure better performance
+while maintaining the required budget constraints.
+
+##### Three phase training routine:
+The training routine has 3 phases:
+
+- Dense Training Phase
+- Sparse Training with IHT to find optimal support
+- Sparse Re-training on a fixed support
+
+In the first stage of the training, Bonsai is trained for one-third epochs with the model using non-convex optimizers.
+This stage of optimization ignores the sparsity constraints on the parameters and learns the dense parameter matrices.
+
+Bonsai is next trained for the next third of the epochs using a non-convex optimizer, projecting the parameters onto the space of sparse low-rank matrices after every few batches while maintaining support between two consecutive projection steps. This stage, using non-convex optimizers with Iterative Hard Thresholding (IHT), helps Bonsai identify the correct support for parameters $$(\v W_{i},\v V_{i})$$.
+
+Lastly, Bonsai is trained for the last third of the epochs with non-convex optimizer while freezing the support set of the parameters. Early stopping is often deployed in stages (II) and (III) to obtain the best models within budget constraints and this acts as a regularizer.
+
+This training routine was developed during the time of DSD-training and uses much higher sparsity constraints, and shows that the routine can
+maintain the performance while reducing the memory and compute footprint.
+
+##### Final Optimisation - Integer arithmetic
+Standard floating point operations are 4x more expensive than 1-byte integer operations on edge devices without a floating point unit. This often results in slow inference. In most Bonsai models, the expensive step is the floating point operations, especially the exponentiation in the non-linearity (tanh). This can be circumvented by using piece-wise linear approximations for the non-linearities and using them in the models instead of the original ones during training, and thereby during prediction.
+
+The approximate or quantized non-linearities are most of the times simple conditionals and integer operations and when trained as part of the model
+maintain the accuracies and the final model after byte-quantization will be tailor-made for all integer operations.
+
+### Results:
+
+One can have a look at the results in the [ICML 2017](http://manikvarma.org/pubs/kumar17.pdf) publication as well as the [poster](https://github.com/Microsoft/EdgeML/wiki/files/BonsaiPoster.pdf) and [presentation](https://github.com/Microsoft/EdgeML/wiki/files/BonsaiResults.pptx) ([video](https://vimeo.com/237274524))
+
+##### Auxiliary observations:
+The joint training of the projection matrix and the three-phase training are very vital, and actually make a significant difference compared to a setup where the projection matrix is replaced by PCA and the sparsification is done after the complete training. You can find a table on this as part of the paper.
+
+### Conclusions:
+The paper proposed an alternative IoT paradigm, centric
+to the device rather than the cloud, where ML models run
+on tiny IoT devices without necessarily connecting to the
+cloud thereby engendering local decision-making capabilities.
+The Bonsai tree learner was developed towards this
+end and demonstrated to be fast, accurate, compact and
+energy-efficient at prediction time. Bonsai was deployed
+on the Arduino Uno board as it could fit in a few KB of
+flash, required only 70 bytes of writable memory for binary
+classification and 500 bytes for a 62 class problem,
+handled streaming features and made predictions in milliseconds
+taking only milliJoules of energy. Bonsai’s prediction
+accuracies could be as much as 30% higher as compared
+to state-of-the-art resource-efficient ML algorithms
+for a fixed model size and could even approach and outperform
+those of uncompressed models taking many MB of
+RAM. Bonsai achieved these gains by developing a novel
+model based on a single, shallow, sparse tree learned in a
+low-dimensional space. Predictions made by both internal
+and leaf nodes and the sharing of parameters along
+paths allowed Bonsai to learn complex non-linear decision
+boundaries using a compact representation.
+
+### Code and Usage:
+The code is public and is part of the [Microsoft EdgeML Repository](https://github.com/Microsoft/EdgeML). The Bonsai Graph can be found as part of `tf.edgeml.graph.bonsai` and can be used in a plug and play fashion in place of any classifier in tensorflow as `tf.edgeml.graph.bonsai.Bonsai`.
+Bonsai graph has multiple arguments for fine-tuning and appropriate selection of the hyperparameters for the given problem.
+The `tf.edgeml.trainer.bonsaiTrainer.BonsaiTrainer` takes in the created Bonsai graph object and runs the 3-phase training routine to ensure optimal compression.
+Bonsai is packaged as a single end-point user script and can be found along with example on a public dataset as part of [Bonsai](https://github.com/Microsoft/EdgeML/tree/master/tf/examples/Bonsai).
diff --git a/_Blog/FastGRNN-Blog-1.md b/_Blog/FastGRNN-Blog-1.md
new file mode 100644
index 000000000..5d753f25e
--- /dev/null
+++ b/_Blog/FastGRNN-Blog-1.md
@@ -0,0 +1,255 @@
+---
+layout: blogpost
+title: Fast(G)RNN - Fast, Accurate, Stable and Tiny (Gated) Recurrent Neural Network (Part I)
+postdate: 04 September, 2018
+author: Aditya Kusupati
+---
+
+### TL;DR
+*FastRNN* and *FastGRNN*, two RNN architectures (cells), together called
+*FastCells*, are developed to address the twin RNN limitations of inaccurate/unstable
+training and inefficient prediction.
+
+FastRNN provably stabilizes the RNN training which usually suffers from the infamous vanishing and exploding gradient
+problems.
+
+FastGRNN extends over and above FastRNN and learns low-rank, sparse and quantized weight matrices whilst having novel, elegant and expressive Gated RNN update equations. This allows FastGRNN to achieve state-of-the-art
+prediction accuracies while making predictions in microseconds to milliseconds
+(depending on processor speed) using models that fit in a few KB of memory.
+
+FastGRNN is up to **45x** smaller and faster (inference on edge-devices) than
+state-of-the-art RNN architectures (LSTM/GRU) whilst maintaining accuracies
+on various benchmark datasets and FastRNN has provably stable training and
+better performance when compared to the Unitary architectures which try to solve the same.
+
+Fast(G)RNN can be trained in the cloud or on your laptop, but can then make
+predictions locally on tiny resource-constrained devices without needing cloud connectivity.
+While the NIPS'18 publication talks only about 3-component compression in FastGRNN, the same pipeline can be seamlessly used for any RNN architecture and in our open source release, we extend the same to FastRNN. [FastCells end-to-end script](https://github.com/Microsoft/EdgeML/tree/master/tf/examples/FastCells).
+
+
+### Introduction and Motivation
+*FastRNN* and *FastGRNN* architectures were particularly inspired to tackle and get rid of three major problems:
+
+- Costly and expert feature engineering techniques like FFT for ML classifiers in Time-series regime.
+- Unstable RNN Training due to vanishing and exploding gradient problem.
+- Expensive RNN models and compute footprint for inference on edge-devices.
+
+##### Solution to expensive feature engineering - Deep Learning? and Why RNNs?
+The major motivation comes from the fact that most of the feature engineering techniques
+involved in the Time-series classification are expensive like FFT, which is
+the bottleneck if the edge-device doesn't have DSP support, and also involve the investment of
+experts' time to understand and craft the ideal features for the task.
+
+Deep Learning has proved over the last few years that one can incorporate featurization
+as part of the model cost and try to learn compact models which can be used on raw data
+while having the state-of-the-art accuracies.
+
+Recurrent Neural Networks (RNNs) are the compact Neural Network models that have
+capacity to harness temporal information in a time-series setting and make effective/accurate
+predictions.
+
+
+
+$$\v W$$ is the input-to-hidden state transition matrix, $$\v U$$ is the hidden-to-hidden state transition matrix.
+$$\v x_t$$ is the input at timestep $$t$$, $$\v h_{t}$$ is the hidden state at the end of timestep $$t$$. Note that
+the total number of timesteps is $$T$$ and the classification is done using a simple FC-layer on $$\v h_T$$.
+
+Spending several weeks on finding the appropriate features for the [Gesture Pod] along
+with data visualization, revealed that multi-modal sensor data obtained from IoT devices
+can be modeled as time-series and hence RNNs might be the right way to circumvent the issues
+and painstaking feature engineering.
+
+##### Solution to Ineffective/Unstable RNN Training - FastRNN
+Simple RNNs are plagued with inaccurate/unstable training for longer time-series sequences
+and it has been the reason for the advent of complex yet more stable and expressive models like
+
+- *Gated Architectures*: LSTM, GRU, UGRNN etc.,
+- *Unitary Architectures*: Unitary RNN, Orthogonal RNN, Spectral RNN etc.,
+
+These architectures, however, have their own drawbacks and will be addressed as we go along.
+
+FastRNN stabilizes the entire RNN training using at most *2* scalars. FastRNN is not a novel
+architecture but a simple improvement over the existing Leaky Units along with a rigorous analysis
+which shows the stability of training along with generalization and convergence bounds.
+
+
+
+$$\v W$$ is the input-to-hidden state transition matrix, $$\v U$$ is the hidden-to-hidden state transition matrix.
+$$\v x_t$$ is the input at timestep $$t$$, $$\v h_{t}$$ is the hidden state at the end of timestep $$t$$.
+$$\alpha$$ and $$\beta$$ are trainable parameters and generally parameterized using sigmoid function to ensure that
+they lie between $$0 \ \& \ 1$$.
+Note that the total number of timesteps is $$T$$ and the classification is done using a simple FC-layer on $$\v h_T$$.
+
+
+
+
+
+##### Solution to expensive RNN models and inference on edge-devices - FastGRNN
+Even though FastRNN stabilizes the RNN training, the model's expressivity is limited and relies on constant attention (scalar gating) for the new information and the running memory/ context across time-steps and across the hidden-units.
+This led to the creation of a Gated architecture named FastGRNN, which while being as accurate as
+state-of-the-art RNN models (LSTM/GRU) but is 45x smaller and faster (on edge-devices).
+
+FastGRNN inherently consists of *3* components of compression over the base architecture:
+
+- Low-Rank parameterization of weight matrices $$\v W$$ and $$\v U$$ (**L**)
+- Sparse parameter matrices (**S**)
+- Byte Quantized weights in parameter matrices (**Q**)
+
+The base architecture without any compression is hereby referred to as *FastGRNN-LSQ* (read as minus LSQ).
+FastGRNN-LSQ is *4x* smaller and faster than LSTM for inference and has very small compute overhead when compared to Simple RNN.
+
+
+
+$$\v W$$ is the input-to-hidden state transition matrix, $$\v U$$ is the hidden-to-hidden state transition matrix.
+$$\v x_t$$ is the input at timestep $$t$$, $$\v h_{t}$$ is the hidden state at the end of timestep $$t$$.
+$$\vec{\tilde{h}}_{t}$$ is the simple RNN update equation and $$\v z_t$$ is the gate equation.
+$$\zeta$$ and $$\nu$$ are trainable parameters and generally parameterized using sigmoid function to ensure that
+they lie between $$0 \ \& \ 1$$.
+Note that the total number of timesteps is $$T$$ and the classification is done using a simple FC-layer on $$\v h_T$$.
+
+
+
+
+Upon using the 3 compression components, this culminates in FastGRNN, which is 45x smaller and faster (on edge-device) than
+the state-of-the-art LSTM/GRU models whilst maintaining similar accuracies.
+
+### Mathematical arguments for RNN and FastRNN Training
+
+##### Why is RNN training unstable?
+Even though simple RNNs are theoretically powerful, their training faces great threats from
+the vanishing and exploding gradient problems. Note that simple RNN is a special case of FastRNN when
+$$\alpha = 1 \ \& \ \beta = 0 $$.
+Mathematically, the gradients of the weight matrices
+of RNN with respect to the loss function $$L$$ for binary classification where $$\v v$$ is the final classifier and $$T$$ total sequence length:
+
+
+
+Even though $$\v D_{k+1}$$ is bounded, the $$M(\v U)$$ term is still a
+repeated multiplication of similar matrices. On looking at the eigenvalue decomposition followed by the repeated multiplication, one can observe that the singular values are exponentiated and this results in gradient vanishing in the smallest eigenvector direction while having gradient explode along the largest eigenvector direction. Also the term $$M(\v U)$$
+can be very ill-conditioned, which summarises the above two concerns. These two potentially lead to no change in train loss or result in NaN for the usual Non-Convex Optimizers.
+
+
+
+Classically, the gradient explosion problem is tackled in various ways and the most famous one being gradient clipping, but this doesn't address the vanishing gradient problem.
+Unitary architectures claim to solve both the problems by using re-parameterization of $$\v U$$ matrix.
+
+##### Do Unitary methods address this issue completely?
+Unitary architectures rely on Unitary parameterization of $$\v U$$ matrix so as to stabilize training.
+Unitary parameterization implies that during the entire training phase $$\v U$$ is forced to be
+Unitary either by re-parameterization (most of the time expensive) or applying transforms during the gradient update (very expensive).
+
+There are a couple of papers which rely on hard unitary constraints like
+Unitary RNN (Arjosky et al., ICML 2016), Full Capacity Unitary RNN (Wisdom et al., NIPS 2016) and (Mhammedi et al., ICML 2017).
+The hard unitary constraints restrict the solution space drastically and potentially miss out the right weight matrices. To tackle this, soft unitary constraints were enforced, to span a larger space like
+Factorized RNN (Vorontsov et al., ICML 2017), Spectral RNN (Zhang et al., ICML 2018) and Kronecker Recurrent Units (Jose et al., ICML 2018).
+
+The soft constraints improve the performance and training times, but their solution space is still restricted and needs very extensive grid search to find the appropriate hyper-parameters. Even with all of this, they still fall short of the state-of-the-art Gated RNNs like LSTM/GRU.
+Their re-parameterization forces them to have higher hidden-units to reach considerably good performance and thereby increasing the model sizes
+making them unfit for edge devices.
+
+As unitary matrices only focus on $$\v U$$, the fact that the coupled term $$(\v U^\top \v D_{k+1})$$ is the culprit is often overlooked.
+The coupled term is actually prone to gradient vanishing when the non-linearity has gradients less than *1* (Ex: tanh and sigmoid).
+The stabilization solution provided by these methods is still flawed and still can suffer from vanishing gradient problem while effectively
+dodging the exploding gradient.
+
+##### How does FastRNN stabilize RNN training?
+As discussed earlier FastRNN is a very simple extension of Leaky Units, FastRNN has both $$\alpha \ \& \ \beta$$ trainable, which are already prevalent in the literature.
+But the mathematical analysis which at the end shows that at most *2* scalars are enough to stabilize RNN training is not extensively studied and perhaps FastRNN is the first one to do so. Note: Simple RNN is a special case of FastRNN where
+$$\alpha = 1 \ \& \ \beta = 0 $$.
+
+Let us look at the gradient and the $$M(\v U)$$ terms in case of FastRNN with respect to the loss function $$L$$ for binary classification where $$\v v$$ is the final classifier and $$T$$ total sequence length:
+
+
+
+Most of the time one can use the fact that $$\beta \approx 1 - \alpha$$ and use a single parameter to stabilize the entire
+training. Looking at the bounds on largest singular value (2-norm), smallest singular value and condition number of $$M(\v U)$$:
+
+
+
+All the above terms start to well-behave with the simple theoretical setting of $$\beta=1-\alpha$$ and $$\alpha=\frac{1}{T \max_k \|\v U^T\v D_{k+1}\|}$$.
+One can observe that this setting leads to:
+
+
+
+This shows that FastRNN can provably avoid both vanishing and exploding gradient problems while ensuring the $$M(\v U)$$ term is well conditioned. Empirically, FastRNN training ensures $$\alpha$$ behaves as predicted and is $$\alpha = \bigO{1/T}$$. FastRNN outperforms most of the Unitary methods across most of the benchmark datasets used
+across speech, NLP, activity and image classification as shown in the paper.
+
+##### Any guarantees for FastRNN in terms of Convergence and Generalization?
+It turns out that FastRNN has polynomial bounds in terms of length of the time-series sequence ($$T$$). Using the
+theoretical setting that $$\alpha = \bigO{1/T}$$ one can show that by using randomized stochastic gradient descent with no
+assumptions on the data distributions, FastRNN converges to a stationary point in upper bound of $$\Om{T^{6}}$$ iterations while the
+same analysis for simple RNN reveals an exponential upper bound.
+
+Coming to the generalization error, while assuming that the loss function is 1-Lipschitz, FastRNN has the scaling polynomial in $$T$$ ie., $$\bigO{\alpha T}$$
+whereas same analysis for simple RNN reveals an exponential dependency. Note that the upper bounds are being compared and comparing and proving of stronger lower bounds is an open problem that needs to be answered to analyze this further.
+
+### FastGRNN & Results
+These two are part of the Part II of the Fast(G)RNN Blog. Link to [Part II]({{ site.baseurl }}/Blog/fastgrnn-blog-2).
+Code: [FastCells](https://github.com/Microsoft/EdgeML/tree/master/tf/examples/FastCells).
diff --git a/_Blog/FastGRNN-Blog-2.md b/_Blog/FastGRNN-Blog-2.md
new file mode 100644
index 000000000..7b9cf4218
--- /dev/null
+++ b/_Blog/FastGRNN-Blog-2.md
@@ -0,0 +1,156 @@
+---
+layout: blogpost
+title: Fast(G)RNN - Fast, Accurate, Stable and Tiny (Gated) Recurrent Neural Network (Part II)
+postdate: 04 September, 2018
+author: Aditya Kusupati
+---
+
+### FastGRNN
+
+##### Why move away from FastRNN to FastGRNN?
+Even after stable training, the expressiveness of FastRNN falls short of state-of-the-art gated architectures in performance.
+This is due to having the same scalar gate (dumb & constant gate) for all the hidden units over all the timesteps. The ideal scenario, like in LSTM/GRU is to have an input dependent and per hidden unit scalar gate for having more expressivity. FastGRNN is created incorporating all the things discussed along with a much lower memory and compute footprint (45x).
+
+##### Base architecture: FastGRNN-LSQ
+As mentioned in the earlier section the base architecture created is addressed as *FastGRNN-LSQ*. The choice of architecture is very intuitive and tries to reuse as much information as possible and have minimal memory and compute footprint.
+
+FastGRNN-LSQ essentially uses the self-information from the update equation and reuses it in the gate with a different bias vector and a non-linearity. This ensures the compute is shared for both the update and the gate equations. The final hidden state update ie., the linear combination of the gated update equation and the previous hidden state, ensures that the architecture is expressive enough to match the
+performance of state-of-the-art gated architectures.
+
+The update equation $$\tilde {\v h}_{t}$$ and the gate equation $$\v z_{t}$$, therefore share the memory and compute.
+The final update equation $$\v h_{t}$$ can be interpreted as the gate acting as the forget gate for the previous
+hidden state and the affine transformed $$(1 - \v z_{t})$$ ie., $$(\zeta(1 - \v z_{t}) + \nu)$$ improves the model's
+capability and expressivity and helps it achieve the last few points of accuracy to beat or be on par with LSTM/GRU.
+
+The paper shows that FastGRNN-LSQ is either better or in the leagues of LSTM/GRU across benchmark datasets while being up to 4x smaller than LSTM/GRU.
+Given that we have a model which is powerful, small and elegant, can we make it very small and make it work on edge-devices?
+
+##### FastGRNN Compression.
+We use three components Low-Rank (L), Sparsity (S) and Quantization (Q) as part of our compression routine.
+
+Low-Rank parameterisation of weight matrices:
+
+
+$$\v W^1 \ \& \ \v W^2$$ are two low-rank matrices of the same rank used to re-parameterize $$\v W$$.
+$$\v U^1 \ \& \ \v U^2$$ are two low-rank matrices of the same rank used to re-parameterize $$\v U$$.
+
+Sparse weight matrices ($$s_w \ \& \ s_u$$ are the number of non-zeros):
+
+
+Byte Quantization: The parameter matrices are trained with 4-byte floats and are finally quantized to 1-byte integers.
+This directly gives a 4x compression and if done right (using approximate piecewise non-linearities) will result in pure integer arithmetic on edge-devices without floating point unit. Note that one can use much more effective compression pipelines like Deep Compression over and above this to achieve further compression. For example, clustering of weights and generation of codebooks can result in up to 2-3x compression on FastGRNN.
+
+##### Training routine to induce compression
+The training routine has 3 phases:
+
+- Dense Training Phase
+- Sparse Training with IHT to find optimal support
+- Sparse Re-training on a fixed support
+
+In the first stage of the training, FastGRNN is trained for one-third epochs with the model using non-convex optimizers.
+This stage of optimization ignores the sparsity constraints on the parameters and learns a low-rank representation of the parameters.
+
+FastGRNN is next trained for the next third of the epochs using a non-convex optimizer, projecting the parameters onto the space of sparse low-rank matrices after every few batches while maintaining support between two consecutive projection steps. This stage, using non-convex optimizers with Iterative Hard Thresholding (IHT), helps FastGRNN identify the correct support for parameters $$(\vec{W}^i,\vec{U}^i)$$.
+
+Lastly, FastGRNN is trained for the last third of the epochs with non-convex optimizer while freezing the support set of the parameters. Early stopping is often deployed in stages (II) and (III) to obtain the best models within budget constraints and this acts as a regularizer.
+
+This training routine was developed during the time of DSD-training and uses much higher sparsity constraints and shows that the routine can
+maintain the performance while reducing the memory and compute footprint.
+
+##### Final Optimisation - Integer arithmetic
+Standard floating point operations are 4x more expensive than 1-byte integer operations on edge-devices without a Floating Point Unit. This, most of the time, results in slow inference. In most of the RNN architectures, the expensive step is floating point operations and especially the exponentiation in the non-linearities like tanh and sigmoid. This can be circumvented using the piece-wise linear approximations for the non-linearities and using them in the models instead of the original ones during training and thereby during prediction.
+
+The approximate or quantized non-linearities are most of the time simple conditionals and integer operations and when trained as part of the model
+maintain the accuracies and the final model after byte-quantization will be tailor-made for all integer operations.
+
+### Results
+
+##### Datasets
+
+
+
+
+**Google-12 & Google-30:** Google Speech Commands dataset contains 1 second long utterances of 30 short words (30 classes) sampled at 16KHz. Standard log Mel-filter-bank featurization with 32 filters over a window size of 25ms and stride of 10ms gave 99 timesteps of 32 filter responses for a 1-second audio clip. For the 12 class version, 10 classes used in Kaggle's [Tensorflow Speech Recognition challenge](https://www.kaggle.com/c/tensorflow-speech-recognition-challenge) were used and remaining two classes were noise and background sounds (taken randomly from remaining 20 short word utterances). Both the datasets were zero mean - unit variance normalized during training and prediction.
+
+**Wakeword-2:** Wakeword-2 consists of 1.63 second long utterances sampled at 16KHz. This dataset was featurized in the same way as the Google Speech Commands dataset and led to 162 timesteps of 32 filter responses. The dataset was zero mean - unit variance normalized during training and prediction.
+
+**[HAR-2](https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones):** Human Activity Recognition (HAR) dataset was collected from an accelerometer and gyroscope on a Samsung Galaxy S3 smartphone. The features available on the repository were directly used for experiments. The 6 activities were merged to get the binarized version. The classes {Sitting, Laying, Walking_Upstairs} and {Standing, Walking, Walking_Downstairs} were merged to obtain the two classes. The dataset was zero mean - unit variance normalized during training and prediction.
+
+**[DSA-19](https://archive.ics.uci.edu/ml/datasets/Daily+and+Sports+Activities):** This dataset is based on Daily and Sports Activity (DSA) detection from a resource-constrained IoT wearable device with 5 Xsens MTx sensors having accelerometers, gyroscopes and magnetometers on the torso and four limbs. The features available on the repository were used for experiments. The dataset was zero mean - unit variance normalized during training and prediction.
+
+**Yelp-5:** Sentiment Classification dataset based on the [text reviews](https://www.yelp.com/dataset/challenge). The data consists of 500,000 train points and 500,000 test points from the first 1 million reviews. Each review was clipped or padded to be 300 words long. The vocabulary consisted of 20000 words and 128-dimensional word embeddings were jointly trained with the network.
+
+**Penn Treebank:** 300 length word sequences were used for word level language modeling task using Penn Treebank (PTB) corpus. The vocabulary consisted of 10,000 words and the size of trainable word embeddings was kept the same as the number of hidden units of architecture.
+
+**Pixel-MNIST-10:** Pixel-by-pixel version of the standard [MNIST-10 dataset](http://yann.lecun.com/exdb/mnist/). The dataset was zero mean - unit variance normalized during training and prediction.
+
+
+##### Accuracy and Model Size Comparison:
+Apart from the tables in the paper, have a look at the charts below to see that:
+
+- FastGRNN is at most 1.13% lower than the state-of-the-art but it can be up to 45x smaller.
+- FastGRNN-LSQ has almost similar performance as state-of-the-art with up to 4.5x smaller size.
+- FastRNN is better than all the Unitary techniques in 6 of the 8 datasets.
+- Spectral RNN is the best unitary technique.
+- FastGRNN is up to 45x smaller than unitary techniques and is always higher in performance.
+
+
+
+
+
+
+
+
+
+
+Penn Treebank Language Modelling with 1-layer RNN:
+
+The results suggest that FastGRNN, FastGRNN-LSQ, and FastRNN all have higher train perplexity scores while having good test perplexity scores. This might suggest that the proposed architectures are avoiding overfitting due to lesser parameters in general.
+
+
+
+
+##### Model Size vs Accuracy in 0-64KB:
+Both FastGRNN and FastGRNN-LSQ are always the best possible models in the regime and this resonates with edge devices due to their RAM and flash limitations.
+
+
+
+
+##### Analysis of each component of compression in FastGRNN:
+The effect of Low-rank is surprising as FastGRNN-SQ generally gains accuracy over
+FastGRNN-LSQ and thereby gives the required boost to nullify the loss of accuracy due to the other two components ie., sparsity and quantization. Sparsity and quantization result in a slight drop in accuracy and together account up to 1.5% drop in performance.
+
+
+
+
+##### The edge device deployment and inference times:
+The models were deployed on two popular IoT boards Arduino MKR1000 and Arduino Due.
+Unfortunately, the experiments on Arduino UNO were not possible as no model except FastGRNN was small enough to be burnt onto the flash (32KB), and the other models also needed more working RAM than the available 2KB.
+
+FastGRNN-Q is the model without quantization and no integer arithmetic. Given the boards
+don't have Floating Point Unit, one can observe that FastGRNN (the model with all integer arithmetic and quantized weights)
+is 4x faster during prediction on the edge-device. FastGRNN was 25-45x faster than UGRNN
+(smallest state-of-the-art gated RNN) and 57-132x faster than Spectral RNN (best and one of the smallest Unitary RNNs).
+
+
+
+
+
+### Conclusion
+This work studies the FastRNN algorithm for addressing the issues of inaccurate training and inefficient prediction in RNNs. FastRNN develops a peephole connection architecture with the addition of two extra scalar parameters to address this problem. It then builds on FastRNN to develop a novel gated architecture, FastGRNN, which reuses RNN matrices in the gating unit. Further compression in the model size of FastGRNN is achieved by allowing the parameter matrices to be low-rank, sparse and quantized. The performance of FastGRNN and FastRNN is benchmarked on several datasets and are shown to achieve state-of-the-art accuracies whilst having up to 45x smaller model as compared to leading gated RNN techniques.
+
+### Code and Usage:
+The code is public and is part of the [Microsoft EdgeML Repository](https://github.com/Microsoft/EdgeML). The new cells FastRNNCell and FastGRNNCell can be found as part of `tf.edgeml.graph.rnn` and can be used in a plug and play fashion in place of any inbuilt Tensorflow RNN Cell as `tf.edgeml.graph.rnn.FastRNNCell` and `tf.edgeml.graph.rnn.FastGRNNCell`.
+Both the cells have multiple arguments for fine-tuning and appropriate selection of the hyperparameters for the given problem.
+The `tf.edgeml.trainer.fastTrainer.FastTrainer` takes in the created cell object and run the 3-phase training routine to ensure optimal compression. Note that
+even though FastGRNN is the architecture that uses the 3-phase training, as it is independent of the architecture, the current code supports
+the trainer for both FastGRNN and FastRNN ensuring that even FastRNN can be compressed further if required.
+Both of these are packaged as a single end-point user script and can be found along with example on a public dataset as part of [FastCells](https://github.com/Microsoft/EdgeML/tree/master/tf/examples/FastCells).
diff --git a/_config.yml b/_config.yml
new file mode 100644
index 000000000..084678e63
--- /dev/null
+++ b/_config.yml
@@ -0,0 +1,20 @@
+# Site settings
+title: EdgeML
+email:
+description: >
+ Project page for EdgeML
+# Comment baseurl out when editing locally
+baseurl: "https://microsoft.github.io/EdgeML"
+url: "https://microsoft.github.io/EdgeML"
+
+# Build settings
+markdown: kramdown
+port: 4000
+host: localhost
+
+collections:
+ Blog:
+ output: true
+ permalink: /:collection/:name
+
+exclude: [archive]
diff --git a/_includes/footer.html b/_includes/footer.html
new file mode 100644
index 000000000..3e734a021
--- /dev/null
+++ b/_includes/footer.html
@@ -0,0 +1,24 @@
+
+
diff --git a/_includes/head.html b/_includes/head.html
new file mode 100644
index 000000000..f36c7d15f
--- /dev/null
+++ b/_includes/head.html
@@ -0,0 +1,40 @@
+
+
+
+ {% if page.title %}{{ page.title | escape }}{% else %}{{ site.title | escape }}{% endif %}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {% if page.custom_css %}
+ {% for stylesheet in page.custom_css %}
+
+ {% endfor %}
+ {% endif %}
+
+
+
+
diff --git a/_includes/header.html b/_includes/header.html
new file mode 100644
index 000000000..6ea498ce9
--- /dev/null
+++ b/_includes/header.html
@@ -0,0 +1,45 @@
+
+
+
+
diff --git a/_includes/macros.html b/_includes/macros.html
new file mode 100644
index 000000000..d5e773a6b
--- /dev/null
+++ b/_includes/macros.html
@@ -0,0 +1,37 @@
+
+{% raw %}
+$$
+
+% Not all commands are supported. Please refer to MathJax docs before including
+% your macros
+% http://docs.mathjax.org/en/latest/tex.html#defining-tex-macros
+%
+\newcommand{\ALG}{FastGRNN\xspace}
+\newcommand{\redSpace}{\vspace{-6mm}}
+\newcommand{\algs}{FastRNN\xspace}
+\newcommand{\alg}{FastGRNN\xspace}
+\newcommand{\salg}{FastRNN\xspace}
+\newcommand{\algfloat}{FastGRNN-LSQ\xspace}
+
+%!TEX root = paper.tex
+\newcommand{\reals}{\mathbb R}
+\newcommand{\ex}{\mathbb E}
+\newcommand{\prob}{\mathbb P}
+
+\renewcommand{\vec}[1]{{\mathbf{#1}}}
+\newcommand{\br}[1]{\left({#1}\right)}
+
+\DeclareMathOperator{\Tr}{Tr}
+
+% Asymptotic notation
+\newcommand{\bigO}[1]{{\cal O}\br{{#1}}}
+\newcommand{\softO}[1]{\widetilde{\cal O}\br{{#1}}}
+\newcommand{\Om}[1]{\Omega\br{{#1}}}
+\newcommand{\softOm}[1]{\tilde\Omega\br{{#1}}}
+
+\def\half{{\textstyle\frac{1}{2}}}
+\newcommand{\smallfrac}[2]{{\textstyle \frac{#1}{#2}}}
+\def\v#1{\vec #1}
+$$
+{% endraw %}
+
diff --git a/_layouts/blogpost.html b/_layouts/blogpost.html
new file mode 100644
index 000000000..e02461d8d
--- /dev/null
+++ b/_layouts/blogpost.html
@@ -0,0 +1,35 @@
+
+
+
+ {% include head.html %}
+
+
+ {% include header.html %}
+ {% include macros.html %}
+
+
+
+
+
+
+
+
{{ page.title }}
+ {{ page.author}}
+ •
+ {{ page.postdate}}
+
+
+
+
+
+
+
+
+
+
{{ content }}
+
+
+
+ {% include footer.html %}
+
+
diff --git a/_layouts/default.html b/_layouts/default.html
new file mode 100644
index 000000000..b2db4167c
--- /dev/null
+++ b/_layouts/default.html
@@ -0,0 +1,30 @@
+
+
+
+ {% include head.html %}
+
+
+ {% include header.html %}
+ {% include macros.html %}
+
+
+
+
{{ page.title }}
+
+
+
+
+
+
+
+
+
+
+ {{ content }}
+
+
+
+
+ {% include footer.html %}
+
+
diff --git a/_layouts/instructable.html b/_layouts/instructable.html
new file mode 100644
index 000000000..15b625bac
--- /dev/null
+++ b/_layouts/instructable.html
@@ -0,0 +1,42 @@
+
+
+
+ {% include head.html %}
+
+
+ {% include header.html %}
+ {% include macros.html %}
+
+
+
+
{{ page.title }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ content }}
+
+
+
+
+
+ {% include footer.html %}
+
diff --git a/_layouts/landingpage.html b/_layouts/landingpage.html
new file mode 100644
index 000000000..3bf2d12ae
--- /dev/null
+++ b/_layouts/landingpage.html
@@ -0,0 +1,11 @@
+
+
+
+ {% include head.html %}
+
+
+ {% include header.html %}
+ {{ content }}
+ {% include footer.html %}
+
+
diff --git a/_layouts/projects.html b/_layouts/projects.html
new file mode 100644
index 000000000..aaf5ff461
--- /dev/null
+++ b/_layouts/projects.html
@@ -0,0 +1,31 @@
+
+
+
+
+ {% include head.html %}
+
+
+ {% include header.html %}
+ {% include macros.html %}
+
+
+
+
Projects
+
+
+
+
+
+
+
+
+
+
+ {{ content }}
+
+
+
+
+
+ {% include footer.html %}
+
diff --git a/archive/FastGRNN-Blog.md b/archive/FastGRNN-Blog.md
new file mode 100644
index 000000000..9905e4156
--- /dev/null
+++ b/archive/FastGRNN-Blog.md
@@ -0,0 +1,402 @@
+---
+layout: blogpost
+title: Fast(G)RNN - Fast, Accurate, Stable and Tiny (Gated) Recurrent Neural Network
+postdate: 04 September, 2018
+author: Aditya Kusupati
+---
+
+### TL;DR
+*FastRNN* and *FastGRNN*, two RNN architectures (cells), together called
+*FastCells*, are developed to address the twin RNN limitations of inaccurate/unstable
+training and inefficient prediction.
+
+FastRNN provably stabilizes the RNN training which usually suffers from the infamous vanishing and exploding gradient
+problems.
+
+FastGRNN extends over and above FastRNN and learns low-rank, sparse and quantized weight matrices whilst having novel, elegant and expressive Gated RNN update equations. This allows FastGRNN to achieve state-of-the-art
+prediction accuracies while making predictions in microseconds to milliseconds
+(depending on processor speed) using models that fit in a few KB of memory.
+
+FastGRNN is up to **45x** smaller and faster (inference on edge-devices) than
+state-of-the-art RNN architectures (LSTM/GRU) whilst maintaining accuracies
+on various benchmark datasets and FastRNN has provably stable training and
+better performance when compared to the Unitary architectures which try to solve the same.
+
+Fast(G)RNN can be trained in the cloud or on your laptop, but can then make
+predictions locally on tiny resource-constrained devices without needing cloud connectivity.
+While the NIPS'18 publication talks only about 3-component compression in FastGRNN, the same pipeline can be seamlessly used for any RNN architecture and in our open source release, we extend the same to FastRNN. [FastCells end-to-end script](https://github.com/Microsoft/EdgeML/tree/master/tf/examples/FastCells).
+
+
+### Introduction and Motivation
+*FastRNN* and *FastGRNN* architectures were particularly inspired to tackle and get rid of three major problems:
+
+- Costly and expert feature engineering techniques like FFT for ML classifiers in Time-series regime.
+- Unstable RNN Training due to vanishing and exploding gradient problem.
+- Expensive RNN models and compute footprint for inference on edge-devices.
+
+##### Solution to expensive feature engineering - Deep Learning? and Why RNNs?
+The major motivation comes from the fact that most of the feature engineering techniques
+involved in the Time-series classification are expensive like FFT, which is
+the bottleneck if the edge-device doesn't have DSP support, and also involve the investment of
+experts' time to understand and craft the ideal features for the task.
+
+Deep Learning has proved over last few years that one can incorporate featurization
+as part of the model cost and try to learn compact models which can be used on raw data
+while having the state-of-the-art accuracies.
+
+Recurrent Neural Networks (RNNs) are the compact Neural Network models that have
+capacity to harness temporal information in a time-series setting and make effective/accurate
+predictions.
+
+
+
+$$\v W$$ is the input-to-hidden state transition matrix, $$\v U$$ is the hidden-to-hidden state transition matrix.
+$$\v x_t$$ is the input at timestep $$t$$, $$\v h_{t}$$ is the hidden state at the end of timestep $$t$$. Note that
+the total number of timesteps is $$T$$ and the classification is done using a simple FC-layer on $$\v h_T$$.
+
+Spending several weeks on finding the appropriate features for the [Gesture Pod] along
+with data visualization, revealed that multi-modal sensor data obtained from IoT devices
+can be modeled as time-series and hence RNNs might be the right way to circumvent the issues
+and painstaking feature engineering.
+
+##### Solution to Ineffective/Unstable RNN Training - FastRNN
+Simple RNNs are plagued with inaccurate/unstable training for longer time-series sequences
+and it has been the reason for the advent of complex yet more stable and expressive models like
+
+- *Gated Architectures*: LSTM, GRU, UGRNN etc.,
+- *Unitary Architectures*: Unitary RNN, Orthogonal RNN, Spectral RNN etc.,
+
+These architectures, however, have their own drawbacks and will be addressed as we go along.
+
+FastRNN stabilizes the entire RNN training using at most *2* scalars. FastRNN is not a novel
+architecture but a simple improvement over the existing Leaky Units along with a rigorous analysis
+which show the stability of training along with generalization and convergence bounds.
+
+
+
+$$\v W$$ is the input-to-hidden state transition matrix, $$\v U$$ is the hidden-to-hidden state transition matrix.
+$$\v x_t$$ is the input at timestep $$t$$, $$\v h_{t}$$ is the hidden state at the end of timestep $$t$$.
+$$\alpha$$ and $$\beta$$ are trainable parameters and generally parameterized using sigmoid function to ensure that
+they lie between $$0 \ \& \ 1$$.
+Note that the total number of timesteps is $$T$$ and the classification is done using a simple FC-layer on $$\v h_T$$.
+
+
+
+
+
+##### Solution to expensive RNN models and inference on edge-devices - FastGRNN
+Even though FastRNN stabilizes the RNN training, the model's expressivity is limited and relies on constant attention (scalar gating) for the new information and the running memory/ context across time-steps and across the hidden-units.
+This led to the creation of a Gated architecture named FastGRNN, which while being as accurate as
+state-of-the-art RNN models (LSTM/GRU) but is 45x smaller and faster (on edge-devices).
+
+FastGRNN inherently consists of *3* components of compression over the base architecture:
+
+- Low-Rank parameterization of weight matrices $$\v W$$ and $$\v U$$ (**L**)
+- Sparse parameter matrices (**S**)
+- Byte Quantized weights in parameter matrices (**Q**)
+
+The base architecture without any compression is hereby referred to as *FastGRNN-LSQ* (read as minus LSQ).
+FastGRNN-LSQ is *4x* smaller and faster than LSTM for inference and has very small compute overhead when compared to Simple RNN.
+
+
+
+$$\v W$$ is the input-to-hidden state transition matrix, $$\v U$$ is the hidden-to-hidden state transition matrix.
+$$\v x_t$$ is the input at timestep $$t$$, $$\v h_{t}$$ is the hidden state at the end of timestep $$t$$.
+$$\vec{\tilde{h}}_{t}$$ is the simple RNN update equation and $$\v z_t$$ is the gate equation.
+$$\zeta$$ and $$\nu$$ are trainable parameters and generally parameterized using sigmoid function to ensure that
+they lie between $$0 \ \& \ 1$$.
+Note that the total number of timesteps is $$T$$ and the classification is done using a simple FC-layer on $$\v h_T$$.
+
+
+
+
+Upon using the 3 compression components it culminates at FastGRNN which is 45x smaller and faster (on edge-device) than
+the state-of-the-art LSTM/GRU models whilst maintaining similar accuracies.
+
+### Mathematical arguments for RNN and FastRNN Training
+
+##### Why is RNN training unstable?
+Even though simple RNNs are theoretically powerful, their training faces great threats from
+the vanishing and exploding gradient problems. Note that simple RNN is a special case of FastRNN when
+$$\alpha = 1 \ \& \ \beta = 0 $$
+Mathematically the gradients of the weight matrices
+of RNN with respect to the loss function $$L$$ for binary classification where $$\v v$$ is the final classifier and $$T$$ total sequence length:
+
+
+
+Even though $$\v D_{k+1}$$ is bounded, the $$M(\v U)$$ term is still a
+repeated multiplication of similar matrices. On looking at the eigenvalue decomposition followed by the repeated multiplication, one can observe that the singular values are exponentiated and this results in gradient vanishing in the smallest eigenvector direction while having gradient explode along the largest eigenvector direction. Also the term $$M(\v U)$$
+can be very ill-conditioned, which summarises the above two concerns. These two potentially lead to no change in train loss or result in NaN for the usual Non-Convex Optimizers.
+
+
+
+Classically, the gradient explosion problem is tackled in various ways and the most famous one being gradient clipping, but this doesn't address the vanishing gradient problem.
+Unitary architectures claim to solve both the problems by using re-parameterization of $$\v U$$ matrix.
+
+##### Do Unitary methods address this issue completely?
+Unitary architectures rely on Unitary parameterization of $$\v U$$ matrix so as to stabilize training.
+Unitary parameterization implies that during the entire training phase $$\v U$$ is forced to be
+Unitary either by re-parameterization (most of the time expensive) or applying transforms during the gradient update (very expensive).
+
+There are a couple of papers which rely on hard unitary constraints like
+Unitary RNN (Arjosky et al., ICML 2016), Full Capacity Unitary RNN (Wisdom et al., NIPS 2016) and (Mhammedi et al., ICML 2017).
+The hard unitary constraints restrict the solution space drastically and potentially miss out the right weight matrices. To tackle this, soft unitary constraints were enforced, to span a larger space like
+Factorized RNN (Vorontsov et al., ICML 2017), Spectral RNN (Zhang et al., ICML 2018) and Kronecker Recurrent Units (Jose et al., ICML 2018).
+
+The soft constraints improve the performance and training times, but their solution space is still restricted and needs very extensive grid search to find the appropriate hyper-parameters. Even with all of this, they still fall short of the state-of-the-art Gated RNNs like LSTM/GRU.
+Their re-parameterization forces them to have higher hidden-units to reach considerably good performance and thereby increasing the model sizes
+making them unfit for Edge-devices.
+
+As unitary matrices only focus on $$\v U$$, the fact that the coupled term $$(\v U^\top \v D_{k+1})$$ is the culprit is often overlooked.
+The coupled term is actually prone to gradient vanishing when the non-linearity has gradients less than *1* (e.g., tanh and sigmoid).
+The stabilization solution provided by these methods is still flawed and still can suffer from vanishing gradient problem while effectively
+dodging the exploding gradient.
+
+##### How does FastRNN stabilize RNN training?
+As discussed earlier FastRNN is a very simple extension of Leaky Units, FastRNN has both $$\alpha \ \& \ \beta$$ trainable, which are already prevalent in the literature.
+But the mathematical analysis which at the end shows that at most *2* scalars are enough to stabilize RNN training is not extensively studied and perhaps FastRNN is the first one to do so. Note: Simple RNN is a special case of FastRNN where
+$$\alpha = 1 \ \& \ \beta = 0 $$.
+
+Let us look at the gradient and the $$M(\v U)$$ terms in case of FastRNN with respect to the loss function $$L$$ for binary classification where $$\v v$$ is the final classifier and $$T$$ total sequence length:
+
+
+
+Most of the times one can use the fact that $$\beta \approx 1 - \alpha$$ and use a single parameter to stabilize the entire
+training. Looking at the bounds on largest singular value (2-norm), smallest singular value and condition number of $$M(\v U)$$:
+
+
+
+All the above terms start to well-behave with the simple theoretical setting of $$\beta=1-\alpha$$ and $$\alpha=\frac{1}{T \max_k \|\v U^T\v D_{k+1}\|}$$.
+One can observe that this setting leads to:
+
+
+
+This shows that FastRNN can provably avoid both vanishing and exploding gradient problems while ensuring the $$M(\v U)$$ term is well conditioned. Empirically, FastRNN training ensures $$\alpha$$ behaves as predicted and is $$\alpha = \bigO{1/T}$$. FastRNN outperforms most of the Unitary methods across most of the benchmark datasets used
+across speech, NLP, activity and image classification as shown in the paper.
+
+##### Any guarantees for FastRNN in terms of Convergence and Generalization?
+It turns out that FastRNN has polynomial bounds in terms of length of the time-series sequence ($$T$$). Using the
+theoretical setting that $$\alpha = \bigO{1/T}$$ one can show that by using randomized stochastic gradient descent with no
+assumptions on the data distributions, FastRNN converges to a stationary point in upper bound of $$\Om{T^{6}}$$ iterations while the
+same analysis for simple RNN reveals an exponential upper bound.
+
+Coming to the generalization error, while assuming that the loss function is 1-Lipschitz, FastRNN has the scaling polynomial in $$T$$ ie., $$\bigO{\alpha T}$$
+whereas same analysis for simple RNN reveals an exponential dependency. Note that the upper bounds are being compared and comparing and proving of stronger lower bounds is an open problem that needs to be answered to analyze this further.
+
+
+### FastGRNN
+
+##### Why move away from FastRNN to FastGRNN?
+Even after stable training, the expressiveness of FastRNN falls short of state-of-the-art gated architectures in performance.
+This is due to having the same scalar gate (dumb & constant gate) for all the hidden units over all the timesteps. The ideal scenario, like in LSTM/GRU is to have an input dependent and per hidden unit scalar gate for having more expressivity. FastGRNN is created incorporating all the things discussed along with a much lower memory and compute footprint (45x).
+
+##### Base architecture: FastGRNN-LSQ
+As mentioned in the earlier section the base architecture created is addressed as *FastGRNN-LSQ*. The choice of architecture is very intuitive and tries to reuse as much information as possible and have minimal memory and compute footprint.
+
+FastGRNN-LSQ essentially uses the self-information from the update equation and reuses it in the gate with a different bias vector and a non-linearity. This ensures the compute is shared for both the update and the gate equations. The final hidden state update, i.e., the linear combination of the gated update equation and the previous hidden state, ensures that the architecture is expressive enough to match the
+performance of state-of-the-art gated architectures.
+
+The update equation $$\tilde {\v h}_{t}$$ and the gate equation $$\v z_{t}$$, therefore share the memory and compute.
+The final update equation $$\v h_{t}$$ can be interpreted as the gate acting as the forget gate for the previous
+hidden state and the affine transformed $$(1 - \v z_{t})$$ ie., $$(\zeta(1 - \v z_{t}) + \nu)$$ improves the model's
+capability and expressivity and helps it achieve the last few points of accuracy to beat or be on par with LSTM/GRU.
+
+The paper shows that FastGRNN-LSQ is either better or in the leagues of LSTM/GRU across benchmark datasets while being up to 4x smaller than LSTM/GRU.
+Given that we have a model which is powerful, small and elegant, can we make it very small and make it work on edge-devices?
+
+##### FastGRNN Compression.
+We use three components Low-Rank (L), Sparsity (S) and Quantization (Q) as part of our compression routine.
+
+Low-Rank parameterisation of weight matrices:
+
+
+$$\v W^1 \ \& \ \v W^2$$ are two low-rank matrices of the same rank used to re-parameterize $$\v W$$.
+$$\v U^1 \ \& \ \v U^2$$ are two low-rank matrices of the same rank used to re-parameterize $$\v U$$.
+
+Sparse weight matrices ($$s_w \ \& \ s_u$$ are the no:of non-zeros):
+
+
+Byte Quantization: The parameter matrices are trained with 4-byte floats and are finally quantized to 1-byte integers.
+This directly gives a 4x compression and if done right (using approximate piecewise non-linearities) will result in pure integer arithmetic on edge-devices without floating point unit. Note that one can use much more effective compression pipelines like Deep Compression over and above this to achieve further compression. For example, clustering of weights and generation of codebooks can result in up to 2-3x compression on FastGRNN.
+
+##### Training routine to induce compression
+The training routine has 3 phases:
+
+- Dense Training Phase
+- Sparse Training with IHT to find optimal support
+- Sparse Re-training on a fixed support
+
+In the first stage of the training, FastGRNN is trained for one-third epochs with the model using non-convex optimizers.
+This stage of optimization ignores the sparsity constraints on the parameters and learns a low-rank representation of the parameters.
+
+FastGRNN is next trained for the next third of the epochs using a non-convex optimizer, projecting the parameters onto the space of sparse low-rank matrices after every few batches while maintaining support between two consecutive projection steps. This stage, using non-convex optimizers with Iterative Hard Thresholding (IHT), helps FastGRNN identify the correct support for parameters $$(\vec{W}^i,\vec{U}^i)$$.
+
+Lastly, FastGRNN is trained for the last third of the epochs with non-convex optimizer while freezing the support set of the parameters. Early stopping is often deployed in stages (II) and (III) to obtain the best models within budget constraints and this acts as a regularizer.
+
+This training routine was developed during the time of DSD-training and uses much higher sparsity constraints and shows that the routine can
+maintain the performance while reducing the memory and compute footprint.
+
+##### Final Optimisation - Integer arithmetic
+Standard floating point operations are 4x more expensive than 1-byte integer operations on edge-devices without Floating point unit. This most of the times will result in slow inferences. In most of the RNN architectures, the expensive step is floating point operations and especially the exponentiation in the non-linearities like tanh and sigmoid. This can be circumvented using the piece-wise linear approximations for the non-linearities and using them in the models instead of the original ones during training and thereby during prediction.
+
+The approximate or quantized non-linearities are most of the times simple conditionals and integer operations and when trained as part of the model
+maintain the accuracies and the final model after byte-quantization will be tailor-made for all integer operations.
+
+### Results
+
+##### Datasets
+
+
+
+
+**Google-12 & Google-30:** Google Speech Commands dataset contains 1 second long utterances of 30 short words (30 classes) sampled at 16KHz. Standard log Mel-filter-bank featurization with 32 filters over a window size of 25ms and stride of 10ms gave 99 timesteps of 32 filter responses for a 1-second audio clip. For the 12 class version, 10 classes used in Kaggle's [Tensorflow Speech Recognition challenge](https://www.kaggle.com/c/tensorflow-speech-recognition-challenge) were used and remaining two classes were noise and background sounds (taken randomly from remaining 20 short word utterances). Both the datasets were zero mean - unit variance normalized during training and prediction.
+
+**Wakeword-2:** Wakeword-2 consists of 1.63 second long utterances sampled at 16KHz. This dataset was featurized in the same way as the Google Speech Commands dataset and led to 162 timesteps of 32 filter responses. The dataset was zero mean - unit variance normalized during training and prediction.
+
+**[HAR-2](https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones):** Human Activity Recognition (HAR) dataset was collected from an accelerometer and gyroscope on a Samsung Galaxy S3 smartphone. The features available on the repository were directly used for experiments. The 6 activities were merged to get the binarized version. The classes {Sitting, Laying, Walking_Upstairs} and {Standing, Walking, Walking_Downstairs} were merged to obtain the two classes. The dataset was zero mean - unit variance normalized during training and prediction.
+
+**[DSA-19](https://archive.ics.uci.edu/ml/datasets/Daily+and+Sports+Activities):** This dataset is based on Daily and Sports Activity (DSA) detection from a resource-constrained IoT wearable device with 5 Xsens MTx sensors having accelerometers, gyroscopes and magnetometers on the torso and four limbs. The features available on the repository were used for experiments. The dataset was zero mean - unit variance normalized during training and prediction.
+
+**Yelp-5:** Sentiment Classification dataset based on the [text reviews](https://www.yelp.com/dataset/challenge). The data consists of 500,000 train points and 500,000 test points from the first 1 million reviews. Each review was clipped or padded to be 300 words long. The vocabulary consisted of 20000 words and 128-dimensional word embeddings were jointly trained with the network.
+
+**Penn Treebank:** 300 length word sequences were used for word level language modeling task using Penn Treebank (PTB) corpus. The vocabulary consisted of 10,000 words and the size of trainable word embeddings was kept the same as the number of hidden units of architecture.
+
+**Pixel-MNIST-10:** Pixel-by-pixel version of the standard [MNIST-10 dataset](http://yann.lecun.com/exdb/mnist/). The dataset was zero mean - unit variance normalized during training and prediction.
+
+
+##### Accuracy and Model Size Comparison:
+Apart from the tables in the paper, have a look at the charts below to see that:
+
+- FastGRNN is at most 1.13% lower than the state-of-the-art but it can be up to 45x smaller.
+- FastGRNN-LSQ has almost similar performance as state-of-the-art with up to 4.5x smaller size.
+- FastRNN is better than all the Unitary techniques in 6 of the 8 datasets.
+- Spectral RNN is the best unitary technique.
+- FastGRNN is up to 45x smaller than unitary techniques and is always higher in performance.
+
+
+
+
+
+
+
+
+
+
+Penn Treebank Language Modelling with 1-layer RNN:
+
+The results suggest that FastGRNN, FastGRNN-LSQ, and FastRNN all have higher train perplexity scores while having good test perplexity scores. This might suggest that the proposed architectures are avoiding overfitting due to lesser parameters in general.
+
+
+
+
+##### Model Size vs Accuracy in 0-64KB:
+Both FastGRNN and FastGRNN-LSQ are always the best possible models in the regime and this resonates with Edge-devices due to their RAM and flash limitations.
+
+
+
+
+##### Analysis of each component of compression in FastGRNN:
+The effect of Low-rank is surprising as FastGRNN-SQ generally gains accuracy over
+FastGRNN-LSQ and thereby gives the required boost to nullify the loss of accuracy due to the other two components, i.e., sparsity and quantization. Sparsity and quantization result in a slight drop in accuracy and together account for up to a 1.5% drop in performance.
+
+
+
+
+##### The Edge-device deployment and inference times:
+The models were deployed on two popular IoT boards Arduino MKR1000 and Arduino Due.
+Unfortunately, the experiments on Arduino UNO were not possible as no model except FastGRNN was small enough to be burnt onto the flash (32KB), while the other models needed more working RAM than the available 2KB.
+
+FastGRNN-Q is the model without quantization and no integer arithmetic. Given the boards
+don't have Floating Point Unit, one can observe that FastGRNN (the model with all integer arithmetic and quantized weights)
+is 4x faster during prediction on the edge-device. FastGRNN was 25-45x faster than UGRNN
+(smallest state-of-the-art gated RNN) and 57-132x faster than Spectral RNN (the best and one of the smaller Unitary RNNs).
+
+
+
+
+
+### Conclusion
+This work studies the FastRNN algorithm for addressing the issues of inaccurate training and inefficient prediction in RNNs. FastRNN develops a peephole connection architecture with the addition of two extra scalar parameters to address this problem. It then builds on FastRNN to develop a novel gated architecture, FastGRNN, which reuses RNN matrices in the gating unit. Further compression in the model size of FastGRNN is achieved by allowing the parameter matrices to be low-rank, sparse and quantized. The performance of FastGRNN and FastRNN is benchmarked on several datasets and are shown to achieve state-of-the-art accuracies whilst having up to 45x smaller model as compared to leading gated RNN techniques.
+
+### Code and Usage:
+The code is public and is part of the [Microsoft EdgeML Repository](https://github.com/Microsoft/EdgeML). The new cells FastRNNCell and FastGRNNCell can be found as part of `tf.edgeml.graph.rnn` and can be used in a plug and play fashion in place of any inbuilt Tensorflow RNN Cell as `tf.edgeml.graph.rnn.FastRNNCell` and `tf.edgeml.graph.rnn.FastGRNNCell`.
+Both the cells have multiple arguments for fine-tuning and appropriate selection of the hyperparameters for the given problem.
+The `tf.edgeml.trainer.fastTrainer.FastTrainer` takes in the created cell object and run the 3-phase training routine to ensure optimal compression. Note that
+even though FastGRNN is the architecture that uses the 3-phase training, as it is independent of the architecture, the current code supports
+the trainer for both FastGRNN and FastRNN ensuring that even FastRNN can be compressed further if required.
+Both of these are packaged as a single end-point user script and can be found along with example on a public dataset as part of [FastCells](https://github.com/Microsoft/EdgeML/tree/master/tf/examples/FastCells).
diff --git a/citation.txt b/citation.txt
new file mode 100644
index 000000000..dc4307dc1
--- /dev/null
+++ b/citation.txt
@@ -0,0 +1,7 @@
+@software{edgeml01,
+ author = {{Dennis, Don and Gopinath, Sridhar and Gupta, Chirag and Kumar,
+ Ashish and Kusupati, Aditya and Patil, Shishir and Simhadhri, Harsha Vardhan}},
+ title = {{EdgeML: Machine Learning for resource-constrained edge devices}},
+ url = {https://github.com/Microsoft/EdgeML},
+ version = {0.1},
+}
diff --git a/config.mk b/config.mk
deleted file mode 100644
index 69a0c6e4b..000000000
--- a/config.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT license.
-
-DEBUGGING_FLAGS = #-DLIGHT_LOGGER #-DLOGGER #-DTIMER -DCONCISE #-DSTDERR_ONSCREEN #-DLIGHT_LOGGER -DVERBOSE #-DDUMP #-DVERIFY
-CONFIG_FLAGS = -DSINGLE #-DXML -DZERO_BASED_IO
-
-MKL_EIGEN_FLAGS = -DEIGEN_USE_BLAS -DMKL_ILP64
-
-LDFLAGS= -lm -ldl
-
-MKL_ROOT=/opt/intel/mkl
-
-MKL_COMMON_LDFLAGS=-L $(MKL_ROOT)/lib/intel64 -Wl,--no-as-needed -lmkl_intel_ilp64 -lmkl_core
-MKL_SEQ_LDFLAGS = $(MKL_COMMON_LDFLAGS) -lmkl_sequential
-MKL_PAR_LDFLAGS = $(MKL_COMMON_LDFLAGS) -lmkl_gnu_thread -lgomp -lpthread
-MKL_PAR_STATIC_LDFLAGS = -Wl,--start-group /opt/intel/mkl/lib/intel64/libmkl_intel_ilp64.a /opt/intel/mkl/lib/intel64/libmkl_gnu_thread.a /opt/intel/mkl/lib/intel64/libmkl_core.a -Wl,--end-group -lgomp -lpthread -lm -ldl
-
-CILK_LDFLAGS = -lcilkrts
-CILK_FLAGS = -fcilkplus -DCILK
-
-CC=g++-5
-
-CFLAGS= -p -g -fPIC -O3 -std=c++11 -DLINUX $(DEBUGGING_FLAGS) $(CONFIG_FLAGS) $(MKL_EIGEN_FLAGS) $(CILK_FLAGS)
diff --git a/css/landing.css b/css/landing.css
new file mode 100644
index 000000000..d65d609f4
--- /dev/null
+++ b/css/landing.css
@@ -0,0 +1,235 @@
+---
+Title: CSS
+---
+/* Color pallet
+ * ------------
+ * http://www.colorzilla.com/chrome/
+ * To use:
+ color: var(--ms-red);
+ */
+
+/* Common CSS */
+.landing-subheading {
+ font-size: 2em;
+ font-weight: 500;
+}
+
+.landing-subsubheading{
+ font-size: 1.2em;
+ font-weight: 500;
+}
+
+.top-buffer-1 {
+ padding-top: 1%;
+}
+
+.top-buffer-3 {
+ padding-top: 3%;
+}
+
+.top-buffer-5{
+ padding-top: 5%;
+}
+
+.top-buffer-7{
+ padding-top: 7%;
+}
+
+.bottom-buffer-1 {
+ padding-bottom: 1%;
+}
+
+.bottom-buffer-3 {
+ padding-bottom: 3%;
+}
+
+.ms-btn-blue{
+ color: white;
+ background-color: var(--ms-blue);
+ border-color: var(--ms-blue);
+}
+
+.ms-btn-white{
+ color: var(--ms-blue);
+ background-color: white;
+ border-color: var(--ms-grey);
+}
+
+.ms-btn-white:hover {
+ background-color: var(--ms-green);
+}
+
+
+/* Slide 1 */
+header {
+ position: relative;
+ background-color: var(--ms-blue);
+ height: 65vh;
+ min-height: 25rem;
+ width: 100%;
+ overflow: hidden;
+}
+
+
+/*header video {*/
+ /*position: absolute;*/
+ /*top: 50%;*/
+ /*left: 50%;*/
+ /*min-width: 100%;*/
+ /*min-height: 100%;*/
+ /*width: auto;*/
+ /*height: auto;*/
+ /*z-index: 0;*/
+ /*-ms-transform: translateX(-50%) translateY(-50%);*/
+ /*-moz-transform: translateX(-50%) translateY(-50%);*/
+ /*-webkit-transform: translateX(-50%) translateY(-50%);*/
+ /*transform: translateX(-50%) translateY(-50%);*/
+/*}*/
+
+header .container {
+ position: relative;
+ z-index: 2;
+}
+
+/*header .overlay {*/
+ /*position: absolute;*/
+ /*top: 0;*/
+ /*left: 0;*/
+ /*height: 100%;*/
+ /*width: 100%;*/
+ /*background-color: black;*/
+ /*opacity: 0.6;*/
+ /*z-index: 1;*/
+/*}*/
+
+/*@media (pointer: coarse) and (hover: none) {*/
+ /*header {*/
+ /*background: url('/img/ICane/only_cane_first_page.jgp') black no-repeat center center scroll;*/
+ /*}*/
+ /*header video {*/
+ /*display: none;*/
+ /*}*/
+/*}*/
+
+
+#landing-heading {
+ font-size: 6em;
+ font-weight: 400;
+}
+
+
+#landing-caption{
+ font-size: 1.52em;
+ font-weight: 300;
+}
+
+/* Slide description */
+.landing-slide-description{
+ position:relative;
+ min-height: 30vh;
+ font-size: 1.2em;
+ color: var(--ms-grey);
+ padding-top: 3%;
+ padding-bottom: 2%;
+}
+
+/* Slide features */
+.landing-slide-features {
+ background-color: #ffffff;
+}
+.landing-img-features {
+ max-width: 55%;
+}
+
+.landing-subheading-features {
+ font-weight: 500;
+
+}
+
+/* Slide connected-world */
+.landing-connected-world {
+ /* The image used */
+ background-image: url('{{ site.baseurl }}/img/connected_world.jpg');
+ height: 40%;
+
+ /* Center and scale the image nicely */
+ background-position: center;
+ background-repeat: no-repeat;
+ background-size: cover;
+}
+
+/* Slide applications */
+.landing-slide-applications {
+ background-color: white;
+ color: var(--ms-grey);
+}
+
+.landing-img-applications{
+ width: 50%;
+}
+
+.landing-bottom-applications{
+ position: absolute;
+ bottom: 0;
+ margin-right: 0;
+}
+
+/* Slide 4 Algorithms */
+.landing-slide-algorithms {
+ background-color: var(--ms-blue);
+ color: white;
+ font-size: 1em;
+}
+
+.landing-slide-algorithms-card {
+ background-color: white;
+ color: var(--ms-blue);
+}
+
+.landing-img-algorithms{
+ width: 55%;
+}
+
+/* Slide 5 */
+.landing-slide-usecases {
+ padding-top: 5%;
+ padding-bottom: 5%;
+ background-color: #fffcf3;
+}
+
+.landing-img-usecases{
+ width: 55%;
+}
+
+/* Slide 6 Tools */
+.landing-slide-tools{
+ background-color: var(--ms-blue);
+ color: white;
+ font-size: 1em;
+}
+
+.landing-slide-tools-card {
+ background-color: white;
+ color: var(--ms-blue);
+}
+
+.landing-img-tools{
+ width: 65%;
+}
+
+/* Slide people */
+.landing-slide-people{
+ background-color: var(--ms-blue);
+ color: white;
+}
+
+.landing-slide-people a {color: white; text-decoration: underline;}
+.landing-slide-people a:hover {color: var(--ms-yellow); text-decoration: underline;}
+.landing-slide-people a:visited {color: var(--ms-yellow); text-decoration: underline;}
+
+/* Slide citation */
+.landing-slide-citation {
+ position:relative;
+ color: var(--ms-grey);
+}
+
diff --git a/css/style.css b/css/style.css
new file mode 100644
index 000000000..abae75818
--- /dev/null
+++ b/css/style.css
@@ -0,0 +1,229 @@
+/* Palette: site brand colors exposed as CSS custom properties */
+:root {
+ --ms-red: #f05522;
+ --ms-green: #7bbb42;
+ --ms-yellow: #fdba12;
+ --ms-blue: #07639a;
+ --ms-blue-original: #389bd6;
+ --ms-grey: #737373;
+ }
+
+/* Default link colors: blue, turning green on hover */
+a {
+ color: var(--ms-blue);
+}
+
+a:hover {
+ color: var(--ms-green);
+}
+
+/* Visited-link styling intentionally disabled; kept for reference */
+/*a:visited {*/
+ /*color: var(--ms-yellow);*/
+/*}*/
+
+body{
+ font-size:15px;
+ /* Always show the scrollbar to avoid horizontal layout shift between pages */
+ overflow-y: scroll;
+}
+
+/* Full-height pages (needed for full-screen landing slides) */
+body, html {
+ height: 100%;
+}
+
+/* Vertical spacing utilities */
+.top-buffer5 {
+ margin-top: 5%;
+}
+.top-buffer10 {
+ margin-top: 10%;
+}
+
+/* Subtle drop shadow under the navbar (bottom edge only, via negative spread) */
+.navbar-custom {
+ -webkit-box-shadow: 0 4px 6px -6px #999;
+ -moz-box-shadow: 0 4px 6px -6px #999;
+ box-shadow: 0 4px 6px -6px #999;
+}
+
+.navbar-brand-custom {
+ /* NOTE(review): 650 only renders as-is with variable fonts;
+    otherwise browsers snap to the nearest available weight */
+ font-weight: 650;
+ font-size: 1.4em;
+}
+
+/* Colored separators between navbar items, one class per brand color */
+.navbar-border-green {
+ border-right: 1px solid var(--ms-green);
+}
+
+.navbar-border-yellow {
+ border-right: 1px solid var(--ms-yellow);
+}
+
+.navbar-border-blue {
+ border-right: 1px solid var(--ms-blue);
+}
+
+.navbar-border-red {
+ border-right: 1px solid var(--ms-red);
+}
+
+.navbar-border-grey {
+ border-right: 1px solid var(--ms-grey);
+}
+
+.navbar-item-custom {
+ margin-right: 5px;
+ font-weight: 500;
+ margin-left: 5px;
+ color: var(--ms-grey);
+}
+
+
+/* Text color utilities, one class per brand color */
+.text-grey {
+ color: var(--ms-grey);
+}
+
+.text-green {
+ color: var(--ms-green);
+}
+
+.text-red {
+ color: var(--ms-red);
+}
+
+.text-yellow {
+ color: var(--ms-yellow);
+}
+
+.text-blue {
+ color: var(--ms-blue);
+}
+
+.hr-grey{
+ /* <hr> rules are drawn with a border, so set border-color to match .hr-blue.
+    The original 'color' declaration had no visible effect on the rule line. */
+ border-color: var(--ms-grey);
+}
+
+.hr-blue{
+ border-color: var(--ms-blue);
+}
+
+/* Tighten the horizontal rule under post headers */
+#hr-post {
+ margin-top: 0;
+ margin-bottom: 32px;
+}
+
+/* Container removed from layout (holds raw MathJax source).
+   The former 'display: hidden' declaration was invalid CSS ('hidden' is not a
+   display value) and has been dropped; 'display: none' does the actual hiding. */
+.custom-mathjax-hidden {
+ display: none;
+}
+
+/* Blog index cards */
+.blog-card {
+ padding-top: 20px;
+ padding-bottom: 20px;
+}
+
+/* Colored left accent border for blog cards, one class per brand color */
+.blog-border-green {
+ border-left: 1px solid var(--ms-green);
+}
+
+.blog-border-yellow {
+ border-left: 1px solid var(--ms-yellow);
+}
+
+.blog-border-blue {
+ border-left: 1px solid var(--ms-blue);
+}
+
+.blog-border-red {
+ border-left: 1px solid var(--ms-red);
+}
+
+.blog-border-grey {
+ border-left: 1px solid var(--ms-grey);
+}
+
+.blog-card-title {
+ font-size: 1.4em;
+}
+
+.blog-card-subtitle {
+ color: var(--ms-grey);
+ font-weight: 500;
+}
+
+.blog-card-subtitle-author {
+ font-style: italic;
+}
+
+.blog-card-subtitle-date {
+ /* placeholder: currently inherits styling from .blog-card-subtitle */
+}
+
+/* Separator dot between author and date */
+.blog-card-subtitle-middle-dot {
+ font-size: 1.2em;
+ color: var(--ms-grey);
+}
+
+/* Fixed-size square avatars on the people page */
+.people-page-img img {
+ width: 150px;
+ height: 150px;
+}
+
+.project-title {
+ font-size: 1.5em;
+}
+
+/* Dark, underlined project title links (overrides the global blue) */
+.project-title>a{
+ color: #312525;
+ text-decoration: underline;
+}
+
+.project-abstract {
+ /* placeholder for future styling */
+}
+
+.project-img {
+ /* NOTE(review): this rule originally declared 'max-width: 65%' immediately
+    followed by 'max-width: 200px'; the first was always overridden (later
+    declaration wins) and has been removed. If a relative cap was also intended,
+    use 'max-width: min(65%, 200px)' instead. */
+ max-width: 200px;
+}
+
+/* Muted grey for the project link row and its anchors */
+.project-links {
+ color: #5f5f5f;
+}
+
+.project-links>a{
+ color: #5f5f5f;
+}
+
+.project-tag{
+ padding-left:2px;
+ padding-right:2px;
+ /* Hairline outline around tag chips, drawn as a 1px shadow */
+ -webkit-box-shadow: 0px 0px 1px 0px rgba(0,0,0,0.75);
+ -moz-box-shadow: 0px 0px 1px 0px rgba(0,0,0,0.75);
+ box-shadow: 0px 0px 1px 0px rgba(0,0,0,0.75);
+}
+
+.project-highlight {
+ color: #0e6f0e;
+ font-weight: bold;
+}
+
+.project-institute {
+ /* placeholder for future styling */
+}
+
+.project-collaborators {
+ /* placeholder for future styling */
+}
+
+/* Italic grey rows listing collaborators */
+.project-collab-row {
+ margin-bottom: 5px;
+ font-style: italic;
+ color: #5f5f5f;
+}
+
+.project-text-row {
+ margin-bottom: 5px;
+}
+
+/* Dark footer band */
+.footer-container{
+ background-color: #13405d;
+ color: white;
+ font-size: 0.9em;
+}
+
+/* Footer links: green, turning yellow on hover and once visited.
+   NOTE(review): :visited is declared after :hover, so visited links stay yellow
+   while hovered (same color here, so there is no visible difference). */
+.footer-container a {color: var(--ms-green); text-decoration: underline;}
+.footer-container a:hover {color: var(--ms-yellow); text-decoration: underline;}
+.footer-container a:visited {color: var(--ms-yellow); text-decoration: underline;}
diff --git a/docs/README_BONSAI_OSS.md b/docs/README_BONSAI_OSS.md
deleted file mode 100644
index b2c66f8ff..000000000
--- a/docs/README_BONSAI_OSS.md
+++ /dev/null
@@ -1,124 +0,0 @@
-# Bonsai
-
-[Bonsai](publications/Bonsai.pdf) is a novel tree based algorithm for efficient prediction on IoT devices – such as those based on the Arduino Uno board having an 8 bit ATmega328P microcontroller operating at 16 MHz with no native floating point support, 2 KB RAM and 32 KB read-only flash.
-
-Bonsai maintains prediction accuracy while minimizing model size and prediction costs by:
-
- (a) Developing a tree model which learns a single, shallow, sparse tree with powerful nodes
- (b) Sparsely projecting all data into a low-dimensional space in which the tree is learnt
- (c) Jointly learning all tree and projection parameters
-
-Experimental results on multiple benchmark datasets demonstrate that Bonsai can make predictions in milliseconds even on slow microcontrollers, can fit in KB of memory, has lower battery consumption than all other algorithms while achieving prediction accuracies that can be as much as 30% higher than state-of-the-art methods for resource-efficient machine learning. Bonsai is also shown to generalize to other resource constrained settings beyond IoT by generating significantly better search results as compared to Bing’s L3 ranker when the model size is restricted to 300 bytes.
-
-## Algorithm
-
-Bonsai learns a balanced tree of user specified height `h`.
-
-The parameters that need to be learnt include:
-
- (a) Z: the sparse projection matrix;
- (b) θ = [θ1,...,θ2h−1]: the parameters of the branching function at each internal node
- (c) W = [W1,...,W2h+1−1] and V = [V1,...,V2h+1−1]:the predictor parameters at each node
-
-We formulate a joint optimization problem to train all the parameters using the following three phase training routine:
-
- (a) Unconstrained Gradient Descent: Train all the parameters without having any Budget Constraint
- (b) Iterative Hard Thresholding (IHT): Applies IHT constantly while training
- (c) Training with constant support: After the IHT phase the support(budget) for the parameters is fixed and are trained
-
-We use simple Batch Gradient Descent as the solver with Armijo rule as the step size selector.
-
-## Prediction
-
-When given an input feature vector X, Bonsai gives the prediction as follows :
-
- (a) We project the data onto a low dimensional space by computing x^ = Zx
- (b) The final bonsai prediction score is the non linear scores (wx^ * tanh(sigma*vx^) ) predicted by each of the individual nodes along the path traversed by the Bonsai tree
-
-## Usage
-
-BonsaiTrain
-
- ./BonsaiTrain [Options] DataFolder
- Options:
-
- -F : [Required] Number of features in the data.
- -C : [Required] Number of Classification Classes/Labels.
- -nT : [Required] Number of training examples.
- -nE : [Required] Number of examples in test file.
- -f : [Optional] Input format. Takes two values [0 and 1]. 0 is for libsvm_format(default), 1 is for tab/space separated input.
-
- -P : [Optional] Projection Dimension. (Default: 10 Try: [5, 20, 30, 50])
- -D : [Optional] Depth of the Bonsai tree. (Default: 3 Try: [2, 4, 5])
- -S : [Optional] sigma = parameter for sigmoid sharpness (Default: 1.0 Try: [3.0, 0.05, 0.005] ).
-
- -lW : [Optional] lW = regularizer for predictor parameter W (Default: 0.0001 Try: [0.01, 0.001, 0.00001]).
- -lT : [Optional] lTheta = regularizer for branching parameter Theta (Default: 0.0001 Try: [0.01, 0.001, 0.00001]).
- -lV : [Optional] lV = regularizer for predictor parameter V (Default: 0.0001 Try: [0.01, 0.001, 0.00001]).
- -lZ : [Optional] lZ = regularizer for projection parameter Z (Default: 0.00001 Try: [0.001, 0.0001, 0.000001]).
-
- Use Sparsity Params to vary your model size for a given tree depth and projection dimension
- -sW : [Optional] lambdaW = sparsity for predictor parameter W (Default: For Binaay 1.0 else 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
- -sT : [Optional] lambdaTheta = sparsity for branching parameter Theta (Default: For Binary 1.0 else 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
- -sV : [Optional] lambdaV = sparsity for predictor parameters V (Default: For Binary 1.0 else 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
- -sZ : [Optional] lambdaZ = sparsity for projection parameters Z (Default: 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
-
- -I : [Optional] [Default: 42 Try: [100, 30, 60]] Number of passes through the dataset.
- -B : [Optional] Batch Factor [Default: 1 Try: [2.5, 10, 100]] Float Factor to multiply with sqrt(ntrain) to make the batch_size = min(max(100, B*sqrt(nT)), nT).
- DataFolder : [Required] Path to folder containing data with filenames being 'train.txt' and 'test.txt' in the folder."
-
- Note - Both libsvm_format and Space/Tab separated format can be either Zero or One Indexed in labels. To use Zero Index enable ZERO_BASED_IO flag in config.mk and recompile Bonsai
-
-BonsaiPredict:
-
- ./BonsaiPredict [Options]
-
- Options:
- -f : [Required] Input format. Takes two values [0 and 1]. 0 is for libsvmFormat(default), 1 is for tab/space separated input.
- -N : [Required] Number of data points in the test data.
- -D : [Required] Directory of data with test.txt present in it.
- -M : [Required] Directory of the Model (loadableModel and loadableMeanVar).
-
-## Data Format
-
- (a) "train.txt" is train data file with label followed by features, "test.txt" is test data file with label followed by features
- (b) They can be either in libsvm_format or a simple tab/space separated format
- (c) Try to shuffle the "train.txt" file before feeding it in. Ensure that all instances of a single class are not together
-
-## Running on USPS-10
-
-Following the instructions in the [common readme](../README.md) will give you a binaries for BonsaiTrain and BonsaiPredict along with a folder called usps10 with train and test datasets.
-
-For running Training separately followed by prediction
-```bash
-sh run_BonsaiTrain_usps10.sh
-
-The script prints the path of the model-dir
-
-use "ln -s current_model" to set a soft alias(shortcut) if you wish to run on that model or you choose as per your wish so as to use it in BonsaiPredict on usps10
-
-sh run_BonsaiPredict_usps10.sh
-```
-This should give you output as described in the next section. Test accuracy will be about 94.07% with the specified parameters.
-
-## Output
-
-The DataFolder will have a new forlder named "BonsaiResults" with the following files in it:
-
- (a) A directory for each run with the signature hrs_min_sec_day_month with the following in it:
- (1) loadableModel - Char file which can be directly loaded using the inbuilt load model functions
- (2) loadableMeanVar - Char file which can be directly loaded using inbuilt load mean-var functions
- (3) predClassAndScore - File with Prediction Score and Predicted Class for each Data point in the test set
- (4) runInfo - File with the hyperparameters for that run of Bonsai along with Test Accuracy and Total NonZeros in the model
- (5) timerLog - Created on using the `TIMER` flag. This file stores proc time and wall time taken to execute various function calls in the code. Indicates the degree of parallelization and is useful for identifying bottlenecks to optimize the code. On specifying the `CONCISE` flag, timing information will only be printed if running time is higher than a threshold specified in `src/common/timer.cpp`
- (6) Params - A directory with readable files with Z, W, V, Theta, Mean and Variance
- (b) A file resultDump which has consolidated results and map to the respective run directory
-
-## Notes
- (a) You can load an pretrained model and continue further training on it using one of the BonsaiTrainer constructors. Please look into BonsaiTrainer.cpp for details
- (b) You can load the pretrained model and the test data by setting -nT 0 and then export model and construct predictor for the same. Look into BonsaiTrainer.cpp for details
- (c) As of now, there is no support to Multi Label Classification, Ranking and Regression in Bonsai
- (d) Model Size = 8*totalNonZeros Bytes. 4 Bytes to store index and 4 Bytes to store value to store a sparse model
- (e) We do not provide support for Cross-Validation, support exists only for Train-Test. The user can write a bash wrapper to perform Cross-Validation.
- (f) Currently, Bonsai is being compiled with MKL_SEQ_LDFLAGS, one can change to MKL_PAR_FLAGS if interested
-
diff --git a/docs/README_BONSAI_TLC.md b/docs/README_BONSAI_TLC.md
deleted file mode 100644
index 0e6a4273a..000000000
--- a/docs/README_BONSAI_TLC.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Bonsai
-
-Bonsai ([paper](http://proceedings.mlr.press/v70/kumar17a/kumar17a.pdf))
-is a novel tree based algorithm for for efficient prediction on IoT devices –
-such as those based on the Arduino Uno board having an 8 bit ATmega328P microcontroller operating
-at 16 MHz with no native floating point support, 2 KB RAM and 32 KB read-only flash.
-
- Bonsai maintains prediction accuracy while minimizing model size and prediction costs by:
- (a) developing a tree model which learns a single, shallow, sparse tree with powerful nodes;
- (b) sparsely projecting all data into a low-dimensional space in which the tree is learnt;
- (c) jointly learning all tree and projection parameters.
-
-Experimental results on multiple benchmark datasets demonstrate that Bonsai can make predictions in milliseconds even on slow microcontrollers,
-can fit in KB of memory, has lower battery consumption than all other algorithms while achieving prediction accuracies that can be as much as
-30% higher than state-of-the-art methods for resource-efficient machine learning.
-
-Bonsai is also shown to generalize to other resource constrained settings beyond IoT
-by generating significantly better search results as compared to Bing’s L3 ranker when the model size is restricted to 300 bytes.
-
-## Algorithm
-
-Bonsai learns a balanced tree of user specified height `h`.
-
- The parameters that need to be learnt include:
- (a) Z: the sparse projection matrix;
- (b) θ = [θ1,...,θ2h−1]: the parameters of the branching function at each internal node;
- (c) W = [W1,...,W2h+1−1] and V = [V1,...,V2h+1−1]:the predictor parameters at each node
-
-We formulate a joint optimization problem to train all the parameters using a training routine which is as follows.
-
- It has 3 Phases:
- (a) Unconstrained Gradient Descent: Train all the parameters without having any Budget Constraint
- (b) Iterative Hard Thresholding (IHT): Applies IHT constantly while training
- (c) Training with constant support: After the IHT phase the support(budget) for the parameters is fixed and are trained
-We use simple Batch Gradient Descent as the solver with Armijo rule as the step size selector.
-
-## Prediction
-
-When given an input fearure vector X, Bonsai gives the prediction as follows :
-
- (a) We project the data onto a low dimensional space by computing x^ = Zx.
- (b) The final bonsai prediction score is the sum of the non linear scores ( wx^ * tanh(sigma*vx^) ) predicted by each of the individual nodes along the path traversed by the Bonsai tree.
-
-
-## Parameters and HyperParameters
-
- pd : Projection Dimension. (Default: 10 Try: [5, 20, 30, 50])
- td : Depth of the Bonsai tree. (Default: 3 Try: [2, 4, 5])
- s : sigma = parameter for sigmoid sharpness (Default: 1.0 Try: [3.0, 0.05, 0.005] ).
-
- rw : lambda_W = regularizer for classifier parameter W (Default: 0.0001 Try: [0.01, 0.001, 0.00001]).
- rTheta : lambda_Theta = regularizer for kernel parameter Theta (Default: 0.0001 Try: [0.01, 0.001, 0.00001]).
- rv : lambda_V = regularizer for kernel parameters V (Default: 0.0001 Try: [0.01, 0.001, 0.00001]).
- rz : lambda_Z = regularizer for kernel parameters Z (Default: 0.00001 Try: [0.001, 0.0001, 0.000001]).
-
- Use Sparsity Params to vary your model Size
- sw : sparsity_W = sparsity for classifier parameter W (Default: For Binaray 1.0 else 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
- sTheta : sparsity_Theta = sparsity for kernel parameter Theta (Default: For Binaray 1.0 else 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
- sv : sparsity_V = sparsity for kernel parameters V (Default: For Binaray 1.0 else 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
- sz : sparsity_Z = sparsity for kernel parameters Z (Default: 0.2 Try: [0.1, 0.3, 0.4, 0.5]).
-
- iter : [Default: 40 Try: [100, 30, 60]] Number of passes through the dataset.
diff --git a/docs/README_PROTONN_OSS.ipynb b/docs/README_PROTONN_OSS.ipynb
deleted file mode 100644
index 07150e173..000000000
--- a/docs/README_PROTONN_OSS.ipynb
+++ /dev/null
@@ -1,226 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# ProtoNN: Compressed and accurate KNN for resource-constrained devices ([paper](publications/ProtoNN.pdf))\n",
- "ProtoNN is an algorithm developed for binary, multiclass and multilabel supervised learning. ProtoNN models are time and memory efficient and are thus ideal for resource-constrained scenarios like Internet of Things (IoT). \n",
- "\n",
- "## Overview of algorithm\n",
- "Suppose a single data-point has **dimension** $D$. Suppose also that the total number of **classes** is $L$. For the most basic version of ProtoNN, there are 2 more user-defined hyper-parameters: the **projection dimension** $d$ and the **number of prototypes** $m$. \n",
- "\n",
- "- ProtoNN learns 3 parameter matrices:\n",
- " - A **projection matrix** $W$ of dimension $(d,\\space D)$ that projects the datapoints to a small dimension $d$.\n",
- " - A **prototypes matrix** $B$ that learns $m$ prototypes in the projected space, each $d$-dimensional. $B = [B_1,\\space B_2, ... \\space B_m]$.\n",
- " - A **prototype labels matrix** $Z$ that learns $m$ label vectors for each of the prototypes to allow a single prototype to represent multiple labels. Each prototype label is $L$-dimensional. $Z = [Z_1,\\space Z_2, ... \\space Z_m]$.\n",
- "\n",
- "- By default, these matrices are dense. However, for high model-size compression, we need to learn sparse versions of the above matrices. The user can restrict the **sparsity of these matrices using the parameters**: $\\lambda_W$, $\\lambda_B$ and $\\lambda_Z$.\n",
- " - $||W||_0 < \\lambda_W \\cdot size(W) = \\lambda_W \\cdot d \\cdot D$\n",
- " - $||B||_0 < \\lambda_B \\cdot size(B) = \\lambda_B \\cdot d \\cdot m$\n",
- " - $||Z||_0 < \\lambda_Z \\cdot size(Z) = \\lambda_Z \\cdot L \\cdot m$ \n",
- "\n",
- "- ProtoNN also assumes an **RBF-kernel parametrized by a single parameter:** $\\gamma$, which can be inferred heuristically from data, or be specified by the user.\n",
- "\n",
- "More details about the ProtoNN prediction function, the training algorithm, and pointers on how to tune hyper-parameters are suspended to the end of this Readme for better readability. \n",
- "\n",
- "\n",
- "## Training ProtoNN\n",
- "Follow the instructions on the main Readme to compile and create an executable `ProtoNN` \n",
- "##### A sample execution with 10-class USPS\n",
- "After creating the executable, we download a sample dataset: **USPS10**. Instructions for this can be found on the main README. To execute ProtoNN on this dataset, run the following script in bash:\n",
- "```bash\n",
- "sh run_ProtoNN_usps10.sh\n",
- "```\n",
- "This should give you output on screen as described in the output section. The final test accuracy will be about 93.4 with the specified parameters. \n",
- "\n",
- "##### Format of data files\n",
- "Data files can exist in one of the following two formats: \n",
- "- **Tab-separated (tsv)**: This is only supported for multiclass and binary datasets, not multilabel ones. The file should have $N$ rows and $D+1$ columns, where $N$ is the number of data-points and $D$ is the dimensionality of each point. Columns should be separated by _tabs_. The first column contains the label, which must be a natural number between $1$ and $L$. The rest of the $D$ columns contain the data which are real numbers.\n",
- "- **Libsvm format**: See https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/. The labels should be between $1$ and $L$, and the indices should be between $1$ and $D$. The sample **USPS-10** dataset uses this format. \n",
- "\n",
- "The following flag in `config.mk` changes this behavior: \n",
- " \n",
- " ZERO_BASED_IO: The expected label range becomes 0 ... (L-1), and the expected feature range becomes 0 ... (D-1). \n",
- "The number of lines in train and validation/test data files, the dimension of the data, and the number of labels will _not_ be inferred automatically. They must be specified as described below. \n",
- "\n",
- "##### Specifying parameters and executing\n",
- "To specify hyper-parameters for ProtoNN as well as metadata such as the location of the dataset, input format, etc., one has to write a bash script akin to the sample script at `run_ProtoNN_usps10.sh`. \n",
- "\n",
- "Once ProtoNN is compiled, we execute it via this script: \n",
- "```bash\n",
- "sh run_ProtoNN_usps10.sh\n",
- "```\n",
- "This bash script is a config file as well as an execution script. There are a number of hyper-parameters to specify, so we split them into categories as described below. Consult `run_ProtoNN_usps10.sh` for a sample. The value in the bracket indicates the command line flag used to set the given hyperparameter. \n",
- "\n",
- "##### Input-output parameters\n",
- "- **-C**: Problem format. Specify one of:\n",
- " - 0 (binary)\n",
- " - 1 (multiclass)\n",
- " - 2 (multilabel)\n",
- "- **-I**: File that contains training data. \n",
- "- **-V**: [Optional] File that contains validation/test data. \n",
- "- **-O**: Folder to store output (see output section below). \n",
- "- **-F**: Input format for data (described above):\n",
- " - 0 (libsvm format)\n",
- " - 1 (tab-separated format)\n",
- "- **-P**: Option to load a predefined model. [**Default:** 0]. Specify as 1 if pre-loading initial values of matrices $W$, $B$, $Z$. One can use this option to initialize with the output of a previous run, or with SLEEC, LMNN, etc. All three matrices, should be present in a single directory in tsv format. The directory is specified with the `-M` flag (see next). The values of the parameters $d$, $D$, $L$ will _not_ be inferred, and must be specified correctly in the rest of the fields. The filenames and dimensions of the matrices should be as follows: \n",
- " - $W$: Filename: \"W\". Dimension: ($d$, $D$). \n",
- " - $B$: Filename: \"B\". Dimension: ($d$, $m$). \n",
- " - $Z$: Filename: \"Z\". Dimension: ($L$, $m$).\n",
- " - $\\gamma$: Filename: \"gamma\". A single number representing the RBF kernel parameter.\n",
- "- **-M**: Folder that contains the predefined model files.\n",
- "\t\n",
- "\n",
- "##### Data-dependent parameters\n",
- "- **-r**: Number of training points.\n",
- "- **-v**: Number of validation/test points.\n",
- "- **-D**: The original dimension of the data.\n",
- "- **-l**: Number of classes.\n",
- "\n",
- "##### ProtoNN hyper-parameters (required)\n",
- "- **-d**: Projection dimension (the dimension into which the data is projected). [**Default:** $15$]\n",
- "- **Specify only one of the -m and the -k flags:**\n",
- " - **-m**: Number of Prototypes. On specifying this parameter, the initialization of prototypes is done by clustering the training data in projected space. [**Default:** $20$]\n",
- " - **-k**: Number of Prototypes Per Class. On specifying this parameter, initialization of prototypes is done by performing k-means clustering separately for each class, to identify $k$ different prototypes for each class. Thus, $m$ is automatically set to $L\\cdot k$.\n",
- "\n",
- "##### ProtoNN hyper-parameters (optional)\n",
- "- **-W**: Projection sparsity ($\\lambda_W$). [**Default:** $1.0$]\n",
- "- **-B**: Prototype sparsity. [**Default:** $1.0$]\n",
- "- **-Z**: Label Sparsity. [**Default:** $1.0$]\n",
- "- **-g**: GammaNumerator.\n",
- " - On setting GammaNumerator, the RBF kernel parameter $\\gamma$ is set as;\n",
- " - $\\gamma = (2.5 \\cdot GammaNumerator)/(median(||B_j,W - X_i||_2^2))$\n",
- " - **Default:** $1.0$\n",
- "- **-N**: Normalization. Specify one of: \n",
- " - **Default**: 0 (no normalization)\n",
- " - 1 (min-max normalization wherein each feature is linearly scaled to lie with 0 and 1)\n",
- " - 2 (l2-normalization wherein each data-point is normalized to unit l2-norm)\n",
- "- **-R**: A random number seed which can be used to re-generate previously obtained experimental results. [**Default:** $42$]\n",
- "\n",
- "##### ProtoNN optimization hyper-parameters (optional)\n",
- "- **-b**: Batch size for mini-batch stochastic gradient descent. [**Default:** $1024$]\n",
- "- **-T**: Total number of optimization iterations. [**Default:** $20$]\n",
- "- **-E**: Number of epochs (complete see-through's) of the data for each iteration, and each parameter. [**Default:** $20$] \n",
- "\n",
- "##### Final Execution\n",
- "The script in this section combines all the specified hyper-parameters to create an execution command. This command is printed to stdout, and then executed.\n",
- "Most users should copy this section directly to all their ProtoNN execution scripts without change. We provide a single option here that is commented out by default: \n",
- "- **gdb --args**: Run ProtoNN with given hyper-parameters in debug mode using gdb. \n",
- "\n",
- "## Testing a trained model\n",
- "Once a ProtoNN model has been trained, one can test it on a new dataset. \n",
- "##### A sample execution with 10-class USPS\n",
- "The model trained using the sample script mentioned before can be tested with the following script: \n",
- "```bash\n",
- "sh run_ProtoNNPredict_usps10.sh\n",
- "```\n",
- "\n",
- "##### Explanation of parameters: \n",
- "- **-I**: File that contains test data. \n",
- "- **-M**: The model file in non-human readable format that is output on running ProtoNN. \n",
- "- **-n**: Normalization files if data was normalized when ProtoNN was trained. \n",
- "- **-O**: Folder to store output (see output section below). \n",
- "- **-F**: Input format for data (same as described in training section).\n",
- "- **-e**: Number of test points.\n",
- "- **-b**: [Optional] If unspecified, testing happens for each data-point separately (to simulate a real-world scenario). For faster prediction when prototyping, use the parameter to specify a batch on which prediction happens in one go. \n",
- "\t\n",
- "\n",
- "\n",
- "## Disclaimers\n",
- "- The training data is not automatically shuffled in the code. If possible, **pre-shuffle** the data before passing to ProtoNN. For instance, all examples of a single class should not occur consecutively.\n",
- "- **Normalization**: Ideally, the user should provide **standardized** (Mean-Variance normalized) data. If this is not possible, use one of the normalization options that we provide. The code may be unstable in the absence of normalization.\n",
- "- The results on various datasets as reported in the ProtoNN paper were using **Gradient Descent** as the optimization algorithm, whereas this repository uses **Stochastic Gradient Descent**. It is possible that the results don't match exactly. We will publish an update to this repository with Gradient Descent implemented. \n",
- "- We do not provide support for **Cross-Validation**, only **Train-Test** style runs. The user can write a bash wrapper to perform Cross-Validation. \n",
- "\n",
- "## Interpreting the output\n",
- "##### Output of ProtoNNTrainer:\n",
-
- "- The following information is printed to **std::cout**: \n",
- " - The chosen value of $\\gamma$.\n",
- " - **Training, testing accuracy, and training objective value**, thrice for each iteration, once after optimizing each parameter. For multilabel problems, **prec@1** is output instead. \n",
- " - On enabling the `VERBOSE` flag in `config.mk`, additional informative output is printed to stdout. \n",
- "\n",
- "- **Errors and warnings** are printed to **std::cerr**. \n",
- "\n",
- "- Additional **parameter dumps**, **timer logs** and other **debugging logs** will be placed in the output folder specified with the `-O` flag above. The user should have read-write permissions on the folder. \n",
- " - On execution, a folder will be created in the output directory that will indicate to the user the list of parameters with which the run was instantiated. In this folder, upto **7 files** and **2 folders** will be created depending on which flags are set in `config.mk`: \n",
- " - **runInfo**: This file contains the hyperparameters and meta-information for the respective instantiation of ProtoNN. It also shows you the exact bash script call that was made, which is helpful for reproducing results purposes. Additionally, the training, testing accuracy and objective value at the end of each iteration is printed in a readable format. **This file is created at the end of the ProtoNN optimization.**\n",
- " - **W, B, Z**: These files contain the learnt parameter matrices $W$, $B$ and $Z$ in human-readable tsv format. The dimensions of storage are $(d, D)$, $(d, m)$ and $(L, m)$ respectively. **These files are created at the end of the ProtoNN optimization.**\n",
- " - **gamma**: This file contains a single number, the chosen value of $\\gamma$, the RBF kernel parameter. **This file is created at the end of the ProtoNN optimization.**\n",
- " - **model**: This file contains the final trained model with all the parameters and hyperparameters in a non-human readable format. This is to facilitate the prediction code. **This file is created at the end of the ProtoNN optimization.**\n",
- " - **diagnosticLog**: Created on using the `LOGGER` or `LIGHT_LOGGER` flags. This file stores logging information such as the call flow of ProtoNN and the min, max, norms of various matrices. This is mainly for debugging/optimization purposes and requires a more detailed understanding of the code to interpret. It may contain useful information if your code did not run as expected. **The diagnosticLog file is populated synchronously while the ProtoNN optimization is executing.** \n",
- " - **timerLog**: Created on using the `TIMER` flag. This file stores proc time and wall time taken to execute various function calls in the code. Indicates the degree of parallelization and is useful for identifying bottlenecks to optimize the code. On specifying the `CONCISE` flag, timing information will only be printed if running time is higher than a threshold specified in `src/common/timer.cpp`. **The timerLog file is populated synchronously while the ProtoNN optimization is executing.** \n",
- " - **dump**: A folder that is created on using the `DUMP` flag. The parameter matrices are outputted after each iteration in this folder. **This folder is populated synchronously while the ProtoNN optimization is executing.**\n",
- " - **verify**: A folder that is created on using the `VERIFY` flag. Code for backward verification with legacy Matlab code. **This folder is populated synchronously while the ProtoNN optimization is executing.**\n",
- "\n",
- "The files **W, B, Z**, and **gamma** can be used to continue training of ProtoNN by initializing with these previously learned matrices. Use the **-P** option for this (see above). On doing so, the starting train/test accuracies should match the final accuracy as specified in the runInfo file. \n",
- "\n",
- "##### Output of ProtoNNPredictor:\n",
- "On execution, the test accuracy, or precision@1,3,5 will be output to stdout. Additionally, a folder will be created in the output directory that will indicate to the user the list of parameters with which the model model to be tested was trained. In this folder, there will be one file detailedPrediction. This file contains for each test point the true labels of that point as well as the scores of the top 5 predicted labels. \n",
-
- "\n",
- "## Choosing hyperparameters\n",
- "##### Model size as a function of hyperparameters\n",
- "The user presented with a model-size budget has to make a decision regarding the following 5 hyper-parameters: \n",
- "- The projection dimension $d$\n",
- "- The number of prototypes $m$\n",
- "- The 3 sparsity parameters: $\\lambda_W$, $\\lambda_B$, $\\lambda_Z$\n",
- " \n",
- "Each parameter requires the following number of non-zero values for storage:\n",
- "- $S_W: min(1, 2\\lambda_W) \\cdot d \\cdot D$\n",
- "- $S_B: min(1, 2\\lambda_B) \\cdot d \\cdot m$\n",
- "- $S_Z: min(1, 2\\lambda_Z) \\cdot L \\cdot m$\n",
- "\n",
- "The factor of 2 is for storing the index of a sparse matrix, apart from the value at that index. Clearly, if a matrix is more than 50% dense ($\\lambda > 0.5$), it is better to store the matrix as dense instead of incurring the overhead of storing indices along with the values. Hence the minimum operator. \n",
- "Suppose each value is a single-precision floating point (4 bytes), then the total space required by ProtoNN is $4\\cdot(S_W + S_B + S_Z)$. This value is computed and output to screen on running ProtoNN. \n",
- "\n",
- "##### Pointers on choosing hyperparameters\n",
- "Choosing the right hyperparameters may seem to be a daunting task in the beginning but becomes much easier with a little bit of thought. To get an idea of default parameters on some sample datasets, see the ([paper](publications/ProtoNN.pdf)). Few rules of thumb:\n",
- "- $S_B$ is typically small, and hence $\\lambda_B \\approx 1.0$. \n",
- "- One can set $m$ to $min(10\\cdot L, 0.01\\cdot numTrainingPoints)$, and $d$ to $15$ for an initial experiment. Typically, you want to cross-validate for $m$ and $d$. \n",
- "- Depending on $L$ and $D$, $S_W$ or $S_Z$ is the biggest contributors to model size. $\\lambda_W$ and $\\lambda_Z$ can be adjusted accordingly or cross-validated for. \n",
- "\n",
- "## Additional ProtoNN flags \n",
- "#### [Beta] Alternative optimization routine\n",
- "One can use the `BTLS` flag in `src/ProtoNN/Makefile` (variable PROTONN_FLAGS) to enable optimization with [Back Tracking Line Search]. This is a faster and more stable optimization route. \n",
- "\n",
- "#### [Beta] ProtoNN for Extreme Multilabel Learning (XML)\n",
- "[XML](http://manikvarma.org/downloads/XC/XMLRepository.html) refers to a difficult class of multi-label learning problems where the number of labels is large (ranging from a few thousands to a few millions). ProtoNN has been written to be compatible with these datasets. This mode can be enabled by the `XML` flag. \n",
- "\n",
- "## Formal details\n",
- "##### Prediction function\n",
- "ProtoNN predicts on a new test-point in the following manner. For a test-point $X$, ProtoNN computes the following $L$ dimensional score vector:\n",
- "$Y_{score}=\\sum_{j=0}^{m}\\space \\left(RBF_\\gamma(W\\cdot X,B_j)\\cdot Z_j\\right)$, where\n",
- "$RBF_\\gamma (U, V) = exp\\left[-\\gamma^2||U - V||_2^2\\right]$\n",
- "The prediction label is then $\\space max(Y_{score})$. \n",
- "\n",
- "##### Training \n",
- "While training, we are presented with training examples $X_1, X_2, ... X_n$ along with their label vectors $Y_1, Y_2, ... Y_n$ respectively. $Y_i$ is an L-dimensional vector that is $0$ everywhere, except the component to which the training point belongs, where it is $1$. For example, for a $3$ class problem, for a data-point that belongs to class $2$, $Y=[0, 1, 0]$. \n",
- "We optimize the $l_2$-square loss over all training points as follows: $\\sum_{i=0}^{n} = ||Y_i-\\sum_{j=0}^{m}\\space \\left(exp\\left[-\\gamma^2||W\\cdot X_i - B_j||^2\\right]\\cdot Z_j\\right)||_2^2$. \n",
- "While performing stochastic gradient descent, we hard threshold after each gradient update step to ensure that the three memory constraints (one each for $\\lambda_W, \\lambda_B, \\lambda_Z$) are satisfied by the matrices $W$, $B$ and $Z$. \n"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.1"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/docs/README_PROTONN_TLC.md b/docs/README_PROTONN_TLC.md
deleted file mode 100644
index efa59e20e..000000000
--- a/docs/README_PROTONN_TLC.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# ProtoNN: Compressed and accurate KNN for resource-constrained devices
-
-ProtoNN ([paper](http://manikvarma.org/pubs/gupta17.pdf)) has been developed for machine learning applications where the intended footprint of the ML model is small. ProtoNN models have memory requirements that are several orders of magnitude lower than other modern ML algorithms. At prediction time, ProtoNN is fast, precise, and accurate.
-
-One example of a ubiquitous real-world application where such a model is desirable are resource-scarce devices such as an Internet of Things (IoT) sensor. To make real-time predictions locally on IoT devices, without connecting to the cloud, we need models that are just a few kilobytes large. ProtoNN shines in this setting, beating all other algorithms by a significant margin.
-
-## The model
-Suppose a single data-point is D-dimensional. Suppose also that there are a total of L labels to predict.
-
-ProtoNN learns 3 parameters:
-- A projection matrix W of dimension (d,\space D) projects the datapoints to a small dimension d
-- m prototypes in the projected space, each d-dimensional: B = [B_1,\space B_2, ... \space B_m]
-- m label vectors for each of the prototypes to allow a single prototype to store information for multiple labels, each L-dimensional: Z = [Z_1,\space Z_2, ... \space Z_m]
-
-ProtoNN also assumes an RBF-kernel parametrized by a single parameter \gamma. Each of the three matrices are trained to be sparse. The user can specify the maximum proportion of entries that can be non-zero in each of these matrices using the parameters \lambda_W, \lambda_B and \lambda_Z:
-- ||W||_0 < \lambda_W \cdot size(W)
-- ||B||_0 < \lambda_B \cdot size(B)
-- ||Z||_0 < \lambda_Z \cdot size(Z)
-
-## Effect of various parameters
-The user presented with a model-size budget has to make a decision regarding the following 5 parameters:
-- The projection dimension d
-- The number of prototypes m
-- The 3 sparsity parameters: \lambda_W, \lambda_B, \lambda_Z
-
-Each parameter requires the following number of non-zero values for storage:
-- S_W: min(1, 2\lambda_W) \cdot d \cdot D
-- S_B: min(1, 2\lambda_B) \cdot d \cdot m
-- S_Z: min(1, 2\lambda_Z) \cdot L \cdot m
-
-The factor of 2 is for storing the index of a sparse matrix, apart from the value at that index. Clearly, if a matrix is more than 50% dense (\lambda > 0.5), it is better to store the matrix as dense instead of incurring the overhead of storing indices along with the values. Hence the minimum operator.
-Suppose each value is a single-precision floating point (4 bytes), then the total space required by ProtoNN is 4\cdot(S_W + S_B + S_Z).
-
-## Prediction
-Given these parameters, ProtoNN predicts on a new test-point in the following manner. For a test-point X, ProtoNN computes the following L dimensional score vector:
-Y_{score}=\sum_{j=0}^{m}\space \left(RBF_\gamma(W\cdot X,B_j)\cdot Z_j\right), where
-RBF_\gamma (U, V) = exp\left[-\gamma^2||U - V||_2^2\right]
-The prediction label is then \space max(Y_{score}).
-
-## Training
-While training, we are presented with training examples X_1, X_2, ... X_n along with their label vectors Y_1, Y_2, ... Y_n respectively. Y_i is an L-dimensional vector that is 0 everywhere, except the component to which the training point belongs, where it is 1. For example, for a 3 class problem, for a data-point that belongs to class 2, Y=[0, 1, 0].
-
-We optimize the l_2-square loss over all training points as follows: \sum_{i=0}^{n} = ||Y_i-\sum_{j=0}^{m}\space \left(exp\left[-\gamma^2||W\cdot X_i - B_j||^2\right]\cdot Z_j\right)||_2^2.
-While performing stochastic gradient descent, we hard threshold after each gradient update step to ensure that the three memory constraints (one each for \lambda_W, \lambda_B, \lambda_Z) are satisfied by the matrices W, B and Z.
-
-
-## Parameters
-- Projection Dimension (d): this is the dimension into which the data is projected
-- Clustering Init: This option specifies whether the initialization for the prototypes is performed by clustering the entire training data (OverallKmeans), or clustering data-points belonging to different classes separately (PerClassKmeans).
-- Num Prototypes (m): This is the number of prototypes. This parameter is only used if Clustering Init is specified as OverallKmeans.
-- Num Prototypes Per Class (k): This is the number of prototypes per class. This parameter is only used if Clustering Init is specified as PerClassKmeans. On using it, m becomes L\cdot k where L is the number of classes.
-- gammaNumerator:
- - On setting gammaNumerator, the RBF kernel parameter \gamma is set as;
- - \gamma = (2.5 \cdot gammaNumerator)/(median(||B_j,W - X_i||_2^2))
-- sparsity parameters (described in detail above): Projection sparsity (\lambda_W), Prototype Sparsity (\lambda_B), Label Sparsity(\lambda_Z).
-- Batch size: Batch size for mini-batch stochastic gradient descent.
-- Number of iterations: total number of optimization iterations.
-- Epochs: Number of see-through's of the data for each iteration, and each parameter.
-- Seed: A random number seed which can be used to re-generate previously obtained experimental results.
diff --git a/drivers/Bonsai/CMakeLists.txt b/drivers/Bonsai/CMakeLists.txt
deleted file mode 100644
index d9adbf148..000000000
--- a/drivers/Bonsai/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# cmake file for Bonsai drivers
-#
-
-add_subdirectory(trainer)
-add_subdirectory(predictor)
-#add_subdirectory(ingestTest)
-
diff --git a/drivers/Bonsai/ingestTest/BonsaiIngestTest.cpp b/drivers/Bonsai/ingestTest/BonsaiIngestTest.cpp
deleted file mode 100644
index 7b8f98f29..000000000
--- a/drivers/Bonsai/ingestTest/BonsaiIngestTest.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT license.
-
-#include "Bonsai.h"
-
-using namespace EdgeML;
-using namespace EdgeML::Bonsai;
-
-int main()
-{
- BonsaiModel::BonsaiHyperParams hyperParam;
-
- hyperParam.problem_type = ProblemFormat::multiclass;
- hyperParam.dataformat_type = DataFormat::interface_ingest_format;
- hyperParam.normalization_type = NormalizationFormat::none;
-
- hyperParam.seed = 41;
-
- hyperParam.batch_size = 1;
- hyperParam.iters = 20;
- hyperParam.epochs = 1;
-
- hyperParam.dataDimension = 784;
- hyperParam.projectionDimension = 30;
-
- hyperParam.numClasses = 10;
-
- hyperParam.ntest = 0;
- hyperParam.ntrain = 5000;
-
- hyperParam.Sigma = 1.0;
- hyperParam.treeDepth = 3;
-
- hyperParam.internalNodes = (pow(2, hyperParam.treeDepth) - 1);
- hyperParam.totalNodes = 2 * hyperParam.internalNodes + 1;
-
- hyperParam.regList.lW = 1.0e-3;
- hyperParam.regList.lZ = 1.0e-5;
- hyperParam.regList.lV = 1.0e-3;
- hyperParam.regList.lTheta = 1.0e-3;
-
-
- hyperParam.lambda_W = 10/30;
- hyperParam.lambda_Z = 150/785;
- hyperParam.lambda_V = 10/30;
- hyperParam.lambda_Theta = 10/30;
-
- hyperParam.finalizeHyperParams();
-
- // trivial data set
- {
- BonsaiTrainer trainer(DataIngestType::InterfaceIngest, hyperParam);
-
- string inputFilePath;
- std::ifstream ifs(inputFilePath);
- FP_TYPE *trainvals = new FP_TYPE[hyperParam.dataDimension];
- memset(trainvals, 0, sizeof(FP_TYPE)*(hyperParam.dataDimension));
-
-
- FP_TYPE temp;
- // labelCount_t *temp1 = new labelCount_t[hyperParam.numClasses];
- labelCount_t *labve = new labelCount_t[1];
- for(int i=0; i>temp;
- // std::cout<> temp;
- trainvals[count] = temp;
- count++;
- }
- // std::cout<> temp1[j];
- // if(temp1[j] != 0) {
- // labve[0] = j;
- // }
- // }
-
- trainer.feedDenseData(trainvals, labve, 1);
- // if(i%5000 == 0) std::cout<>temp;
-
- labve[0] = (labelCount_t)temp;
- while(count < hyperParam.dataDimension) {
- ifw >> temp;
- trainvals[count] = temp;
- count++;
- }
-
- predictor.scoreDenseDataPoint(scoreArray, trainvals);
-
- labelCount_t predLabel = 0;
- FP_TYPE max_score = scoreArray[0];
- for(int j=0; jfeedSparseData (trainPts + 2*i, indices, numIndices, labels, 1);
- // trainer->feedSparseData (trainPts + 6, indices, numIndices, labels + 1, 1);
- // for (int i=4; i<7; ++i)
- // trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels, 1);
- // trainer->feedSparseData (trainPts + 14, indices, numIndices, labels + 2, 1);
- // for (int i=8; i<11; ++i)
- // trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels+1, 1);
- // trainer->feedSparseData (trainPts + 22, indices, numIndices, labels + 2, 1);
- // for (int i=12; i<15; ++i)
- // trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels+2, 1);
- // trainer->feedSparseData (trainPts + 30, indices, numIndices, labels + 1, 1);
-
- // trainer->feedSparseData (trainPts + 32, indices, numIndices, labels+2, 1);
-
- // trainer->finalizeData();
-
- // trainer->train();
-
- // auto modelBytes = trainer->getModelSize();
- // auto model = new char[modelBytes];
-
- // trainer->exportModel(modelBytes, model);
- // auto predictor = new BonsaiPredictor(modelBytes, model);
-
- // FP_TYPE scoreArray[3] = {0.0, 0.0, 0.0};
-
- // FP_TYPE testPts[2*5] = {-1.0, -1.0,
- // 1.0, 1.0,
- // -1.0, 1.0,
- // 1.0, -1.0,
- // 0.5, 0.5};
-
- // for (int t=0; t<5; ++t) {
- // predictor->scoreDenseDataPoint(scoreArray, testPts + 2*t);
- // for(int i=0;i<3;++i) std::cout<feedSparseData (trainPts + 2*i, indices, numIndices, labels, 1);
- // trainer->feedSparseData (trainPts + 6, indices, numIndices, labels + 1, 1);
- // for (int i=4; i<7; ++i)
- // trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels, 1);
- // trainer->feedSparseData (trainPts + 14, indices, numIndices, labels + 2, 1);
- // for (int i=8; i<11; ++i)
- // trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels+1, 1);
- // trainer->feedSparseData (trainPts + 22, indices, numIndices, labels + 2, 1);
- // for (int i=12; i<15; ++i)
- // trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels+2, 1);
- // trainer->feedSparseData (trainPts + 30, indices, numIndices, labels + 1, 1);
-
- // trainer->feedSparseData (trainPts + 32, indices, numIndices, labels+2, 1);
-
- // trainer->finalizeData();
-
- // trainer->train();
-
- // auto modelBytes = trainer->getModelSize();
- // auto model = new char[modelBytes];
-
- // trainer->exportModel(modelBytes, model);
- // auto predictor = new BonsaiPredictor(modelBytes, model);
-
- // FP_TYPE scoreArray[3] = {0.0, 0.0, 0.0};
-
- // FP_TYPE testPts[2*5] = {-1.0, -1.0,
- // 1.0, 1.0,
- // -1.0, 1.0,
- // 1.0, -1.0,
- // 0.5, 0.5};
-
- // for (int t=0; t<5; ++t) {
- // predictor->scoreSparseDataPoint(scoreArray, testPts + 2*t, indices, numIndices);
- // for(int i=0;i<3;++i) std::cout<feedDenseData (trainPts + 2*i, labels, 1);
- for (int i=4; i<8; ++i)
- trainer->feedDenseData (trainPts + 2*i, labels, 1);
- for (int i=8; i<12; ++i)
- trainer->feedDenseData (trainPts + 2*i, labels+1, 1);
- for (int i=12; i<16; ++i)
- trainer->feedDenseData (trainPts + 2*i, labels+2, 1);
-
- trainer->finalizeData();
-
- trainer->train();
-
- auto modelBytes = trainer->getModelSize();
- auto model = new char[modelBytes];
-
- trainer->exportModel(modelBytes, model);
- auto predictor = new ProtoNNPredictor(modelBytes, model);
-
- FP_TYPE scoreArray[3] = {0.0, 0.0, 0.0};
- FP_TYPE testPts[2*4] = {-1.0, -1.0,
- 1.0, 1.0,
- -1.0, 1.0,
- 1.0, -1.0};
-
- for (int t=0; t<4; ++t) {
- predictor->scoreDenseDataPoint(scoreArray, testPts + 2*t);
- for(int i=0;i<3;++i) std::cout<feedDenseData (trainPts + 2*i, labels, 1);
- trainer->feedDenseData (trainPts + 6, labels + 1, 1);
- for (int i=4; i<7; ++i)
- trainer->feedDenseData (trainPts + 2*i, labels, 1);
- trainer->feedDenseData (trainPts + 14, labels + 2, 1);
- for (int i=8; i<11; ++i)
- trainer->feedDenseData (trainPts + 2*i, labels+1, 1);
- trainer->feedDenseData (trainPts + 22, labels + 2, 1);
- for (int i=12; i<15; ++i)
- trainer->feedDenseData (trainPts + 2*i, labels+2, 1);
- trainer->feedDenseData (trainPts + 30, labels + 1, 1);
-
- trainer->feedDenseData (trainPts + 32, labels+2, 1);
-
- trainer->finalizeData();
-
- trainer->train();
-
- auto modelBytes = trainer->getModelSize();
- auto model = new char[modelBytes];
-
- trainer->exportModel(modelBytes, model);
- auto predictor = new ProtoNNPredictor(modelBytes, model);
-
- FP_TYPE scoreArray[3] = {0.0, 0.0, 0.0};
-
- FP_TYPE testPts[2*5] = {-1.0, -1.0,
- 1.0, 1.0,
- -1.0, 1.0,
- 1.0, -1.0,
- 0.5, 0.5};
-
- for (int t=0; t<5; ++t) {
- predictor->scoreDenseDataPoint(scoreArray, testPts + 2*t);
- for(int i=0;i<3;++i) std::cout<feedSparseData (trainPts + 2*i, indices, numIndices, labels, 1);
- trainer->feedSparseData (trainPts + 6, indices, numIndices, labels + 1, 1);
- for (int i=4; i<7; ++i)
- trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels, 1);
- trainer->feedSparseData (trainPts + 14, indices, numIndices, labels + 2, 1);
- for (int i=8; i<11; ++i)
- trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels+1, 1);
- trainer->feedSparseData (trainPts + 22, indices, numIndices, labels + 2, 1);
- for (int i=12; i<15; ++i)
- trainer->feedSparseData (trainPts + 2*i, indices, numIndices, labels+2, 1);
- trainer->feedSparseData (trainPts + 30, indices, numIndices, labels + 1, 1);
-
- trainer->feedSparseData (trainPts + 32, indices, numIndices, labels+2, 1);
-
- trainer->finalizeData();
-
- trainer->train();
-
- auto modelBytes = trainer->getModelSize();
- auto model = new char[modelBytes];
-
- trainer->exportModel(modelBytes, model);
- auto predictor = new ProtoNNPredictor(modelBytes, model);
-
- FP_TYPE scoreArray[3] = {0.0, 0.0, 0.0};
-
- FP_TYPE testPts[2*5] = {-1.0, -1.0,
- 1.0, 1.0,
- -1.0, 1.0,
- 1.0, -1.0,
- 0.5, 0.5};
-
- for (int t=0; t<5; ++t) {
- //predictor->scoreDenseDataPoint(scoreArray, testPts + 2*t);
- // both dense and sparse scoring work
- predictor -> scoreSparseDataPoint(scoreArray, testPts + 2*t, indices, 2);
- for(int i=0;i<3;++i) std::cout<
-
-using namespace EdgeML;
-
-int main(int argc, char **argv) {
-#ifdef LINUX
- trapfpe();
- struct sigaction sa;
- sigemptyset (&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- sa.sa_sigaction = fpehandler;
- sigaction (SIGFPE, &sa, NULL);
-#endif
-
- assert(sizeof(MKL_INT) == 8 && "need large enough indices to store matrices");
- assert(sizeof(MKL_INT) == sizeof(Eigen::Index) && "MKL BLAS routines are called directly on data of an Eigen matrix. Hence, the index sizes should match.");
-
- ProtoNN::ProtoNNPredictor predictor(argc, (const char**)argv);
- EdgeML::ResultStruct res;
-
- res = predictor.test();
-
- predictor.saveTopKScores();
-
- switch(res.problemType) {
- case binary:
- case multiclass:
- LOG_INFO("Accuracy: " + std::to_string(res.accuracy));
- break;
- case multilabel:
- LOG_INFO("Prec@1: " + std::to_string(res.precision1));
- LOG_INFO("Prec@3: " + std::to_string(res.precision3));
- LOG_INFO("Prec@5: " + std::to_string(res.precision5));
- break;
- default:
- assert(false);
- }
-}
diff --git a/drivers/ProtoNN/trainer/CMakeLists.txt b/drivers/ProtoNN/trainer/CMakeLists.txt
deleted file mode 100644
index b952a1dec..000000000
--- a/drivers/ProtoNN/trainer/CMakeLists.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-set (tool_name ProtoNNTrain)
-
-set (src ProtoNNTrainDriver.cpp)
-
-source_group("src" FILES ${src})
-
-set (CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_SOURCE_DIR})
-set (CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_SOURCE_DIR})
-
-add_executable(${tool_name} ${src} ${include})
-target_include_directories(${tool_name} PRIVATE ${CMAKE_SOURCE_DIR}/src/common ${CMAKE_SOURCE_DIR}/src/ProtoNN)
-
-IF(CMAKE_COMPILER_IS_GNUCC)
- target_link_libraries(${tool_name} common ProtoNN mkl_intel_ilp64 mkl_core mkl_gnu_thread gomp pthread cilkrts)
-ENDIF (CMAKE_COMPILER_IS_GNUCC)
-
-IF(NOT CMAKE_COMPILER_IS_GNUCC)
- target_link_libraries(${tool_name} common ProtoNN mkl_intel_ilp64 mkl_intel_thread mkl_core libiomp5md)
-ENDIF (NOT CMAKE_COMPILER_IS_GNUCC)
-
-set_property(TARGET ${tool_name} PROPERTY FOLDER "drivers/ProtoNN")
diff --git a/drivers/ProtoNN/trainer/Makefile b/drivers/ProtoNN/trainer/Makefile
deleted file mode 100644
index 9c1b972ea..000000000
--- a/drivers/ProtoNN/trainer/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT license.
-
-include ../../../config.mk
-
-SOURCE_DIR=../../../src
-
-COMMON_DIR=$(SOURCE_DIR)/common
-PROTONN_DIR=$(SOURCE_DIR)/ProtoNN
-IFLAGS = -I ../../../eigen -I$(MKL_ROOT)/include \
- -I$(COMMON_DIR) -I$(PROTONN_DIR)
-
-all: ../../../ProtoNNTrainDriver.o
-
-../../../ProtoNNTrainDriver.o: ProtoNNTrainDriver.cpp
- $(CC) -c -o $@ $(IFLAGS) $(CFLAGS) $<
-
-.PHONY: clean cleanest
-
-clean:
- rm -f ../../../ProtoNNTrainDriver.o
-
-cleanest: clean
- rm *~
diff --git a/drivers/ProtoNN/trainer/ProtoNNTrainDriver.cpp b/drivers/ProtoNN/trainer/ProtoNNTrainDriver.cpp
deleted file mode 100644
index d9ac24fd6..000000000
--- a/drivers/ProtoNN/trainer/ProtoNNTrainDriver.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT license.
-
-
-#include "ProtoNN.h"
-#include "logger.h"
-
-using namespace EdgeML::ProtoNN;
-
-int main(int argc, char **argv)
-{
-#ifdef LINUX
- trapfpe();
- struct sigaction sa;
- sigemptyset (&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- sa.sa_sigaction = fpehandler;
- sigaction (SIGFPE, &sa, NULL);
-#endif
-
- assert(sizeof(MKL_INT) == 8 && "need large enough indices to store matrices");
- assert(sizeof(MKL_INT) == sizeof(Eigen::Index) && "MKL BLAS routines are called directly on data of an Eigen matrix. Hence, the index sizes should match.");
- EdgeML::ProtoNN::ProtoNNTrainer trainer(argc, (const char**)argv);
-
- trainer.train();
-
- return 0;
-}
diff --git a/eigen/.hg_archival.txt b/eigen/.hg_archival.txt
deleted file mode 100644
index 677d28282..000000000
--- a/eigen/.hg_archival.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-repo: 8a21fd850624c931e448cbcfb38168cb2717c790
-node: 26667be4f70baf4f0d39e96f330714c87b399090
-branch: default
-tag: 3.3.0
diff --git a/eigen/.hgeol b/eigen/.hgeol
deleted file mode 100644
index 5327df161..000000000
--- a/eigen/.hgeol
+++ /dev/null
@@ -1,11 +0,0 @@
-[patterns]
-*.sh = LF
-*.MINPACK = CRLF
-scripts/*.in = LF
-debug/msvc/*.dat = CRLF
-debug/msvc/*.natvis = CRLF
-unsupported/test/mpreal/*.* = CRLF
-** = native
-
-[repository]
-native = LF
diff --git a/eigen/.hgignore b/eigen/.hgignore
deleted file mode 100644
index 769a47f1f..000000000
--- a/eigen/.hgignore
+++ /dev/null
@@ -1,34 +0,0 @@
-syntax: glob
-qrc_*cxx
-*.orig
-*.pyc
-*.diff
-diff
-*.save
-save
-*.old
-*.gmo
-*.qm
-core
-core.*
-*.bak
-*~
-build*
-*.moc.*
-*.moc
-ui_*
-CMakeCache.txt
-tags
-.*.swp
-activity.png
-*.out
-*.php*
-*.log
-*.orig
-*.rej
-log
-patch
-a
-a.*
-lapack/testing
-lapack/reference
diff --git a/eigen/.hgtags b/eigen/.hgtags
deleted file mode 100644
index 7036de122..000000000
--- a/eigen/.hgtags
+++ /dev/null
@@ -1,29 +0,0 @@
-2db9468678c6480c9633b6272ff0e3599d1e11a3 2.0-beta3
-375224817dce669b6fa31d920d4c895a63fabf32 2.0-beta1
-3b8120f077865e2a072e10f5be33e1d942b83a06 2.0-rc1
-19dfc0e7666bcee26f7a49eb42f39a0280a3485e 2.0-beta5
-7a7d8a9526f003ffa2430dfb0c2c535b5add3023 2.0-beta4
-7d14ad088ac23769c349518762704f0257f6a39b 2.0.1
-b9d48561579fd7d4c05b2aa42235dc9de6484bf2 2.0-beta6
-e17630a40408243cb1a51ad0fe3a99beb75b7450 before-hg-migration
-eda654d4cda2210ce80719addcf854773e6dec5a 2.0.0
-ee9a7c468a9e73fab12f38f02bac24b07f29ed71 2.0-beta2
-d49097c25d8049e730c254a2fed725a240ce4858 after-hg-migration
-655348878731bcb5d9bbe0854077b052e75e5237 actual-start-from-scratch
-12a658962d4e6dfdc9a1c350fe7b69e36e70675c 3.0-beta1
-5c4180ad827b3f869b13b1d82f5a6ce617d6fcee 3.0-beta2
-7ae24ca6f3891d5ac58ddc7db60ad413c8d6ec35 3.0-beta3
-c40708b9088d622567fecc9208ad4a426621d364 3.0-beta4
-b6456624eae74f49ae8683d8e7b2882a2ca0342a 3.0-rc1
-a810d5dbab47acfe65b3350236efdd98f67d4d8a 3.1.0-alpha1
-304c88ca3affc16dd0b008b1104873986edd77af 3.1.0-alpha2
-920fc730b5930daae0a6dbe296d60ce2e3808215 3.1.0-beta1
-8383e883ebcc6f14695ff0b5e20bb631abab43fb 3.1.0-rc1
-bf4cb8c934fa3a79f45f1e629610f0225e93e493 3.1.0-rc2
-da195914abcc1d739027cbee7c52077aab30b336 3.2-beta1
-a8e0d153fc5e239ef8b06e3665f1f9e8cb8d49c8 before-evaluators
-09a8e21866106b49c5dec1d6d543e5794e82efa0 3.3-alpha1
-ce5a455b34c0a0ac3545a1497cb4a16c38ed90e8 3.3-beta1
-69d418c0699907bcd0bf9e0b3ba0a112ed091d85 3.3-beta2
-bef509908b9da05d0d07ffc0da105e2c8c6d3996 3.3-rc1
-04ab5fa4b241754afcf631117572276444c67239 3.3-rc2
diff --git a/eigen/CMakeLists.txt b/eigen/CMakeLists.txt
deleted file mode 100644
index f38e22973..000000000
--- a/eigen/CMakeLists.txt
+++ /dev/null
@@ -1,525 +0,0 @@
-project(Eigen3)
-
-cmake_minimum_required(VERSION 2.8.5)
-
-# guard against in-source builds
-
-if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
- message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
-endif()
-
-# Alias Eigen_*_DIR to Eigen3_*_DIR:
-
-set(Eigen_SOURCE_DIR ${Eigen3_SOURCE_DIR})
-set(Eigen_BINARY_DIR ${Eigen3_BINARY_DIR})
-
-# guard against bad build-type strings
-
-if (NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "Release")
-endif()
-
-string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_tolower)
-if( NOT cmake_build_type_tolower STREQUAL "debug"
- AND NOT cmake_build_type_tolower STREQUAL "release"
- AND NOT cmake_build_type_tolower STREQUAL "relwithdebinfo")
- message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are Debug, Release, RelWithDebInfo (case-insensitive).")
-endif()
-
-
-#############################################################################
-# retrieve version infomation #
-#############################################################################
-
-# automatically parse the version number
-file(READ "${PROJECT_SOURCE_DIR}/Eigen/src/Core/util/Macros.h" _eigen_version_header)
-string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen_world_version_match "${_eigen_version_header}")
-set(EIGEN_WORLD_VERSION "${CMAKE_MATCH_1}")
-string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen_major_version_match "${_eigen_version_header}")
-set(EIGEN_MAJOR_VERSION "${CMAKE_MATCH_1}")
-string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen_minor_version_match "${_eigen_version_header}")
-set(EIGEN_MINOR_VERSION "${CMAKE_MATCH_1}")
-set(EIGEN_VERSION_NUMBER ${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION})
-
-# if the mercurial program is absent, this will leave the EIGEN_HG_CHANGESET string empty,
-# but won't stop CMake.
-execute_process(COMMAND hg tip -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_HGTIP_OUTPUT)
-execute_process(COMMAND hg branch -R ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE EIGEN_BRANCH_OUTPUT)
-
-# if this is the default (aka development) branch, extract the mercurial changeset number from the hg tip output...
-if(EIGEN_BRANCH_OUTPUT MATCHES "default")
-string(REGEX MATCH "^changeset: *[0-9]*:([0-9;a-f]+).*" EIGEN_HG_CHANGESET_MATCH "${EIGEN_HGTIP_OUTPUT}")
-set(EIGEN_HG_CHANGESET "${CMAKE_MATCH_1}")
-endif(EIGEN_BRANCH_OUTPUT MATCHES "default")
-#...and show it next to the version number
-if(EIGEN_HG_CHANGESET)
- set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER} (mercurial changeset ${EIGEN_HG_CHANGESET})")
-else(EIGEN_HG_CHANGESET)
- set(EIGEN_VERSION "${EIGEN_VERSION_NUMBER}")
-endif(EIGEN_HG_CHANGESET)
-
-
-include(CheckCXXCompilerFlag)
-include(GNUInstallDirs)
-
-set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
-
-#############################################################################
-# find how to link to the standard libraries #
-#############################################################################
-
-find_package(StandardMathLibrary)
-
-
-set(EIGEN_TEST_CUSTOM_LINKER_FLAGS "" CACHE STRING "Additional linker flags when linking unit tests.")
-set(EIGEN_TEST_CUSTOM_CXX_FLAGS "" CACHE STRING "Additional compiler flags when compiling unit tests.")
-
-set(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO "")
-
-if(NOT STANDARD_MATH_LIBRARY_FOUND)
-
- message(FATAL_ERROR
- "Can't link to the standard math library. Please report to the Eigen developers, telling them about your platform.")
-
-else()
-
- if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
- set(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO "${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${STANDARD_MATH_LIBRARY}")
- else()
- set(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO "${STANDARD_MATH_LIBRARY}")
- endif()
-
-endif()
-
-if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
- message(STATUS "Standard libraries to link to explicitly: ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO}")
-else()
- message(STATUS "Standard libraries to link to explicitly: none")
-endif()
-
-option(EIGEN_BUILD_BTL "Build benchmark suite" OFF)
-
-# Disable pkgconfig only for native Windows builds
-if(NOT WIN32 OR NOT CMAKE_HOST_SYSTEM_NAME MATCHES Windows)
- option(EIGEN_BUILD_PKGCONFIG "Build pkg-config .pc file for Eigen" ON)
-endif()
-
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
-
-option(EIGEN_SPLIT_LARGE_TESTS "Split large tests into smaller executables" ON)
-
-option(EIGEN_DEFAULT_TO_ROW_MAJOR "Use row-major as default matrix storage order" OFF)
-if(EIGEN_DEFAULT_TO_ROW_MAJOR)
- add_definitions("-DEIGEN_DEFAULT_TO_ROW_MAJOR")
-endif()
-
-set(EIGEN_TEST_MAX_SIZE "320" CACHE STRING "Maximal matrix/vector size, default is 320")
-
-macro(ei_add_cxx_compiler_flag FLAG)
- string(REGEX REPLACE "-" "" SFLAG1 ${FLAG})
- string(REGEX REPLACE "\\+" "p" SFLAG ${SFLAG1})
- check_cxx_compiler_flag(${FLAG} COMPILER_SUPPORT_${SFLAG})
- if(COMPILER_SUPPORT_${SFLAG})
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAG}")
- endif()
-endmacro(ei_add_cxx_compiler_flag)
-
-if(NOT MSVC)
- # We assume that other compilers are partly compatible with GNUCC
-
- # clang outputs some warnings for unknown flags that are not caught by check_cxx_compiler_flag
- # adding -Werror turns such warnings into errors
- check_cxx_compiler_flag("-Werror" COMPILER_SUPPORT_WERROR)
- if(COMPILER_SUPPORT_WERROR)
- set(CMAKE_REQUIRED_FLAGS "-Werror")
- endif()
- ei_add_cxx_compiler_flag("-pedantic")
- ei_add_cxx_compiler_flag("-Wall")
- ei_add_cxx_compiler_flag("-Wextra")
- #ei_add_cxx_compiler_flag("-Weverything") # clang
-
- ei_add_cxx_compiler_flag("-Wundef")
- ei_add_cxx_compiler_flag("-Wcast-align")
- ei_add_cxx_compiler_flag("-Wchar-subscripts")
- ei_add_cxx_compiler_flag("-Wnon-virtual-dtor")
- ei_add_cxx_compiler_flag("-Wunused-local-typedefs")
- ei_add_cxx_compiler_flag("-Wpointer-arith")
- ei_add_cxx_compiler_flag("-Wwrite-strings")
- ei_add_cxx_compiler_flag("-Wformat-security")
- ei_add_cxx_compiler_flag("-Wshorten-64-to-32")
- ei_add_cxx_compiler_flag("-Wlogical-op")
- ei_add_cxx_compiler_flag("-Wenum-conversion")
- ei_add_cxx_compiler_flag("-Wc++11-extensions")
- ei_add_cxx_compiler_flag("-Wdouble-promotion")
-# ei_add_cxx_compiler_flag("-Wconversion")
-
- # -Wshadow is insanely too strict with gcc, hopefully it will become usable with gcc 6
- # if(NOT CMAKE_COMPILER_IS_GNUCXX OR (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "5.0.0"))
- if(NOT CMAKE_COMPILER_IS_GNUCXX)
- ei_add_cxx_compiler_flag("-Wshadow")
- endif()
-
- ei_add_cxx_compiler_flag("-Wno-psabi")
- ei_add_cxx_compiler_flag("-Wno-variadic-macros")
- ei_add_cxx_compiler_flag("-Wno-long-long")
-
- ei_add_cxx_compiler_flag("-fno-check-new")
- ei_add_cxx_compiler_flag("-fno-common")
- ei_add_cxx_compiler_flag("-fstrict-aliasing")
- ei_add_cxx_compiler_flag("-wd981") # disable ICC's "operands are evaluated in unspecified order" remark
- ei_add_cxx_compiler_flag("-wd2304") # disable ICC's "warning #2304: non-explicit constructor with single argument may cause implicit type conversion" produced by -Wnon-virtual-dtor
-
-
- # The -ansi flag must be added last, otherwise it is also used as a linker flag by check_cxx_compiler_flag making it fails
- # Moreover we should not set both -strict-ansi and -ansi
- check_cxx_compiler_flag("-strict-ansi" COMPILER_SUPPORT_STRICTANSI)
- ei_add_cxx_compiler_flag("-Qunused-arguments") # disable clang warning: argument unused during compilation: '-ansi'
-
- if(COMPILER_SUPPORT_STRICTANSI)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -strict-ansi")
- else()
- ei_add_cxx_compiler_flag("-ansi")
- endif()
-
- if(ANDROID_NDK)
- ei_add_cxx_compiler_flag("-pie")
- ei_add_cxx_compiler_flag("-fPIE")
- endif()
-
- set(CMAKE_REQUIRED_FLAGS "")
-
- option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF)
- if(EIGEN_TEST_SSE2)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse2")
- message(STATUS "Enabling SSE2 in tests/examples")
- endif()
-
- option(EIGEN_TEST_SSE3 "Enable/Disable SSE3 in tests/examples" OFF)
- if(EIGEN_TEST_SSE3)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse3")
- message(STATUS "Enabling SSE3 in tests/examples")
- endif()
-
- option(EIGEN_TEST_SSSE3 "Enable/Disable SSSE3 in tests/examples" OFF)
- if(EIGEN_TEST_SSSE3)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mssse3")
- message(STATUS "Enabling SSSE3 in tests/examples")
- endif()
-
- option(EIGEN_TEST_SSE4_1 "Enable/Disable SSE4.1 in tests/examples" OFF)
- if(EIGEN_TEST_SSE4_1)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1")
- message(STATUS "Enabling SSE4.1 in tests/examples")
- endif()
-
- option(EIGEN_TEST_SSE4_2 "Enable/Disable SSE4.2 in tests/examples" OFF)
- if(EIGEN_TEST_SSE4_2)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
- message(STATUS "Enabling SSE4.2 in tests/examples")
- endif()
-
- option(EIGEN_TEST_AVX "Enable/Disable AVX in tests/examples" OFF)
- if(EIGEN_TEST_AVX)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
- message(STATUS "Enabling AVX in tests/examples")
- endif()
-
- option(EIGEN_TEST_FMA "Enable/Disable FMA in tests/examples" OFF)
- if(EIGEN_TEST_FMA AND NOT EIGEN_TEST_NEON)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
- message(STATUS "Enabling FMA in tests/examples")
- endif()
-
- option(EIGEN_TEST_AVX512 "Enable/Disable AVX512 in tests/examples" OFF)
- if(EIGEN_TEST_AVX512)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -fabi-version=6 -DEIGEN_ENABLE_AVX512")
- message(STATUS "Enabling AVX512 in tests/examples")
- endif()
-
- option(EIGEN_TEST_F16C "Enable/Disable F16C in tests/examples" OFF)
- if(EIGEN_TEST_F16C)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mf16c")
- message(STATUS "Enabling F16C in tests/examples")
- endif()
-
- option(EIGEN_TEST_ALTIVEC "Enable/Disable AltiVec in tests/examples" OFF)
- if(EIGEN_TEST_ALTIVEC)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec -mabi=altivec")
- message(STATUS "Enabling AltiVec in tests/examples")
- endif()
-
- option(EIGEN_TEST_VSX "Enable/Disable VSX in tests/examples" OFF)
- if(EIGEN_TEST_VSX)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m64 -mvsx")
- message(STATUS "Enabling VSX in tests/examples")
- endif()
-
- option(EIGEN_TEST_NEON "Enable/Disable Neon in tests/examples" OFF)
- if(EIGEN_TEST_NEON)
- if(EIGEN_TEST_FMA)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon-vfpv4")
- else()
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon")
- endif()
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfloat-abi=hard")
- message(STATUS "Enabling NEON in tests/examples")
- endif()
-
- option(EIGEN_TEST_NEON64 "Enable/Disable Neon in tests/examples" OFF)
- if(EIGEN_TEST_NEON64)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
- message(STATUS "Enabling NEON in tests/examples")
- endif()
-
- option(EIGEN_TEST_ZVECTOR "Enable/Disable S390X(zEC13) ZVECTOR in tests/examples" OFF)
- if(EIGEN_TEST_ZVECTOR)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=z13 -mzvector")
- message(STATUS "Enabling S390X(zEC13) ZVECTOR in tests/examples")
- endif()
-
- check_cxx_compiler_flag("-fopenmp" COMPILER_SUPPORT_OPENMP)
- if(COMPILER_SUPPORT_OPENMP)
- option(EIGEN_TEST_OPENMP "Enable/Disable OpenMP in tests/examples" OFF)
- if(EIGEN_TEST_OPENMP)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
- message(STATUS "Enabling OpenMP in tests/examples")
- endif()
- endif()
-
-else(NOT MSVC)
-
- # C4127 - conditional expression is constant
- # C4714 - marked as __forceinline not inlined (I failed to deactivate it selectively)
- # We can disable this warning in the unit tests since it is clear that it occurs
- # because we are oftentimes returning objects that have a destructor or may
- # throw exceptions - in particular in the unit tests we are throwing extra many
- # exceptions to cover indexing errors.
- # C4505 - unreferenced local function has been removed (impossible to deactive selectively)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /wd4127 /wd4505 /wd4714")
-
- # replace all /Wx by /W4
- string(REGEX REPLACE "/W[0-9]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
-
- check_cxx_compiler_flag("/openmp" COMPILER_SUPPORT_OPENMP)
- if(COMPILER_SUPPORT_OPENMP)
- option(EIGEN_TEST_OPENMP "Enable/Disable OpenMP in tests/examples" OFF)
- if(EIGEN_TEST_OPENMP)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /openmp")
- message(STATUS "Enabling OpenMP in tests/examples")
- endif()
- endif()
-
- option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF)
- if(EIGEN_TEST_SSE2)
- if(NOT CMAKE_CL_64)
- # arch is not supported on 64 bit systems, SSE is enabled automatically.
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2")
- endif(NOT CMAKE_CL_64)
- message(STATUS "Enabling SSE2 in tests/examples")
- endif(EIGEN_TEST_SSE2)
-endif(NOT MSVC)
-
-option(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION "Disable explicit vectorization in tests/examples" OFF)
-option(EIGEN_TEST_X87 "Force using X87 instructions. Implies no vectorization." OFF)
-option(EIGEN_TEST_32BIT "Force generating 32bit code." OFF)
-
-if(EIGEN_TEST_X87)
- set(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION ON)
- if(CMAKE_COMPILER_IS_GNUCXX)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpmath=387")
- message(STATUS "Forcing use of x87 instructions in tests/examples")
- else()
- message(STATUS "EIGEN_TEST_X87 ignored on your compiler")
- endif()
-endif()
-
-if(EIGEN_TEST_32BIT)
- if(CMAKE_COMPILER_IS_GNUCXX)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32")
- message(STATUS "Forcing generation of 32-bit code in tests/examples")
- else()
- message(STATUS "EIGEN_TEST_32BIT ignored on your compiler")
- endif()
-endif()
-
-if(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION)
- add_definitions(-DEIGEN_DONT_VECTORIZE=1)
- message(STATUS "Disabling vectorization in tests/examples")
-endif()
-
-option(EIGEN_TEST_NO_EXPLICIT_ALIGNMENT "Disable explicit alignment (hence vectorization) in tests/examples" OFF)
-if(EIGEN_TEST_NO_EXPLICIT_ALIGNMENT)
- add_definitions(-DEIGEN_DONT_ALIGN=1)
- message(STATUS "Disabling alignment in tests/examples")
-endif()
-
-option(EIGEN_TEST_NO_EXCEPTIONS "Disables C++ exceptions" OFF)
-if(EIGEN_TEST_NO_EXCEPTIONS)
- ei_add_cxx_compiler_flag("-fno-exceptions")
- message(STATUS "Disabling exceptions in tests/examples")
-endif()
-
-option(EIGEN_TEST_CXX11 "Enable testing with C++11 and C++11 features (e.g. Tensor module)." OFF)
-
-set(EIGEN_CUDA_COMPUTE_ARCH 30 CACHE STRING "The CUDA compute architecture level to target when compiling CUDA code")
-
-include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
-
-# Backward compatibility support for EIGEN_INCLUDE_INSTALL_DIR
-if(EIGEN_INCLUDE_INSTALL_DIR)
- message(WARNING "EIGEN_INCLUDE_INSTALL_DIR is deprecated. Use INCLUDE_INSTALL_DIR instead.")
-endif()
-
-if(EIGEN_INCLUDE_INSTALL_DIR AND NOT INCLUDE_INSTALL_DIR)
- set(INCLUDE_INSTALL_DIR ${EIGEN_INCLUDE_INSTALL_DIR}
- CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed")
-else()
- set(INCLUDE_INSTALL_DIR
- "${CMAKE_INSTALL_INCLUDEDIR}/eigen3"
- CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen header files are installed"
- )
-endif()
-set(CMAKEPACKAGE_INSTALL_DIR
- "${CMAKE_INSTALL_LIBDIR}/cmake/eigen3"
- CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where Eigen3Config.cmake is installed"
- )
-set(PKGCONFIG_INSTALL_DIR
- "${CMAKE_INSTALL_DATADIR}/pkgconfig"
- CACHE PATH "The directory relative to CMAKE_PREFIX_PATH where eigen3.pc is installed"
- )
-
-
-# similar to set_target_properties but append the property instead of overwriting it
-macro(ei_add_target_property target prop value)
-
- get_target_property(previous ${target} ${prop})
- # if the property wasn't previously set, ${previous} is now "previous-NOTFOUND" which cmake allows catching with plain if()
- if(NOT previous)
- set(previous "")
- endif(NOT previous)
- set_target_properties(${target} PROPERTIES ${prop} "${previous} ${value}")
-endmacro(ei_add_target_property)
-
-install(FILES
- signature_of_eigen3_matrix_library
- DESTINATION ${INCLUDE_INSTALL_DIR} COMPONENT Devel
- )
-
-if(EIGEN_BUILD_PKGCONFIG)
- configure_file(eigen3.pc.in eigen3.pc @ONLY)
- install(FILES ${CMAKE_CURRENT_BINARY_DIR}/eigen3.pc
- DESTINATION ${PKGCONFIG_INSTALL_DIR}
- )
-endif()
-
-add_subdirectory(Eigen)
-
-add_subdirectory(doc EXCLUDE_FROM_ALL)
-
-include(EigenConfigureTesting)
-
-# fixme, not sure this line is still needed:
-enable_testing() # must be called from the root CMakeLists, see man page
-
-
-if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
- add_subdirectory(test) # can't do EXCLUDE_FROM_ALL here, breaks CTest
-else()
- add_subdirectory(test EXCLUDE_FROM_ALL)
-endif()
-
-if(EIGEN_LEAVE_TEST_IN_ALL_TARGET)
- add_subdirectory(blas)
- add_subdirectory(lapack)
-else()
- add_subdirectory(blas EXCLUDE_FROM_ALL)
- add_subdirectory(lapack EXCLUDE_FROM_ALL)
-endif()
-
-# add SYCL
-option(EIGEN_TEST_SYCL "Add Sycl support." OFF)
-if(EIGEN_TEST_SYCL)
- set (CMAKE_MODULE_PATH "${CMAKE_ROOT}/Modules" "cmake/Modules/" "${CMAKE_MODULE_PATH}")
- include(FindComputeCpp)
-endif()
-
-add_subdirectory(unsupported)
-
-add_subdirectory(demos EXCLUDE_FROM_ALL)
-
-# must be after test and unsupported, for configuring buildtests.in
-add_subdirectory(scripts EXCLUDE_FROM_ALL)
-
-# TODO: consider also replacing EIGEN_BUILD_BTL by a custom target "make btl"?
-if(EIGEN_BUILD_BTL)
- add_subdirectory(bench/btl EXCLUDE_FROM_ALL)
-endif(EIGEN_BUILD_BTL)
-
-if(NOT WIN32)
- add_subdirectory(bench/spbench EXCLUDE_FROM_ALL)
-endif(NOT WIN32)
-
-configure_file(scripts/cdashtesting.cmake.in cdashtesting.cmake @ONLY)
-
-ei_testing_print_summary()
-
-message(STATUS "")
-message(STATUS "Configured Eigen ${EIGEN_VERSION_NUMBER}")
-message(STATUS "")
-
-option(EIGEN_FAILTEST "Enable failtests." OFF)
-if(EIGEN_FAILTEST)
- add_subdirectory(failtest)
-endif()
-
-string(TOLOWER "${CMAKE_GENERATOR}" cmake_generator_tolower)
-if(cmake_generator_tolower MATCHES "makefile")
- message(STATUS "Some things you can do now:")
- message(STATUS "--------------+--------------------------------------------------------------")
- message(STATUS "Command | Description")
- message(STATUS "--------------+--------------------------------------------------------------")
- message(STATUS "make install | Install Eigen. Headers will be installed to:")
- message(STATUS " | /")
- message(STATUS " | Using the following values:")
- message(STATUS " | CMAKE_INSTALL_PREFIX: ${CMAKE_INSTALL_PREFIX}")
- message(STATUS " | INCLUDE_INSTALL_DIR: ${INCLUDE_INSTALL_DIR}")
- message(STATUS " | Change the install location of Eigen headers using:")
- message(STATUS " | cmake . -DCMAKE_INSTALL_PREFIX=yourprefix")
- message(STATUS " | Or:")
- message(STATUS " | cmake . -DINCLUDE_INSTALL_DIR=yourdir")
- message(STATUS "make doc | Generate the API documentation, requires Doxygen & LaTeX")
- message(STATUS "make check | Build and run the unit-tests. Read this page:")
- message(STATUS " | http://eigen.tuxfamily.org/index.php?title=Tests")
- message(STATUS "make blas | Build BLAS library (not the same thing as Eigen)")
- message(STATUS "make uninstall| Removes files installed by make install")
- message(STATUS "--------------+--------------------------------------------------------------")
-else()
- message(STATUS "To build/run the unit tests, read this page:")
- message(STATUS " http://eigen.tuxfamily.org/index.php?title=Tests")
-endif()
-
-message(STATUS "")
-
-
-set ( EIGEN_VERSION_STRING ${EIGEN_VERSION_NUMBER} )
-set ( EIGEN_VERSION_MAJOR ${EIGEN_WORLD_VERSION} )
-set ( EIGEN_VERSION_MINOR ${EIGEN_MAJOR_VERSION} )
-set ( EIGEN_VERSION_PATCH ${EIGEN_MINOR_VERSION} )
-set ( EIGEN_DEFINITIONS "")
-set ( EIGEN_INCLUDE_DIR "${CMAKE_INSTALL_PREFIX}/${INCLUDE_INSTALL_DIR}" )
-set ( EIGEN_INCLUDE_DIRS ${EIGEN_INCLUDE_DIR} )
-set ( EIGEN_ROOT_DIR ${CMAKE_INSTALL_PREFIX} )
-
-configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3Config.cmake.in
- ${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
- @ONLY ESCAPE_QUOTES
- )
-
-install ( FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/UseEigen3.cmake
- ${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
- DESTINATION ${CMAKEPACKAGE_INSTALL_DIR}
- )
-
-# Add uninstall target
-add_custom_target ( uninstall
- COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/EigenUninstall.cmake)
diff --git a/eigen/COPYING.BSD b/eigen/COPYING.BSD
deleted file mode 100644
index 11971ffe2..000000000
--- a/eigen/COPYING.BSD
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- Copyright (c) 2011, Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without modification,
- are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the name of Intel Corporation nor the names of its contributors may
- be used to endorse or promote products derived from this software without
- specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
- ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
\ No newline at end of file
diff --git a/eigen/COPYING.GPL b/eigen/COPYING.GPL
deleted file mode 100644
index 94a9ed024..000000000
--- a/eigen/COPYING.GPL
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see .
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- Copyright (C)
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-.
diff --git a/eigen/COPYING.LGPL b/eigen/COPYING.LGPL
deleted file mode 100644
index 4362b4915..000000000
--- a/eigen/COPYING.LGPL
+++ /dev/null
@@ -1,502 +0,0 @@
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 2.1, February 1999
-
- Copyright (C) 1991, 1999 Free Software Foundation, Inc.
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-[This is the first released version of the Lesser GPL. It also counts
- as the successor of the GNU Library Public License, version 2, hence
- the version number 2.1.]
-
- Preamble
-
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-Licenses are intended to guarantee your freedom to share and change
-free software--to make sure the software is free for all its users.
-
- This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it. You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations below.
-
- When we speak of free software, we are referring to freedom of use,
-not price. Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
-
- To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surrender these
-rights. These restrictions translate to certain responsibilities for
-you if you distribute copies of the library or if you modify it.
-
- For example, if you distribute copies of the library, whether gratis
-or for a fee, you must give the recipients all the rights that we gave
-you. You must make sure that they, too, receive or can get the source
-code. If you link other code with the library, you must provide
-complete object files to the recipients, so that they can relink them
-with the library after making changes to the library and recompiling
-it. And you must show them these terms so they know their rights.
-
- We protect your rights with a two-step method: (1) we copyright the
-library, and (2) we offer you this license, which gives you legal
-permission to copy, distribute and/or modify the library.
-
- To protect each distributor, we want to make it very clear that
-there is no warranty for the free library. Also, if the library is
-modified by someone else and passed on, the recipients should know
-that what they have is not the original version, so that the original
-author's reputation will not be affected by problems that might be
-introduced by others.
-
- Finally, software patents pose a constant threat to the existence of
-any free program. We wish to make sure that a company cannot
-effectively restrict the users of a free program by obtaining a
-restrictive license from a patent holder. Therefore, we insist that
-any patent license obtained for a version of the library must be
-consistent with the full freedom of use specified in this license.
-
- Most GNU software, including some libraries, is covered by the
-ordinary GNU General Public License. This license, the GNU Lesser
-General Public License, applies to certain designated libraries, and
-is quite different from the ordinary General Public License. We use
-this license for certain libraries in order to permit linking those
-libraries into non-free programs.
-
- When a program is linked with a library, whether statically or using
-a shared library, the combination of the two is legally speaking a
-combined work, a derivative of the original library. The ordinary
-General Public License therefore permits such linking only if the
-entire combination fits its criteria of freedom. The Lesser General
-Public License permits more lax criteria for linking other code with
-the library.
-
- We call this license the "Lesser" General Public License because it
-does Less to protect the user's freedom than the ordinary General
-Public License. It also provides other free software developers Less
-of an advantage over competing non-free programs. These disadvantages
-are the reason we use the ordinary General Public License for many
-libraries. However, the Lesser license provides advantages in certain
-special circumstances.
-
- For example, on rare occasions, there may be a special need to
-encourage the widest possible use of a certain library, so that it becomes
-a de-facto standard. To achieve this, non-free programs must be
-allowed to use the library. A more frequent case is that a free
-library does the same job as widely used non-free libraries. In this
-case, there is little to gain by limiting the free library to free
-software only, so we use the Lesser General Public License.
-
- In other cases, permission to use a particular library in non-free
-programs enables a greater number of people to use a large body of
-free software. For example, permission to use the GNU C Library in
-non-free programs enables many more people to use the whole GNU
-operating system, as well as its variant, the GNU/Linux operating
-system.
-
- Although the Lesser General Public License is Less protective of the
-users' freedom, it does ensure that the user of a program that is
-linked with the Library has the freedom and the wherewithal to run
-that program using a modified version of the Library.
-
- The precise terms and conditions for copying, distribution and
-modification follow. Pay close attention to the difference between a
-"work based on the library" and a "work that uses the library". The
-former contains code derived from the library, whereas the latter must
-be combined with the library in order to run.
-
- GNU LESSER GENERAL PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. This License Agreement applies to any software library or other
-program which contains a notice placed by the copyright holder or
-other authorized party saying it may be distributed under the terms of
-this Lesser General Public License (also called "this License").
-Each licensee is addressed as "you".
-
- A "library" means a collection of software functions and/or data
-prepared so as to be conveniently linked with application programs
-(which use some of those functions and data) to form executables.
-
- The "Library", below, refers to any such software library or work
-which has been distributed under these terms. A "work based on the
-Library" means either the Library or any derivative work under
-copyright law: that is to say, a work containing the Library or a
-portion of it, either verbatim or with modifications and/or translated
-straightforwardly into another language. (Hereinafter, translation is
-included without limitation in the term "modification".)
-
- "Source code" for a work means the preferred form of the work for
-making modifications to it. For a library, complete source code means
-all the source code for all modules it contains, plus any associated
-interface definition files, plus the scripts used to control compilation
-and installation of the library.
-
- Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of
-running a program using the Library is not restricted, and output from
-such a program is covered only if its contents constitute a work based
-on the Library (independent of the use of the Library in a tool for
-writing it). Whether that is true depends on what the Library does
-and what the program that uses the Library does.
-
- 1. You may copy and distribute verbatim copies of the Library's
-complete source code as you receive it, in any medium, provided that
-you conspicuously and appropriately publish on each copy an
-appropriate copyright notice and disclaimer of warranty; keep intact
-all the notices that refer to this License and to the absence of any
-warranty; and distribute a copy of this License along with the
-Library.
-
- You may charge a fee for the physical act of transferring a copy,
-and you may at your option offer warranty protection in exchange for a
-fee.
-
- 2. You may modify your copy or copies of the Library or any portion
-of it, thus forming a work based on the Library, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
- a) The modified work must itself be a software library.
-
- b) You must cause the files modified to carry prominent notices
- stating that you changed the files and the date of any change.
-
- c) You must cause the whole of the work to be licensed at no
- charge to all third parties under the terms of this License.
-
- d) If a facility in the modified Library refers to a function or a
- table of data to be supplied by an application program that uses
- the facility, other than as an argument passed when the facility
- is invoked, then you must make a good faith effort to ensure that,
- in the event an application does not supply such function or
- table, the facility still operates, and performs whatever part of
- its purpose remains meaningful.
-
- (For example, a function in a library to compute square roots has
- a purpose that is entirely well-defined independent of the
- application. Therefore, Subsection 2d requires that any
- application-supplied function or table used by this function must
- be optional: if the application does not supply it, the square
- root function must still compute square roots.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Library,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based
-on the Library, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote
-it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Library.
-
-In addition, mere aggregation of another work not based on the Library
-with the Library (or with a work based on the Library) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
- 3. You may opt to apply the terms of the ordinary GNU General Public
-License instead of this License to a given copy of the Library. To do
-this, you must alter all the notices that refer to this License, so
-that they refer to the ordinary GNU General Public License, version 2,
-instead of to this License. (If a newer version than version 2 of the
-ordinary GNU General Public License has appeared, then you can specify
-that version instead if you wish.) Do not make any other change in
-these notices.
-
- Once this change is made in a given copy, it is irreversible for
-that copy, so the ordinary GNU General Public License applies to all
-subsequent copies and derivative works made from that copy.
-
- This option is useful when you wish to copy part of the code of
-the Library into a program that is not a library.
-
- 4. You may copy and distribute the Library (or a portion or
-derivative of it, under Section 2) in object code or executable form
-under the terms of Sections 1 and 2 above provided that you accompany
-it with the complete corresponding machine-readable source code, which
-must be distributed under the terms of Sections 1 and 2 above on a
-medium customarily used for software interchange.
-
- If distribution of object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the
-source code from the same place satisfies the requirement to
-distribute the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
- 5. A program that contains no derivative of any portion of the
-Library, but is designed to work with the Library by being compiled or
-linked with it, is called a "work that uses the Library". Such a
-work, in isolation, is not a derivative work of the Library, and
-therefore falls outside the scope of this License.
-
- However, linking a "work that uses the Library" with the Library
-creates an executable that is a derivative of the Library (because it
-contains portions of the Library), rather than a "work that uses the
-library". The executable is therefore covered by this License.
-Section 6 states terms for distribution of such executables.
-
- When a "work that uses the Library" uses material from a header file
-that is part of the Library, the object code for the work may be a
-derivative work of the Library even though the source code is not.
-Whether this is true is especially significant if the work can be
-linked without the Library, or if the work is itself a library. The
-threshold for this to be true is not precisely defined by law.
-
- If such an object file uses only numerical parameters, data
-structure layouts and accessors, and small macros and small inline
-functions (ten lines or less in length), then the use of the object
-file is unrestricted, regardless of whether it is legally a derivative
-work. (Executables containing this object code plus portions of the
-Library will still fall under Section 6.)
-
- Otherwise, if the work is a derivative of the Library, you may
-distribute the object code for the work under the terms of Section 6.
-Any executables containing that work also fall under Section 6,
-whether or not they are linked directly with the Library itself.
-
- 6. As an exception to the Sections above, you may also combine or
-link a "work that uses the Library" with the Library to produce a
-work containing portions of the Library, and distribute that work
-under terms of your choice, provided that the terms permit
-modification of the work for the customer's own use and reverse
-engineering for debugging such modifications.
-
- You must give prominent notice with each copy of the work that the
-Library is used in it and that the Library and its use are covered by
-this License. You must supply a copy of this License. If the work
-during execution displays copyright notices, you must include the
-copyright notice for the Library among them, as well as a reference
-directing the user to the copy of this License. Also, you must do one
-of these things:
-
- a) Accompany the work with the complete corresponding
- machine-readable source code for the Library including whatever
- changes were used in the work (which must be distributed under
- Sections 1 and 2 above); and, if the work is an executable linked
- with the Library, with the complete machine-readable "work that
- uses the Library", as object code and/or source code, so that the
- user can modify the Library and then relink to produce a modified
- executable containing the modified Library. (It is understood
- that the user who changes the contents of definitions files in the
- Library will not necessarily be able to recompile the application
- to use the modified definitions.)
-
- b) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (1) uses at run time a
- copy of the library already present on the user's computer system,
- rather than copying library functions into the executable, and (2)
- will operate properly with a modified version of the library, if
- the user installs one, as long as the modified version is
- interface-compatible with the version that the work was made with.
-
- c) Accompany the work with a written offer, valid for at
- least three years, to give the same user the materials
- specified in Subsection 6a, above, for a charge no more
- than the cost of performing this distribution.
-
- d) If distribution of the work is made by offering access to copy
- from a designated place, offer equivalent access to copy the above
- specified materials from the same place.
-
- e) Verify that the user has already received a copy of these
- materials or that you have already sent this user a copy.
-
- For an executable, the required form of the "work that uses the
-Library" must include any data and utility programs needed for
-reproducing the executable from it. However, as a special exception,
-the materials to be distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies
-the executable.
-
- It may happen that this requirement contradicts the license
-restrictions of other proprietary libraries that do not normally
-accompany the operating system. Such a contradiction means you cannot
-use both them and the Library together in an executable that you
-distribute.
-
- 7. You may place library facilities that are a work based on the
-Library side-by-side in a single library together with other library
-facilities not covered by this License, and distribute such a combined
-library, provided that the separate distribution of the work based on
-the Library and of the other library facilities is otherwise
-permitted, and provided that you do these two things:
-
- a) Accompany the combined library with a copy of the same work
- based on the Library, uncombined with any other library
- facilities. This must be distributed under the terms of the
- Sections above.
-
- b) Give prominent notice with the combined library of the fact
- that part of it is a work based on the Library, and explaining
- where to find the accompanying uncombined form of the same work.
-
- 8. You may not copy, modify, sublicense, link with, or distribute
-the Library except as expressly provided under this License. Any
-attempt otherwise to copy, modify, sublicense, link with, or
-distribute the Library is void, and will automatically terminate your
-rights under this License. However, parties who have received copies,
-or rights, from you under this License will not have their licenses
-terminated so long as such parties remain in full compliance.
-
- 9. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Library or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Library (or any work based on the
-Library), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Library or works based on it.
-
- 10. Each time you redistribute the Library (or any work based on the
-Library), the recipient automatically receives a license from the
-original licensor to copy, distribute, link with or modify the Library
-subject to these terms and conditions. You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties with
-this License.
-
- 11. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Library at all. For example, if a patent
-license would not permit royalty-free redistribution of the Library by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Library.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply,
-and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system which is
-implemented by public license practices. Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
- 12. If the distribution and/or use of the Library is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Library under this License may add
-an explicit geographical distribution limitation excluding those countries,
-so that distribution is permitted only in or among countries not thus
-excluded. In such case, this License incorporates the limitation as if
-written in the body of this License.
-
- 13. The Free Software Foundation may publish revised and/or new
-versions of the Lesser General Public License from time to time.
-Such new versions will be similar in spirit to the present version,
-but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Library
-specifies a version number of this License which applies to it and
-"any later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Library does not specify a
-license version number, you may choose any version ever published by
-the Free Software Foundation.
-
- 14. If you wish to incorporate parts of the Library into other free
-programs whose distribution conditions are incompatible with these,
-write to the author to ask for permission. For software which is
-copyrighted by the Free Software Foundation, write to the Free
-Software Foundation; we sometimes make exceptions for this. Our
-decision will be guided by the two goals of preserving the free status
-of all derivatives of our free software and of promoting the sharing
-and reuse of software generally.
-
- NO WARRANTY
-
- 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
-KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
-LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
-THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
-FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
-CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
-LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
-RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
-SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Libraries
-
- If you develop a new library, and you want it to be of the greatest
-possible use to the public, we recommend making it free software that
-everyone can redistribute and change. You can do so by permitting
-redistribution under these terms (or, alternatively, under the terms of the
-ordinary General Public License).
-
- To apply these terms, attach the following notices to the library. It is
-safest to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the library, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the
- library `Frob' (a library for tweaking knobs) written by James Random Hacker.
-
- , 1 April 1990
- Ty Coon, President of Vice
-
-That's all there is to it!
diff --git a/eigen/COPYING.MINPACK b/eigen/COPYING.MINPACK
deleted file mode 100644
index ae7984dae..000000000
--- a/eigen/COPYING.MINPACK
+++ /dev/null
@@ -1,52 +0,0 @@
-Minpack Copyright Notice (1999) University of Chicago. All rights reserved
-
-Redistribution and use in source and binary forms, with or
-without modification, are permitted provided that the
-following conditions are met:
-
-1. Redistributions of source code must retain the above
-copyright notice, this list of conditions and the following
-disclaimer.
-
-2. Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following
-disclaimer in the documentation and/or other materials
-provided with the distribution.
-
-3. The end-user documentation included with the
-redistribution, if any, must include the following
-acknowledgment:
-
- "This product includes software developed by the
- University of Chicago, as Operator of Argonne National
- Laboratory.
-
-Alternately, this acknowledgment may appear in the software
-itself, if and wherever such third-party acknowledgments
-normally appear.
-
-4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
-WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
-UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
-THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
-OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
-OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
-USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
-THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
-DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
-UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
-BE CORRECTED.
-
-5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
-HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
-ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
-INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
-ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
-PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
-SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
-(INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
-EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
-POSSIBILITY OF SUCH LOSS OR DAMAGES.
-
diff --git a/eigen/COPYING.MPL2 b/eigen/COPYING.MPL2
deleted file mode 100644
index 14e2f777f..000000000
--- a/eigen/COPYING.MPL2
+++ /dev/null
@@ -1,373 +0,0 @@
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/eigen/COPYING.README b/eigen/COPYING.README
deleted file mode 100644
index de5b63215..000000000
--- a/eigen/COPYING.README
+++ /dev/null
@@ -1,18 +0,0 @@
-Eigen is primarily MPL2 licensed. See COPYING.MPL2 and these links:
- http://www.mozilla.org/MPL/2.0/
- http://www.mozilla.org/MPL/2.0/FAQ.html
-
-Some files contain third-party code under BSD or LGPL licenses, whence the other
-COPYING.* files here.
-
-All the LGPL code is either LGPL 2.1-only, or LGPL 2.1-or-later.
-For this reason, the COPYING.LGPL file contains the LGPL 2.1 text.
-
-If you want to guarantee that the Eigen code that you are #including is licensed
-under the MPL2 and possibly more permissive licenses (like BSD), #define this
-preprocessor symbol:
- EIGEN_MPL2_ONLY
-For example, with most compilers, you could add this to your project CXXFLAGS:
- -DEIGEN_MPL2_ONLY
-This will cause a compilation error to be generated if you #include any code that is
-LGPL licensed.
diff --git a/eigen/CTestConfig.cmake b/eigen/CTestConfig.cmake
deleted file mode 100644
index 4c0027824..000000000
--- a/eigen/CTestConfig.cmake
+++ /dev/null
@@ -1,17 +0,0 @@
-## This file should be placed in the root directory of your project.
-## Then modify the CMakeLists.txt file in the root directory of your
-## project to incorporate the testing dashboard.
-## # The following are required to uses Dart and the Cdash dashboard
-## ENABLE_TESTING()
-## INCLUDE(CTest)
-set(CTEST_PROJECT_NAME "Eigen")
-set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
-
-set(CTEST_DROP_METHOD "http")
-set(CTEST_DROP_SITE "manao.inria.fr")
-set(CTEST_DROP_LOCATION "/CDash/submit.php?project=Eigen")
-set(CTEST_DROP_SITE_CDASH TRUE)
-set(CTEST_PROJECT_SUBPROJECTS
-Official
-Unsupported
-)
diff --git a/eigen/CTestCustom.cmake.in b/eigen/CTestCustom.cmake.in
deleted file mode 100644
index 9fed9d327..000000000
--- a/eigen/CTestCustom.cmake.in
+++ /dev/null
@@ -1,3 +0,0 @@
-
-set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "2000")
-set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS "2000")
diff --git a/eigen/Eigen/CMakeLists.txt b/eigen/Eigen/CMakeLists.txt
deleted file mode 100644
index 9eb502b79..000000000
--- a/eigen/Eigen/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-include(RegexUtils)
-test_escape_string_as_regex()
-
-file(GLOB Eigen_directory_files "*")
-
-escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
-
-foreach(f ${Eigen_directory_files})
- if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/src")
- list(APPEND Eigen_directory_files_to_install ${f})
- endif()
-endforeach(f ${Eigen_directory_files})
-
-install(FILES
- ${Eigen_directory_files_to_install}
- DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel
- )
-
-install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel FILES_MATCHING PATTERN "*.h")
diff --git a/eigen/Eigen/Cholesky b/eigen/Eigen/Cholesky
deleted file mode 100644
index 369d1f5ec..000000000
--- a/eigen/Eigen/Cholesky
+++ /dev/null
@@ -1,41 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CHOLESKY_MODULE_H
-#define EIGEN_CHOLESKY_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup Cholesky_Module Cholesky module
- *
- *
- *
- * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
- * Those decompositions are also accessible via the following methods:
- * - MatrixBase::llt()
- * - MatrixBase::ldlt()
- * - SelfAdjointView::llt()
- * - SelfAdjointView::ldlt()
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/Cholesky/LLT.h"
-#include "src/Cholesky/LDLT.h"
-#ifdef EIGEN_USE_LAPACKE
-#include "src/misc/lapacke.h"
-#include "src/Cholesky/LLT_LAPACKE.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_CHOLESKY_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/eigen/Eigen/CholmodSupport b/eigen/Eigen/CholmodSupport
deleted file mode 100644
index bed8924d3..000000000
--- a/eigen/Eigen/CholmodSupport
+++ /dev/null
@@ -1,48 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H
-#define EIGEN_CHOLMODSUPPORT_MODULE_H
-
-#include "SparseCore"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-extern "C" {
- #include
-}
-
-/** \ingroup Support_modules
- * \defgroup CholmodSupport_Module CholmodSupport module
- *
- * This module provides an interface to the Cholmod library which is part of the suitesparse package.
- * It provides the two following main factorization classes:
- * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
- * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
- *
- * For the sake of completeness, this module also propose the two following classes:
- * - class CholmodSimplicialLLT
- * - class CholmodSimplicialLDLT
- * Note that these classes does not bring any particular advantage compared to the built-in
- * SimplicialLLT and SimplicialLDLT factorization classes.
- *
- * \code
- * #include
- * \endcode
- *
- * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be linked to the cholmod library and its dependencies.
- * The dependencies depend on how cholmod has been compiled.
- * For a cmake based project, you can use our FindCholmod.cmake module to help you in this task.
- *
- */
-
-#include "src/CholmodSupport/CholmodSupport.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_CHOLMODSUPPORT_MODULE_H
-
diff --git a/eigen/Eigen/Core b/eigen/Eigen/Core
deleted file mode 100644
index 82558155e..000000000
--- a/eigen/Eigen/Core
+++ /dev/null
@@ -1,512 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008 Gael Guennebaud
-// Copyright (C) 2007-2011 Benoit Jacob
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_CORE_H
-#define EIGEN_CORE_H
-
-// first thing Eigen does: stop the compiler from committing suicide
-#include "src/Core/util/DisableStupidWarnings.h"
-
-// Handle NVCC/CUDA/SYCL
-#if defined(__CUDACC__) || defined(__SYCL_DEVICE_ONLY__)
- // Do not try asserts on CUDA and SYCL!
- #ifndef EIGEN_NO_DEBUG
- #define EIGEN_NO_DEBUG
- #endif
-
- #ifdef EIGEN_INTERNAL_DEBUGGING
- #undef EIGEN_INTERNAL_DEBUGGING
- #endif
-
- #ifdef EIGEN_EXCEPTIONS
- #undef EIGEN_EXCEPTIONS
- #endif
-
- // All functions callable from CUDA code must be qualified with __device__
- #ifdef __CUDACC__
- // Do not try to vectorize on CUDA and SYCL!
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-
- #define EIGEN_DEVICE_FUNC __host__ __device__
- // We need math_functions.hpp to ensure that that EIGEN_USING_STD_MATH macro
- // works properly on the device side
- #include
- #else
- #define EIGEN_DEVICE_FUNC
- #endif
-
-#else
- #define EIGEN_DEVICE_FUNC
-
-#endif
-
-// When compiling CUDA device code with NVCC, pull in math functions from the
-// global namespace. In host mode, and when device doee with clang, use the
-// std versions.
-#if defined(__CUDA_ARCH__) && defined(__NVCC__)
- #define EIGEN_USING_STD_MATH(FUNC) using ::FUNC;
-#else
- #define EIGEN_USING_STD_MATH(FUNC) using std::FUNC;
-#endif
-
-#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL)
- #define EIGEN_EXCEPTIONS
-#endif
-
-#ifdef EIGEN_EXCEPTIONS
- #include
-#endif
-
-// then include this file where all our macros are defined. It's really important to do it first because
-// it's where we do all the alignment settings (platform detection and honoring the user's will if he
-// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.
-#include "src/Core/util/Macros.h"
-
-// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)
-// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.
-#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6)
- #pragma GCC optimize ("-fno-ipa-cp-clone")
-#endif
-
-#include
-
-// this include file manages BLAS and MKL related macros
-// and inclusion of their respective header files
-#include "src/Core/util/MKL_support.h"
-
-// if alignment is disabled, then disable vectorization. Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into
-// account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks
-#if EIGEN_MAX_ALIGN_BYTES==0
- #ifndef EIGEN_DONT_VECTORIZE
- #define EIGEN_DONT_VECTORIZE
- #endif
-#endif
-
-#if EIGEN_COMP_MSVC
- #include // for _aligned_malloc -- need it regardless of whether vectorization is enabled
- #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later
- // Remember that usage of defined() in a #define is undefined by the standard.
- // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.
- #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64
- #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER
- #endif
- #endif
-#else
- // Remember that usage of defined() in a #define is undefined by the standard
- #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )
- #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC
- #endif
-#endif
-
-#ifndef EIGEN_DONT_VECTORIZE
-
- #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)
-
- // Defines symbols for compile-time detection of which instructions are
- // used.
- // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_SSE
- #define EIGEN_VECTORIZE_SSE2
-
- // Detect sse3/ssse3/sse4:
- // gcc and icc defines __SSE3__, ...
- // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you
- // want to force the use of those instructions with msvc.
- #ifdef __SSE3__
- #define EIGEN_VECTORIZE_SSE3
- #endif
- #ifdef __SSSE3__
- #define EIGEN_VECTORIZE_SSSE3
- #endif
- #ifdef __SSE4_1__
- #define EIGEN_VECTORIZE_SSE4_1
- #endif
- #ifdef __SSE4_2__
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __AVX__
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_SSE3
- #define EIGEN_VECTORIZE_SSSE3
- #define EIGEN_VECTORIZE_SSE4_1
- #define EIGEN_VECTORIZE_SSE4_2
- #endif
- #ifdef __AVX2__
- #define EIGEN_VECTORIZE_AVX2
- #endif
- #ifdef __FMA__
- #define EIGEN_VECTORIZE_FMA
- #endif
- #if defined(__AVX512F__) && defined(EIGEN_ENABLE_AVX512)
- #define EIGEN_VECTORIZE_AVX512
- #define EIGEN_VECTORIZE_AVX2
- #define EIGEN_VECTORIZE_AVX
- #define EIGEN_VECTORIZE_FMA
- #ifdef __AVX512DQ__
- #define EIGEN_VECTORIZE_AVX512DQ
- #endif
- #endif
-
- // include files
-
- // This extern "C" works around a MINGW-w64 compilation issue
- // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354
- // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).
- // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations
- // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;
- // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too.
- // notice that since these are C headers, the extern "C" is theoretically needed anyways.
- extern "C" {
- // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.
- // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:
- #if EIGEN_COMP_ICC >= 1110
- #include
- #else
- #include
- #include
- #include
- #ifdef EIGEN_VECTORIZE_SSE3
- #include
- #endif
- #ifdef EIGEN_VECTORIZE_SSSE3
- #include
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_1
- #include
- #endif
- #ifdef EIGEN_VECTORIZE_SSE4_2
- #include
- #endif
- #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512)
- #include
- #endif
- #endif
- } // end extern "C"
- #elif defined __VSX__
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_VSX
- #include
- // We need to #undef all these ugly tokens defined in
- // => use __vector instead of vector
- #undef bool
- #undef vector
- #undef pixel
- #elif defined __ALTIVEC__
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_ALTIVEC
- #include
- // We need to #undef all these ugly tokens defined in
- // => use __vector instead of vector
- #undef bool
- #undef vector
- #undef pixel
- #elif (defined __ARM_NEON) || (defined __ARM_NEON__)
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_NEON
- #include
- #elif (defined __s390x__ && defined __VEC__)
- #define EIGEN_VECTORIZE
- #define EIGEN_VECTORIZE_ZVECTOR
- #include
- #endif
-#endif
-
-#if defined(__F16C__) && !defined(EIGEN_COMP_CLANG)
- // We can use the optimized fp16 to float and float to fp16 conversion routines
- #define EIGEN_HAS_FP16_C
-#endif
-
-#if defined __CUDACC__
- #define EIGEN_VECTORIZE_CUDA
- #include
- #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
- #define EIGEN_HAS_CUDA_FP16
- #endif
-#endif
-
-#if defined EIGEN_HAS_CUDA_FP16
- #include
- #include
-#endif
-
-#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)
- #define EIGEN_HAS_OPENMP
-#endif
-
-#ifdef EIGEN_HAS_OPENMP
-#include
-#endif
-
-// MSVC for windows mobile does not have the errno.h file
-#if !(EIGEN_COMP_MSVC && EIGEN_OS_WINCE) && !EIGEN_COMP_ARM
-#define EIGEN_HAS_ERRNO
-#endif
-
-#ifdef EIGEN_HAS_ERRNO
-#include
-#endif
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include // for CHAR_BIT
-// for min/max:
-#include
-
-// for std::is_nothrow_move_assignable
-#ifdef EIGEN_INCLUDE_TYPE_TRAITS
-#include
-#endif
-
-// for outputting debug info
-#ifdef EIGEN_DEBUG_ASSIGN
-#include
-#endif
-
-// required for __cpuid, needs to be included after cmath
-#if EIGEN_COMP_MSVC && EIGEN_ARCH_i386_OR_x86_64 && !EIGEN_OS_WINCE
- #include
-#endif
-
-/** \brief Namespace containing all symbols from the %Eigen library. */
-namespace Eigen {
-
-inline static const char *SimdInstructionSetsInUse(void) {
-#if defined(EIGEN_VECTORIZE_AVX512)
- return "AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_AVX)
- return "AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_SSE4_2)
- return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2";
-#elif defined(EIGEN_VECTORIZE_SSE4_1)
- return "SSE, SSE2, SSE3, SSSE3, SSE4.1";
-#elif defined(EIGEN_VECTORIZE_SSSE3)
- return "SSE, SSE2, SSE3, SSSE3";
-#elif defined(EIGEN_VECTORIZE_SSE3)
- return "SSE, SSE2, SSE3";
-#elif defined(EIGEN_VECTORIZE_SSE2)
- return "SSE, SSE2";
-#elif defined(EIGEN_VECTORIZE_ALTIVEC)
- return "AltiVec";
-#elif defined(EIGEN_VECTORIZE_VSX)
- return "VSX";
-#elif defined(EIGEN_VECTORIZE_NEON)
- return "ARM NEON";
-#elif defined(EIGEN_VECTORIZE_ZVECTOR)
- return "S390X ZVECTOR";
-#else
- return "None";
-#endif
-}
-
-} // end namespace Eigen
-
-#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || defined EIGEN2_SUPPORT
-// This will generate an error message:
-#error Eigen2-support is only available up to version 3.2. Please go to "http://eigen.tuxfamily.org/index.php?title=Eigen2" for further information
-#endif
-
-// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to
-// ensure QNX/QCC support
-using std::size_t;
-// gcc 4.6.0 wants std:: for ptrdiff_t
-using std::ptrdiff_t;
-
-/** \defgroup Core_Module Core module
- * This is the main module of Eigen providing dense matrix and vector support
- * (both fixed and dynamic size) with all the features corresponding to a BLAS library
- * and much more...
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/Core/util/Constants.h"
-#include "src/Core/util/Meta.h"
-#include "src/Core/util/ForwardDeclarations.h"
-#include "src/Core/util/StaticAssert.h"
-#include "src/Core/util/XprHelper.h"
-#include "src/Core/util/Memory.h"
-
-#include "src/Core/NumTraits.h"
-#include "src/Core/MathFunctions.h"
-#include "src/Core/GenericPacketMath.h"
-#include "src/Core/MathFunctionsImpl.h"
-
-#if defined EIGEN_VECTORIZE_AVX512
- #include "src/Core/arch/SSE/PacketMath.h"
- #include "src/Core/arch/AVX/PacketMath.h"
- #include "src/Core/arch/AVX512/PacketMath.h"
- #include "src/Core/arch/AVX512/MathFunctions.h"
-#elif defined EIGEN_VECTORIZE_AVX
- // Use AVX for floats and doubles, SSE for integers
- #include "src/Core/arch/SSE/PacketMath.h"
- #include "src/Core/arch/SSE/Complex.h"
- #include "src/Core/arch/SSE/MathFunctions.h"
- #include "src/Core/arch/AVX/PacketMath.h"
- #include "src/Core/arch/AVX/MathFunctions.h"
- #include "src/Core/arch/AVX/Complex.h"
- #include "src/Core/arch/AVX/TypeCasting.h"
-#elif defined EIGEN_VECTORIZE_SSE
- #include "src/Core/arch/SSE/PacketMath.h"
- #include "src/Core/arch/SSE/MathFunctions.h"
- #include "src/Core/arch/SSE/Complex.h"
- #include "src/Core/arch/SSE/TypeCasting.h"
-#elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
- #include "src/Core/arch/AltiVec/PacketMath.h"
- #include "src/Core/arch/AltiVec/MathFunctions.h"
- #include "src/Core/arch/AltiVec/Complex.h"
-#elif defined EIGEN_VECTORIZE_NEON
- #include "src/Core/arch/NEON/PacketMath.h"
- #include "src/Core/arch/NEON/MathFunctions.h"
- #include "src/Core/arch/NEON/Complex.h"
-#elif defined EIGEN_VECTORIZE_ZVECTOR
- #include "src/Core/arch/ZVector/PacketMath.h"
- #include "src/Core/arch/ZVector/MathFunctions.h"
- #include "src/Core/arch/ZVector/Complex.h"
-#endif
-
-// Half float support
-#include "src/Core/arch/CUDA/Half.h"
-#include "src/Core/arch/CUDA/PacketMathHalf.h"
-#include "src/Core/arch/CUDA/TypeCasting.h"
-
-#if defined EIGEN_VECTORIZE_CUDA
- #include "src/Core/arch/CUDA/PacketMath.h"
- #include "src/Core/arch/CUDA/MathFunctions.h"
-#endif
-
-#include "src/Core/arch/Default/Settings.h"
-
-#include "src/Core/functors/TernaryFunctors.h"
-#include "src/Core/functors/BinaryFunctors.h"
-#include "src/Core/functors/UnaryFunctors.h"
-#include "src/Core/functors/NullaryFunctors.h"
-#include "src/Core/functors/StlFunctors.h"
-#include "src/Core/functors/AssignmentFunctors.h"
-
-// Specialized functors to enable the processing of complex numbers
-// on CUDA devices
-#include "src/Core/arch/CUDA/Complex.h"
-
-#include "src/Core/DenseCoeffsBase.h"
-#include "src/Core/DenseBase.h"
-#include "src/Core/MatrixBase.h"
-#include "src/Core/EigenBase.h"
-
-#include "src/Core/Product.h"
-#include "src/Core/CoreEvaluators.h"
-#include "src/Core/AssignEvaluator.h"
-
-#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874
- // at least confirmed with Doxygen 1.5.5 and 1.5.6
- #include "src/Core/Assign.h"
-#endif
-
-#include "src/Core/ArrayBase.h"
-#include "src/Core/util/BlasUtil.h"
-#include "src/Core/DenseStorage.h"
-#include "src/Core/NestByValue.h"
-
-// #include "src/Core/ForceAlignedAccess.h"
-
-#include "src/Core/ReturnByValue.h"
-#include "src/Core/NoAlias.h"
-#include "src/Core/PlainObjectBase.h"
-#include "src/Core/Matrix.h"
-#include "src/Core/Array.h"
-#include "src/Core/CwiseTernaryOp.h"
-#include "src/Core/CwiseBinaryOp.h"
-#include "src/Core/CwiseUnaryOp.h"
-#include "src/Core/CwiseNullaryOp.h"
-#include "src/Core/CwiseUnaryView.h"
-#include "src/Core/SelfCwiseBinaryOp.h"
-#include "src/Core/Dot.h"
-#include "src/Core/StableNorm.h"
-#include "src/Core/Stride.h"
-#include "src/Core/MapBase.h"
-#include "src/Core/Map.h"
-#include "src/Core/Ref.h"
-#include "src/Core/Block.h"
-#include "src/Core/VectorBlock.h"
-#include "src/Core/Transpose.h"
-#include "src/Core/DiagonalMatrix.h"
-#include "src/Core/Diagonal.h"
-#include "src/Core/DiagonalProduct.h"
-#include "src/Core/Redux.h"
-#include "src/Core/Visitor.h"
-#include "src/Core/Fuzzy.h"
-#include "src/Core/IO.h"
-#include "src/Core/Swap.h"
-#include "src/Core/CommaInitializer.h"
-#include "src/Core/GeneralProduct.h"
-#include "src/Core/Solve.h"
-#include "src/Core/Inverse.h"
-#include "src/Core/SolverBase.h"
-#include "src/Core/PermutationMatrix.h"
-#include "src/Core/Transpositions.h"
-#include "src/Core/TriangularMatrix.h"
-#include "src/Core/SelfAdjointView.h"
-#include "src/Core/products/GeneralBlockPanelKernel.h"
-#include "src/Core/products/Parallelizer.h"
-#include "src/Core/ProductEvaluators.h"
-#include "src/Core/products/GeneralMatrixVector.h"
-#include "src/Core/products/GeneralMatrixMatrix.h"
-#include "src/Core/SolveTriangular.h"
-#include "src/Core/products/GeneralMatrixMatrixTriangular.h"
-#include "src/Core/products/SelfadjointMatrixVector.h"
-#include "src/Core/products/SelfadjointMatrixMatrix.h"
-#include "src/Core/products/SelfadjointProduct.h"
-#include "src/Core/products/SelfadjointRank2Update.h"
-#include "src/Core/products/TriangularMatrixVector.h"
-#include "src/Core/products/TriangularMatrixMatrix.h"
-#include "src/Core/products/TriangularSolverMatrix.h"
-#include "src/Core/products/TriangularSolverVector.h"
-#include "src/Core/BandMatrix.h"
-#include "src/Core/CoreIterators.h"
-#include "src/Core/ConditionEstimator.h"
-
-#include "src/Core/BooleanRedux.h"
-#include "src/Core/Select.h"
-#include "src/Core/VectorwiseOp.h"
-#include "src/Core/Random.h"
-#include "src/Core/Replicate.h"
-#include "src/Core/Reverse.h"
-#include "src/Core/ArrayWrapper.h"
-
-#ifdef EIGEN_USE_BLAS
-#include "src/Core/products/GeneralMatrixMatrix_BLAS.h"
-#include "src/Core/products/GeneralMatrixVector_BLAS.h"
-#include "src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h"
-#include "src/Core/products/SelfadjointMatrixMatrix_BLAS.h"
-#include "src/Core/products/SelfadjointMatrixVector_BLAS.h"
-#include "src/Core/products/TriangularMatrixMatrix_BLAS.h"
-#include "src/Core/products/TriangularMatrixVector_BLAS.h"
-#include "src/Core/products/TriangularSolverMatrix_BLAS.h"
-#endif // EIGEN_USE_BLAS
-
-#ifdef EIGEN_USE_MKL_VML
-#include "src/Core/Assign_MKL.h"
-#endif
-
-#include "src/Core/GlobalFunctions.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_CORE_H
diff --git a/eigen/Eigen/Dense b/eigen/Eigen/Dense
deleted file mode 100644
index 5768910bd..000000000
--- a/eigen/Eigen/Dense
+++ /dev/null
@@ -1,7 +0,0 @@
-#include "Core"
-#include "LU"
-#include "Cholesky"
-#include "QR"
-#include "SVD"
-#include "Geometry"
-#include "Eigenvalues"
diff --git a/eigen/Eigen/Eigen b/eigen/Eigen/Eigen
deleted file mode 100644
index 654c8dc63..000000000
--- a/eigen/Eigen/Eigen
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "Dense"
-#include "Sparse"
diff --git a/eigen/Eigen/Eigenvalues b/eigen/Eigen/Eigenvalues
deleted file mode 100644
index 009e529e1..000000000
--- a/eigen/Eigen/Eigenvalues
+++ /dev/null
@@ -1,57 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_EIGENVALUES_MODULE_H
-#define EIGEN_EIGENVALUES_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-#include "Cholesky"
-#include "Jacobi"
-#include "Householder"
-#include "LU"
-#include "Geometry"
-
-/** \defgroup Eigenvalues_Module Eigenvalues module
- *
- *
- *
- * This module mainly provides various eigenvalue solvers.
- * This module also provides some MatrixBase methods, including:
- * - MatrixBase::eigenvalues(),
- * - MatrixBase::operatorNorm()
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/misc/RealSvd2x2.h"
-#include "src/Eigenvalues/Tridiagonalization.h"
-#include "src/Eigenvalues/RealSchur.h"
-#include "src/Eigenvalues/EigenSolver.h"
-#include "src/Eigenvalues/SelfAdjointEigenSolver.h"
-#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h"
-#include "src/Eigenvalues/HessenbergDecomposition.h"
-#include "src/Eigenvalues/ComplexSchur.h"
-#include "src/Eigenvalues/ComplexEigenSolver.h"
-#include "src/Eigenvalues/RealQZ.h"
-#include "src/Eigenvalues/GeneralizedEigenSolver.h"
-#include "src/Eigenvalues/MatrixBaseEigenvalues.h"
-#ifdef EIGEN_USE_LAPACKE
-#include "src/misc/lapacke.h"
-#include "src/Eigenvalues/RealSchur_LAPACKE.h"
-#include "src/Eigenvalues/ComplexSchur_LAPACKE.h"
-#include "src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_EIGENVALUES_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/eigen/Eigen/Geometry b/eigen/Eigen/Geometry
deleted file mode 100644
index 716d52952..000000000
--- a/eigen/Eigen/Geometry
+++ /dev/null
@@ -1,62 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_GEOMETRY_MODULE_H
-#define EIGEN_GEOMETRY_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-#include "SVD"
-#include "LU"
-#include
-
-/** \defgroup Geometry_Module Geometry module
- *
- * This module provides support for:
- * - fixed-size homogeneous transformations
- * - translation, scaling, 2D and 3D rotations
- * - \link Quaternion quaternions \endlink
- * - cross products (\ref MatrixBase::cross, \ref MatrixBase::cross3)
- * - orthognal vector generation (\ref MatrixBase::unitOrthogonal)
- * - some linear components: \link ParametrizedLine parametrized-lines \endlink and \link Hyperplane hyperplanes \endlink
- * - \link AlignedBox axis aligned bounding boxes \endlink
- * - \link umeyama least-square transformation fitting \endlink
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/Geometry/OrthoMethods.h"
-#include "src/Geometry/EulerAngles.h"
-
-#include "src/Geometry/Homogeneous.h"
-#include "src/Geometry/RotationBase.h"
-#include "src/Geometry/Rotation2D.h"
-#include "src/Geometry/Quaternion.h"
-#include "src/Geometry/AngleAxis.h"
-#include "src/Geometry/Transform.h"
-#include "src/Geometry/Translation.h"
-#include "src/Geometry/Scaling.h"
-#include "src/Geometry/Hyperplane.h"
-#include "src/Geometry/ParametrizedLine.h"
-#include "src/Geometry/AlignedBox.h"
-#include "src/Geometry/Umeyama.h"
-
-// Use the SSE optimized version whenever possible. At the moment the
-// SSE version doesn't compile when AVX is enabled
-#if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX
-#include "src/Geometry/arch/Geometry_SSE.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_GEOMETRY_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
-
diff --git a/eigen/Eigen/Householder b/eigen/Eigen/Householder
deleted file mode 100644
index 89cd81b1a..000000000
--- a/eigen/Eigen/Householder
+++ /dev/null
@@ -1,30 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_HOUSEHOLDER_MODULE_H
-#define EIGEN_HOUSEHOLDER_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup Householder_Module Householder module
- * This module provides Householder transformations.
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/Householder/Householder.h"
-#include "src/Householder/HouseholderSequence.h"
-#include "src/Householder/BlockHouseholder.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_HOUSEHOLDER_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/eigen/Eigen/IterativeLinearSolvers b/eigen/Eigen/IterativeLinearSolvers
deleted file mode 100644
index 957d5750b..000000000
--- a/eigen/Eigen/IterativeLinearSolvers
+++ /dev/null
@@ -1,48 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
-#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
-
-#include "SparseCore"
-#include "OrderingMethods"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/**
- * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module
- *
- * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a squared matrix, usually very large and sparse.
- * Those solvers are accessible via the following classes:
- * - ConjugateGradient for selfadjoint (hermitian) matrices,
- * - LeastSquaresConjugateGradient for rectangular least-square problems,
- * - BiCGSTAB for general square matrices.
- *
- * These iterative solvers are associated with some preconditioners:
- * - IdentityPreconditioner - not really useful
- * - DiagonalPreconditioner - also called Jacobi preconditioner, work very well on diagonal dominant matrices.
- * - IncompleteLUT - incomplete LU factorization with dual thresholding
- *
- * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport.
- *
- \code
- #include
- \endcode
- */
-
-#include "src/IterativeLinearSolvers/SolveWithGuess.h"
-#include "src/IterativeLinearSolvers/IterativeSolverBase.h"
-#include "src/IterativeLinearSolvers/BasicPreconditioners.h"
-#include "src/IterativeLinearSolvers/ConjugateGradient.h"
-#include "src/IterativeLinearSolvers/LeastSquareConjugateGradient.h"
-#include "src/IterativeLinearSolvers/BiCGSTAB.h"
-#include "src/IterativeLinearSolvers/IncompleteLUT.h"
-#include "src/IterativeLinearSolvers/IncompleteCholesky.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H
diff --git a/eigen/Eigen/Jacobi b/eigen/Eigen/Jacobi
deleted file mode 100644
index 17c1d785a..000000000
--- a/eigen/Eigen/Jacobi
+++ /dev/null
@@ -1,33 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_JACOBI_MODULE_H
-#define EIGEN_JACOBI_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup Jacobi_Module Jacobi module
- * This module provides Jacobi and Givens rotations.
- *
- * \code
- * #include
- * \endcode
- *
- * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation:
- * - MatrixBase::applyOnTheLeft()
- * - MatrixBase::applyOnTheRight().
- */
-
-#include "src/Jacobi/Jacobi.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_JACOBI_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
-
diff --git a/eigen/Eigen/LU b/eigen/Eigen/LU
deleted file mode 100644
index 6f6c55629..000000000
--- a/eigen/Eigen/LU
+++ /dev/null
@@ -1,46 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_LU_MODULE_H
-#define EIGEN_LU_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup LU_Module LU module
- * This module includes %LU decomposition and related notions such as matrix inversion and determinant.
- * This module defines the following MatrixBase methods:
- * - MatrixBase::inverse()
- * - MatrixBase::determinant()
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/misc/Kernel.h"
-#include "src/misc/Image.h"
-#include "src/LU/FullPivLU.h"
-#include "src/LU/PartialPivLU.h"
-#ifdef EIGEN_USE_LAPACKE
-#include "src/misc/lapacke.h"
-#include "src/LU/PartialPivLU_LAPACKE.h"
-#endif
-#include "src/LU/Determinant.h"
-#include "src/LU/InverseImpl.h"
-
-// Use the SSE optimized version whenever possible. At the moment the
-// SSE version doesn't compile when AVX is enabled
-#if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX
- #include "src/LU/arch/Inverse_SSE.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_LU_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/eigen/Eigen/MetisSupport b/eigen/Eigen/MetisSupport
deleted file mode 100644
index 85c41bf34..000000000
--- a/eigen/Eigen/MetisSupport
+++ /dev/null
@@ -1,35 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_METISSUPPORT_MODULE_H
-#define EIGEN_METISSUPPORT_MODULE_H
-
-#include "SparseCore"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-extern "C" {
-#include
-}
-
-
-/** \ingroup Support_modules
- * \defgroup MetisSupport_Module MetisSupport module
- *
- * \code
- * #include
- * \endcode
- * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis).
- * It can be used just as any other built-in method as explained in \link OrderingMethods_Module here. \endlink
- */
-
-
-#include "src/MetisSupport/MetisSupport.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_METISSUPPORT_MODULE_H
diff --git a/eigen/Eigen/OrderingMethods b/eigen/Eigen/OrderingMethods
deleted file mode 100644
index d8ea36193..000000000
--- a/eigen/Eigen/OrderingMethods
+++ /dev/null
@@ -1,73 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_ORDERINGMETHODS_MODULE_H
-#define EIGEN_ORDERINGMETHODS_MODULE_H
-
-#include "SparseCore"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/**
- * \defgroup OrderingMethods_Module OrderingMethods module
- *
- * This module is currently for internal use only
- *
- * It defines various built-in and external ordering methods for sparse matrices.
- * They are typically used to reduce the number of elements during
- * the sparse matrix decomposition (LLT, LU, QR).
- * Precisely, in a preprocessing step, a permutation matrix P is computed using
- * those ordering methods and applied to the columns of the matrix.
- * Using for instance the sparse Cholesky decomposition, it is expected that
- * the nonzeros elements in LLT(A*P) will be much smaller than that in LLT(A).
- *
- *
- * Usage :
- * \code
- * #include
- * \endcode
- *
- * A simple usage is as a template parameter in the sparse decomposition classes :
- *
- * \code
- * SparseLU > solver;
- * \endcode
- *
- * \code
- * SparseQR > solver;
- * \endcode
- *
- * It is possible as well to call directly a particular ordering method for your own purpose,
- * \code
- * AMDOrdering ordering;
- * PermutationMatrix perm;
- * SparseMatrix A;
- * //Fill the matrix ...
- *
- * ordering(A, perm); // Call AMD
- * \endcode
- *
- * \note Some of these methods (like AMD or METIS), need the sparsity pattern
- * of the input matrix to be symmetric. When the matrix is structurally unsymmetric,
- * Eigen computes internally the pattern of \f$A^T*A\f$ before calling the method.
- * If your matrix is already symmetric (at leat in structure), you can avoid that
- * by calling the method with a SelfAdjointView type.
- *
- * \code
- * // Call the ordering on the pattern of the lower triangular matrix A
- * ordering(A.selfadjointView(), perm);
- * \endcode
- */
-
-#ifndef EIGEN_MPL2_ONLY
-#include "src/OrderingMethods/Amd.h"
-#endif
-
-#include "src/OrderingMethods/Ordering.h"
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_ORDERINGMETHODS_MODULE_H
diff --git a/eigen/Eigen/PaStiXSupport b/eigen/Eigen/PaStiXSupport
deleted file mode 100644
index de3a63b4d..000000000
--- a/eigen/Eigen/PaStiXSupport
+++ /dev/null
@@ -1,48 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PASTIXSUPPORT_MODULE_H
-#define EIGEN_PASTIXSUPPORT_MODULE_H
-
-#include "SparseCore"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-extern "C" {
-#include
-#include
-}
-
-#ifdef complex
-#undef complex
-#endif
-
-/** \ingroup Support_modules
- * \defgroup PaStiXSupport_Module PaStiXSupport module
- *
- * This module provides an interface to the PaSTiX library.
- * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver.
- * It provides the two following main factorization classes:
- * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization.
- * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization.
- * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern).
- *
- * \code
- * #include
- * \endcode
- *
- * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies.
- * The dependencies depend on how PaSTiX has been compiled.
- * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task.
- *
- */
-
-#include "src/PaStiXSupport/PaStiXSupport.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_PASTIXSUPPORT_MODULE_H
diff --git a/eigen/Eigen/PardisoSupport b/eigen/Eigen/PardisoSupport
deleted file mode 100644
index 340edf51f..000000000
--- a/eigen/Eigen/PardisoSupport
+++ /dev/null
@@ -1,35 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_PARDISOSUPPORT_MODULE_H
-#define EIGEN_PARDISOSUPPORT_MODULE_H
-
-#include "SparseCore"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-#include
-
-/** \ingroup Support_modules
- * \defgroup PardisoSupport_Module PardisoSupport module
- *
- * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers.
- *
- * \code
- * #include
- * \endcode
- *
- * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be linked to the MKL library and its dependencies.
- * See this \ref TopicUsingIntelMKL "page" for more information on MKL-Eigen integration.
- *
- */
-
-#include "src/PardisoSupport/PardisoSupport.h"
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_PARDISOSUPPORT_MODULE_H
diff --git a/eigen/Eigen/QR b/eigen/Eigen/QR
deleted file mode 100644
index 80838e3bd..000000000
--- a/eigen/Eigen/QR
+++ /dev/null
@@ -1,47 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_QR_MODULE_H
-#define EIGEN_QR_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-#include "Cholesky"
-#include "Jacobi"
-#include "Householder"
-
-/** \defgroup QR_Module QR module
- *
- *
- *
- * This module provides various QR decompositions
- * This module also provides some MatrixBase methods, including:
- * - MatrixBase::householderQr()
- * - MatrixBase::colPivHouseholderQr()
- * - MatrixBase::fullPivHouseholderQr()
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/QR/HouseholderQR.h"
-#include "src/QR/FullPivHouseholderQR.h"
-#include "src/QR/ColPivHouseholderQR.h"
-#include "src/QR/CompleteOrthogonalDecomposition.h"
-#ifdef EIGEN_USE_LAPACKE
-#include "src/misc/lapacke.h"
-#include "src/QR/HouseholderQR_LAPACKE.h"
-#include "src/QR/ColPivHouseholderQR_LAPACKE.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_QR_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/eigen/Eigen/QtAlignedMalloc b/eigen/Eigen/QtAlignedMalloc
deleted file mode 100644
index 4044d5ac5..000000000
--- a/eigen/Eigen/QtAlignedMalloc
+++ /dev/null
@@ -1,40 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_QTMALLOC_MODULE_H
-#define EIGEN_QTMALLOC_MODULE_H
-
-#include "Core"
-
-#if (!EIGEN_MALLOC_ALREADY_ALIGNED)
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-void *qMalloc(size_t size)
-{
- return Eigen::internal::aligned_malloc(size);
-}
-
-void qFree(void *ptr)
-{
- Eigen::internal::aligned_free(ptr);
-}
-
-void *qRealloc(void *ptr, size_t size)
-{
- void* newPtr = Eigen::internal::aligned_malloc(size);
- memcpy(newPtr, ptr, size);
- Eigen::internal::aligned_free(ptr);
- return newPtr;
-}
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif
-
-#endif // EIGEN_QTMALLOC_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/eigen/Eigen/SPQRSupport b/eigen/Eigen/SPQRSupport
deleted file mode 100644
index f70390c17..000000000
--- a/eigen/Eigen/SPQRSupport
+++ /dev/null
@@ -1,34 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPQRSUPPORT_MODULE_H
-#define EIGEN_SPQRSUPPORT_MODULE_H
-
-#include "SparseCore"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-#include "SuiteSparseQR.hpp"
-
-/** \ingroup Support_modules
- * \defgroup SPQRSupport_Module SuiteSparseQR module
- *
- * This module provides an interface to the SPQR library, which is part of the suitesparse package.
- *
- * \code
- * #include
- * \endcode
- *
- * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...).
- * For a cmake based project, you can use our FindSPQR.cmake and FindCholmod.Cmake modules
- *
- */
-
-#include "src/CholmodSupport/CholmodSupport.h"
-#include "src/SPQRSupport/SuiteSparseQRSupport.h"
-
-#endif
diff --git a/eigen/Eigen/SVD b/eigen/Eigen/SVD
deleted file mode 100644
index 86143c23d..000000000
--- a/eigen/Eigen/SVD
+++ /dev/null
@@ -1,47 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SVD_MODULE_H
-#define EIGEN_SVD_MODULE_H
-
-#include "QR"
-#include "Householder"
-#include "Jacobi"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/** \defgroup SVD_Module SVD module
- *
- *
- *
- * This module provides SVD decomposition for matrices (both real and complex).
- * Two decomposition algorithms are provided:
- * - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very slow for larger ones.
- * - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast for large problems.
- * These decompositions are accessible via the respective classes and following MatrixBase methods:
- * - MatrixBase::jacobiSvd()
- * - MatrixBase::bdcSvd()
- *
- * \code
- * #include
- * \endcode
- */
-
-#include "src/misc/RealSvd2x2.h"
-#include "src/SVD/UpperBidiagonalization.h"
-#include "src/SVD/SVDBase.h"
-#include "src/SVD/JacobiSVD.h"
-#include "src/SVD/BDCSVD.h"
-#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
-#include "src/misc/lapacke.h"
-#include "src/SVD/JacobiSVD_LAPACKE.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_SVD_MODULE_H
-/* vim: set filetype=cpp et sw=2 ts=2 ai: */
diff --git a/eigen/Eigen/Sparse b/eigen/Eigen/Sparse
deleted file mode 100644
index a2ef7a665..000000000
--- a/eigen/Eigen/Sparse
+++ /dev/null
@@ -1,34 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSE_MODULE_H
-#define EIGEN_SPARSE_MODULE_H
-
-/** \defgroup Sparse_Module Sparse meta-module
- *
- * Meta-module including all related modules:
- * - \ref SparseCore_Module
- * - \ref OrderingMethods_Module
- * - \ref SparseCholesky_Module
- * - \ref SparseLU_Module
- * - \ref SparseQR_Module
- * - \ref IterativeLinearSolvers_Module
- *
- \code
- #include
- \endcode
- */
-
-#include "SparseCore"
-#include "OrderingMethods"
-#include "SparseCholesky"
-#include "SparseLU"
-#include "SparseQR"
-#include "IterativeLinearSolvers"
-
-#endif // EIGEN_SPARSE_MODULE_H
-
diff --git a/eigen/Eigen/SparseCholesky b/eigen/Eigen/SparseCholesky
deleted file mode 100644
index b6a320c40..000000000
--- a/eigen/Eigen/SparseCholesky
+++ /dev/null
@@ -1,45 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// Copyright (C) 2008-2013 Gael Guennebaud
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSECHOLESKY_MODULE_H
-#define EIGEN_SPARSECHOLESKY_MODULE_H
-
-#include "SparseCore"
-#include "OrderingMethods"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-/**
- * \defgroup SparseCholesky_Module SparseCholesky module
- *
- * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices.
- * Those decompositions are accessible via the following classes:
- * - SimplicialLLt,
- * - SimplicialLDLt
- *
- * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module.
- *
- * \code
- * #include
- * \endcode
- */
-
-#ifdef EIGEN_MPL2_ONLY
-#error The SparseCholesky module has nothing to offer in MPL2 only mode
-#endif
-
-#include "src/SparseCholesky/SimplicialCholesky.h"
-
-#ifndef EIGEN_MPL2_ONLY
-#include "src/SparseCholesky/SimplicialCholesky_impl.h"
-#endif
-
-#include "src/Core/util/ReenableStupidWarnings.h"
-
-#endif // EIGEN_SPARSECHOLESKY_MODULE_H
diff --git a/eigen/Eigen/SparseCore b/eigen/Eigen/SparseCore
deleted file mode 100644
index 76966c4c4..000000000
--- a/eigen/Eigen/SparseCore
+++ /dev/null
@@ -1,69 +0,0 @@
-// This file is part of Eigen, a lightweight C++ template library
-// for linear algebra.
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-#ifndef EIGEN_SPARSECORE_MODULE_H
-#define EIGEN_SPARSECORE_MODULE_H
-
-#include "Core"
-
-#include "src/Core/util/DisableStupidWarnings.h"
-
-#include
-#include