@article{773,
  title    = {{EvoAAA}: An evolutionary methodology for automated neural autoencoder architecture search},
  journal  = {Integrated Computer-Aided Engineering},
  volume   = {27},
  number   = {3},
  year     = {2020},
  month    = may,
  pages    = {211--231},
  abstract = {Machine learning models work better when curated features are provided to them. Feature engineering methods have been usually used as a preprocessing step to obtain or build a proper feature set. In late years, autoencoders (a specific type of symmetrical neural network) have been widely used to perform representation learning, proving their competitiveness against classical feature engineering algorithms. The main obstacle in the use of autoencoders is finding a good architecture, a process that most experts confront manually. An automated autoencoder symmetrical architecture search procedure, based on evolutionary methods, is proposed in this paper. The methodology is tested against nine heterogeneous data sets. The obtained results show the ability of this approach to find better architectures, able to concentrate most of the useful information in a minimized encoding, in a reduced time.},
  keywords = {Autoencoder, evolutionary methods, Representation learning},
  doi      = {10.3233/ICA-200619},
  author   = {Charte, Francisco and Rivera-Rivas, A. J. and Mart{\'\i}nez, Francisco and del Jesus, M. J.},
}

@inproceedings{771,
  title     = {Automating Autoencoder Architecture Configuration: An Evolutionary Approach},
  booktitle = {International Work-Conference on the Interplay Between Natural and Artificial Computation},
  year      = {2019},
  note      = {TIN2015-68454-R},
  month     = may,
  pages     = {339--349},
  abstract  = {Learning from existing data allows building models able to classify patterns, infer association rules, predict future values in time series and much more. Choosing the right features is a vital step of the learning process, specially while dealing with high-dimensional spaces. Autoencoders (AEs) have shown ability to conduct manifold learning, compressing the original feature space without losing useful information. However, there is no optimal AE architecture for all datasets. In this paper we show how to use evolutionary approaches to automate AE architecture configuration. First, a coding to embed the AE configuration in a chromosome is proposed. Then, two evolutionary alternatives are compared against exhaustive search. The results show the great superiority of the evolutionary way.},
  keywords  = {Autoencoder, Deep learning, Evolutionary, Optimization},
  doi       = {10.1007/978-3-030-19591-5_35},
  author    = {Charte, Francisco and Rivera-Rivas, A. J. and Mart{\'\i}nez, Francisco and del Jesus, M. J.},
}