@article{783,
  title = {An analysis on the use of autoencoders for representation learning: Fundamentals, learning task case studies, explainability and challenges},
  journal = {Neurocomputing},
  volume = {404},
  year = {2020},
  note = {TIN2015-68854-R; TIN2017-89517-P; DeepSCOP Ayudas Fundaci{\'o}n BBVA a Equipos de Investigaci{\'o}n Cient{\'\i}fica en Big Data 2018},
  pages = {93--107},
  abstract = {In many machine learning tasks, learning a good representation of the data can be the key to building a well-performing solution. This is because most learning algorithms operate on the features in order to find models for the data. For instance, classification performance can improve if the data is mapped to a space where classes are easily separated, and regression can be facilitated by finding a manifold of data in the feature space. As a general rule, features are transformed by means of statistical methods such as principal component analysis, or manifold learning techniques such as Isomap or locally linear embedding. Among the plethora of representation learning methods, one of the most versatile tools is the autoencoder. In this paper we aim to demonstrate how to influence its learned representations to achieve the desired learning behavior. To this end, we present a series of learning tasks: data embedding for visualization, image denoising, semantic hashing, detection of abnormal behaviors and instance generation. We model them from the representation learning perspective, following the state-of-the-art methodologies in each field. A solution is proposed for each task, employing autoencoders as the only learning method. The theoretical developments are put into practice using a selection of datasets for the different problems and implementing each solution, followed by a discussion of the results in each case study and a brief explanation of six other learning applications. We also explore the current challenges and approaches to explainability in the context of autoencoders. All of this supports the conclusion that, thanks to alterations in their structure as well as their objective function, autoencoders may be the core of a possible solution to many problems which can be modeled as a transformation of the feature space.},
  keywords = {Autoencoders, Deep learning, Feature extraction, Representation learning},
  doi = {https://doi.org/10.1016/j.neucom.2020.04.057},
  author = {David Charte and Francisco Charte and M. J. del Jesus and F. Herrera}
}

@article{773,
  title = {EvoAAA: An evolutionary methodology for automated neural autoencoder architecture search},
  journal = {Integrated Computer-Aided Engineering},
  volume = {27},
  number = {3},
  year = {2020},
  month = {05/2020},
  pages = {211--231},
  abstract = {Machine learning models work better when curated features are provided to them. Feature engineering methods have usually been applied as a preprocessing step to obtain or build a proper feature set. In recent years, autoencoders (a specific type of symmetrical neural network) have been widely used to perform representation learning, proving their competitiveness against classical feature engineering algorithms. The main obstacle in the use of autoencoders is finding a good architecture, a process that most experts carry out manually. This paper proposes an automated search procedure for symmetrical autoencoder architectures, based on evolutionary methods. The methodology is tested against nine heterogeneous data sets. The obtained results show the ability of this approach to find better architectures, able to concentrate most of the useful information into a minimized encoding, in less time.},
  keywords = {Autoencoder, evolutionary methods, Representation learning},
  doi = {https://doi.org/10.3233/ICA-200619},
  author = {Francisco Charte and A.J. Rivera-Rivas and Francisco Mart{\'\i}nez and M. J. del Jesus}
}
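Both entries above center on autoencoders as representation learners. As a minimal illustration of that shared idea only, and not of the architectures or experiments of either paper, the following Python sketch trains a small undercomplete autoencoder with Keras and reuses its encoder as a feature extractor; the dataset (scikit-learn's digits), the layer sizes and the hyperparameters are arbitrary assumptions made for demonstration.

# Minimal sketch, assuming TensorFlow/Keras and scikit-learn are available.
# Not the method of either paper above; sizes and dataset are arbitrary.
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf

# Scale the 64 pixel features of the digits dataset into [0, 1].
X = MinMaxScaler().fit_transform(load_digits().data)

inputs = tf.keras.Input(shape=(64,))
encoded = tf.keras.layers.Dense(16, activation="relu")(inputs)    # bottleneck code
decoded = tf.keras.layers.Dense(64, activation="sigmoid")(encoded)

autoencoder = tf.keras.Model(inputs, decoded)
encoder = tf.keras.Model(inputs, encoded)  # reusable feature extractor

autoencoder.compile(optimizer="adam", loss="mse")
autoencoder.fit(X, X, epochs=20, batch_size=32, verbose=0)

codes = encoder.predict(X)  # learned 16-dimensional representation
print(codes.shape)          # (1797, 16)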
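The EvoAAA entry describes an evolutionary search over symmetrical autoencoder architectures. The toy sketch below conveys the flavour of such a search with a simple (1+1) evolution loop over encoder layer widths; the genome encoding, mutation operator, fitness penalty and all hyperparameters are assumptions for illustration only, not the procedure published in the paper.

# Toy sketch of evolutionary architecture search for a symmetrical
# autoencoder. All design choices here are illustrative assumptions.
import random
from sklearn.datasets import load_digits
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf

X = MinMaxScaler().fit_transform(load_digits().data)
split = int(0.8 * len(X))
X_train, X_val = X[:split], X[split:]

def build_autoencoder(widths):
    # Symmetrical autoencoder: encoder widths are mirrored in the decoder.
    inputs = tf.keras.Input(shape=(X.shape[1],))
    h = inputs
    for w in widths:                 # encoder layers
        h = tf.keras.layers.Dense(w, activation="relu")(h)
    for w in reversed(widths[:-1]):  # mirrored decoder layers
        h = tf.keras.layers.Dense(w, activation="relu")(h)
    outputs = tf.keras.layers.Dense(X.shape[1], activation="sigmoid")(h)
    model = tf.keras.Model(inputs, outputs)
    model.compile(optimizer="adam", loss="mse")
    return model

def fitness(widths):
    # Validation reconstruction error plus a small penalty on code size,
    # echoing the goal of concentrating information in a small encoding.
    model = build_autoencoder(widths)
    model.fit(X_train, X_train, epochs=10, batch_size=32, verbose=0)
    mse = model.evaluate(X_val, X_val, verbose=0)
    return mse + 0.001 * widths[-1]

def mutate(widths):
    # Randomly resize one layer, keeping widths positive.
    child = list(widths)
    i = random.randrange(len(child))
    child[i] = max(2, child[i] + random.choice([-8, -4, 4, 8]))
    return child

# (1+1) evolution: keep the better of parent and mutated child.
best = [32, 16]
best_fit = fitness(best)
for generation in range(5):
    child = mutate(best)
    child_fit = fitness(child)
    if child_fit < best_fit:
        best, best_fit = child, child_fit
    print(f"gen {generation}: best={best} fitness={best_fit:.4f}")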