@article{781,
  title = {ClEnDAE: A classifier based on ensembles with built-in dimensionality reduction through denoising autoencoders},
  journal = {Information Sciences},
  volume = {565},
  year = {2021},
  note = {TIN2015-68454-R; PID2019-107793GB-I00 / AEI / 10.13039/501100011033},
  pages = {146-176},
  abstract = {High dimensionality is an issue that affects most classification algorithms: the predictive performance of many traditional classifiers decreases considerably as the number of features increases. Numerous proposals therefore try to mitigate the effects of this issue. This study proposes ClEnDAE, a new ensemble-based classifier whose components incorporate denoising autoencoders (DAEs) to reduce the dimensionality of the input space. On the one hand, the use of ensembles improves predictive performance by combining several components that work jointly. On the other hand, the use of DAEs generates a new higher-level, smaller-sized feature space, reducing the effects of high dimensionality. Finally, an experimental study is conducted to evaluate the behavior of ClEnDAE. Its first part compares the performance of ClEnDAE to a model based on a basic DAE and to the original untreated data. The second part analyzes the results of ClEnDAE and of other traditional dimensionality reduction methods in order to determine the improvement achieved by the proposed algorithm. In both parts of the experimentation, the results show that ClEnDAE offers better predictive performance than the other analyzed models. The main advantage of ClEnDAE is its combination of the ensemble-based methodology, where several components work in parallel, with DAEs, which generate new low-dimensional features that provide more relevant information. The classification performance is therefore better than with other classic proposals.},
  keywords = {Classification, Deep learning, Denoising autoencoders, Dimensionality reduction, Ensembles, Feature fusion},
  doi = {10.1016/j.ins.2021.02.060},
  author = {Francisco J. Pulgar and Francisco Charte and A. J. Rivera-Rivas and M. J. del Jesus}
}

@article{783,
  title = {An analysis on the use of autoencoders for representation learning: Fundamentals, learning task case studies, explainability and challenges},
  journal = {Neurocomputing},
  volume = {404},
  year = {2020},
  note = {TIN2015-68854-R; TIN2017-89517-P; DeepSCOP Ayudas Fundaci{\'o}n BBVA a Equipos de Investigaci{\'o}n Cient{\'\i}fica en Big Data 2018},
  pages = {93-107},
  abstract = {In many machine learning tasks, learning a good representation of the data can be the key to building a well-performing solution, since most learning algorithms operate on the features in order to find models for the data. For instance, classification performance can improve if the data are mapped to a space where classes are easily separated, and regression can be facilitated by finding a manifold of the data in the feature space. As a general rule, features are transformed by means of statistical methods such as principal component analysis, or manifold learning techniques such as Isomap or locally linear embedding. Among the plethora of representation learning methods, one of the most versatile tools is the autoencoder. In this paper we aim to demonstrate how to influence its learned representations to achieve the desired learning behavior. To this end, we present a series of learning tasks: data embedding for visualization, image denoising, semantic hashing, detection of abnormal behaviors and instance generation. We model them from the representation learning perspective, following the state-of-the-art methodologies of each field. A solution is proposed for each task employing autoencoders as the only learning method. The theoretical developments are put into practice by implementing each solution on a selection of datasets, followed by a discussion of the results in each case study and a brief explanation of six other learning applications. We also explore the current challenges and approaches to explainability in the context of autoencoders. All of this helps conclude that, thanks to alterations in their structure as well as their objective function, autoencoders may be the core of a possible solution to many problems which can be modeled as a transformation of the feature space.},
  keywords = {Autoencoders, Deep learning, Feature extraction, Representation learning},
  doi = {10.1016/j.neucom.2020.04.057},
  author = {David Charte and Francisco Charte and M. J. del Jesus and F. Herrera}
}
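Both entries above build on the same primitive: a denoising autoencoder (DAE) whose encoder yields a compact, higher-level feature space. As a quick orientation for the reader, below is a minimal DAE sketch in Python with Keras; the dimensions, noise level and data are illustrative assumptions, not the settings used in either paper.

import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

# Encoder maps 64 inputs to a 16-dimensional code; decoder reconstructs them.
input_dim, code_dim = 64, 16
inputs = keras.Input(shape=(input_dim,))
code = layers.Dense(code_dim, activation="relu")(inputs)
outputs = layers.Dense(input_dim, activation="sigmoid")(code)
dae = keras.Model(inputs, outputs)
encoder = keras.Model(inputs, code)
dae.compile(optimizer="adam", loss="mse")

# Denoising objective: reconstruct the clean data from corrupted inputs.
x = np.random.rand(1000, input_dim).astype("float32")  # placeholder data
x_noisy = np.clip(x + 0.1 * np.random.randn(*x.shape), 0.0, 1.0).astype("float32")
dae.fit(x_noisy, x, epochs=10, batch_size=32, verbose=0)

# The low-dimensional codes can now feed a classifier or an ensemble component.
reduced = encoder.predict(x)

Entry 781 combines several components built on this idea into an ensemble; the sketch shows only the single-DAE primitive they share.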
@article{784,
  title = {Artificial intelligence within the interplay between natural and artificial computation: Advances in data science, trends and applications},
  journal = {Neurocomputing},
  volume = {410},
  year = {2020},
  note = {TIN2017-85827-P; RTI2018-098913-B-I00; PSI2015-65848-R; PGC2018-098813-B-C31; PGC2018-098813-B-C32; RTI2018-101114-B-I; TIN2017-90135-R; RTI2018-098743-B-I00; RTI2018-094645-B-I00; FPU15/06512; FPU17/04154; FJCI-2017-33022; UMA18-FEDERJA-084; ED431C2017/12; ED431G/08; ED431C2018/29; Y2018/EMT-5062; ED431F2018/02; U01 AG024904; W81XWH-12-2-0012},
  pages = {237-270},
  abstract = {Artificial intelligence and all its supporting tools, e.g. machine and deep learning in computational intelligence-based systems, are rebuilding our society (economy, education, lifestyle, etc.) and promising a new era for the social welfare state. In this paper we summarize recent advances in data science and artificial intelligence within the interplay between natural and artificial computation. Recent works published in the latter field and the state of the art are reviewed in a comprehensive and self-contained way, providing a baseline framework for the international community in artificial intelligence. Moreover, this paper aims to provide a complete analysis and some relevant discussion of the current trends and insights within several theoretical and application fields covered in the essay, from theoretical models in artificial intelligence and machine learning to the most prospective applications in robotics, neuroscience, brain-computer interfaces, medicine and society in general.},
  keywords = {AI for social well-being, Alzheimer, Artificial intelligence (AI), Artificial neural networks (ANNs), Autism, Big Data, Computational neuroethology, Deep learning, Dyslexia, Emotion recognition, Evolutionary computation, Glaucoma, Human{\textendash}machine interaction, Machine learning, Neuroscience, Ontologies, Parkinson, Reinforcement learning, Robotics, Virtual reality},
  doi = {10.1016/j.neucom.2020.05.078},
  author = {Juan M. G{\'o}rriz and Javier Ram{\'\i}rez and Andr{\'e}s Ort{\'\i}z and Francisco J. Mart{\'\i}nez-Murcia and Ferm{\'\i}n Segovia and John Suckling and Matthew Leming and Yu-Dong Zhang and Jos{\'e} Ram{\'o}n {\'A}lvarez-S{\'a}nchez and Guido Bologna and Paula Bonomini and Fernando E. Casado and David Charte and Francisco Charte and Ricardo Contreras and Alfredo Cuesta Infante and Richard J. Duro and Antonio Fern{\'a}ndez Caballero and Eduardo Fern{\'a}ndez Jover and Pedro G{\'o}mez Vilda and Manuel Gra{\~n}a and F. Herrera and Roberto Iglesias and Anna Lekova and Javier de Lope and Ezequiel L{\'o}pez Rubio and Rafael Mart{\'\i}nez Tom{\'a}s and Miguel A. Molina-Cabello and Antonio S. Montemayor and Paulo Novais and Daniel Palacios-Alonso and Juan J. Pantrigo and Bryson R. Payne and F{\'e}lix de la Paz L{\'o}pez and Mar{\'\i}a Ang{\'e}lica Pinninghoff and Mariano Rinc{\'o}n and Jos{\'e} Santos and Karl Thurnhofer-Hemsi and Athanasios Tsanas and Ramiro Varela and Jose M. Ferr{\'a}ndez}
}
@article{774,
  title = {Choosing the proper autoencoder for feature fusion based on data complexity and classifiers: Analysis, tips and guidelines},
  journal = {Information Fusion},
  volume = {54},
  year = {2020},
  note = {TIN2015-68454-R},
  month = {02/2020},
  pages = {44-60},
  abstract = {Classifying data patterns is one of the most recurrent applications in machine learning. The number of input features influences the predictive performance of many classification models, and most classifiers have to work with high-dimensional spaces, so there is great interest in reducing the input space. Manifold learning has been shown to perform better than classical dimensionality reduction approaches, such as Principal Component Analysis and Linear Discriminant Analysis. In this sense, autoencoders (AEs) provide an automated way of performing feature fusion, finding the best manifold to reconstruct the data. Since there are several AE models and architectures, this study proposes an exhaustive analysis of the predictive performance of different AE models over a large number of datasets, aiming to provide a set of useful guidelines. These allow users to choose the appropriate AE model for each case, depending on the data traits and the classifier to be used. A thorough empirical analysis is conducted, including four AE models, four classification paradigms and a group of datasets with a variety of traits. A convenient set of rules to follow is obtained as a result.},
  keywords = {Autoencoders, Classification, Deep learning, Dimensionality reduction, Feature fusion},
  doi = {10.1016/j.inffus.2019.07.004},
  author = {Francisco J. Pulgar and Francisco Charte and A. J. Rivera-Rivas and M. J. del Jesus}
}

@conference{770,
  title = {A Showcase of the Use of Autoencoders in Feature Learning Applications},
  booktitle = {International Work-Conference on the Interplay Between Natural and Artificial Computation},
  year = {2019},
  note = {TIN2015-68854-R; TIN2017-89517-P},
  month = {05/2019},
  pages = {412-421},
  abstract = {Autoencoders are techniques for data representation learning based on artificial neural networks. Unlike other feature learning methods, which may be focused on finding specific transformations of the feature space, they can be adapted to fulfill many purposes, such as data visualization, denoising, anomaly detection and semantic hashing. This work presents these applications and provides details on how autoencoders can perform them, including code samples making use of ruta, an R package with an easy-to-use interface for autoencoder design and training. Along the way, explanations of how each learning task is achieved are provided, with the aim of helping the reader design their own autoencoders for these or other objectives.},
  keywords = {Autoencoders, Deep learning, Feature learning},
  doi = {10.1007/978-3-030-19651-6_40},
  author = {David Charte and Francisco Charte and M. J. del Jesus and F. Herrera}
}
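The two entries above study autoencoders as feature fusion tools ahead of a classifier. To make that workflow concrete, here is a minimal end-to-end sketch, assuming Python with Keras and scikit-learn rather than the ruta R package used in the conference paper: a basic AE is trained, its codes replace the original inputs, and the same classifier is fitted in both spaces for comparison. The dataset and dimensions are synthetic placeholders, not those used in either paper.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers

# Synthetic high-dimensional binary problem.
x = np.random.rand(1000, 100).astype("float32")
y = (x[:, :10].sum(axis=1) > 5).astype(int)
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)

# Basic AE, 100 -> 16 -> 100; the 16-unit code is the fused feature space.
inputs = keras.Input(shape=(100,))
code = layers.Dense(16, activation="relu")(inputs)
outputs = layers.Dense(100, activation="sigmoid")(code)
autoencoder = keras.Model(inputs, outputs)
encoder = keras.Model(inputs, code)
autoencoder.compile(optimizer="adam", loss="mse")
autoencoder.fit(x_train, x_train, epochs=10, batch_size=32, verbose=0)

# Classify in the reduced space and compare against the raw features.
clf_codes = LogisticRegression(max_iter=1000).fit(encoder.predict(x_train), y_train)
clf_raw = LogisticRegression(max_iter=1000).fit(x_train, y_train)
print("accuracy on AE codes:", clf_codes.score(encoder.predict(x_test), y_test))
print("accuracy on raw data:", clf_raw.score(x_test, y_test))

Which AE model to plug into this pipeline is exactly the question the Information Fusion paper addresses, conditioning the choice on data traits and the downstream classifier.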
@conference{771,
  title = {Automating Autoencoder Architecture Configuration: An Evolutionary Approach},
  booktitle = {International Work-Conference on the Interplay Between Natural and Artificial Computation},
  year = {2019},
  note = {TIN2015-68454-R},
  month = {05/2019},
  pages = {339-349},
  abstract = {Learning from existing data allows building models able to classify patterns, infer association rules, predict future values in time series and much more. Choosing the right features is a vital step of the learning process, especially when dealing with high-dimensional spaces. Autoencoders (AEs) have shown the ability to perform manifold learning, compressing the original feature space without losing useful information. However, there is no optimal AE architecture for all datasets. In this paper we show how to use evolutionary approaches to automate AE architecture configuration. First, an encoding that embeds the AE configuration in a chromosome is proposed. Then, two evolutionary alternatives are compared against exhaustive search. The results show the clear superiority of the evolutionary approach.},
  keywords = {Autoencoder, Deep learning, Evolutionary, Optimization},
  doi = {10.1007/978-3-030-19591-5_35},
  author = {Francisco Charte and A. J. Rivera-Rivas and Francisco Mart{\'\i}nez and M. J. del Jesus}
}
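The following toy sketch illustrates the evolutionary idea of the last entry under stated assumptions: the chromosome encoding (here simply a list of hidden layer sizes), the mutation operator and the selection scheme are hypothetical stand-ins, not the paper's actual design. Fitness is the validation reconstruction error of a small symmetric AE built from the chromosome.

import random
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

rng = random.Random(0)
x = np.random.rand(500, 32).astype("float32")  # placeholder dataset

def fitness(chromosome):
    # Build a symmetric AE from the encoded hidden sizes; lower loss is fitter.
    inputs = keras.Input(shape=(32,))
    h = inputs
    for units in chromosome:                 # encoder layers
        h = layers.Dense(units, activation="relu")(h)
    for units in reversed(chromosome[:-1]):  # mirrored decoder layers
        h = layers.Dense(units, activation="relu")(h)
    outputs = layers.Dense(32, activation="sigmoid")(h)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer="adam", loss="mse")
    history = model.fit(x, x, epochs=3, batch_size=64,
                        validation_split=0.2, verbose=0)
    return history.history["val_loss"][-1]

def mutate(chromosome):
    # Randomly resize one hidden layer.
    child = list(chromosome)
    child[rng.randrange(len(child))] = rng.choice([4, 8, 16, 24])
    return child

# Tiny (mu + lambda)-style loop: keep the best half, refill with mutants.
population = [[rng.choice([4, 8, 16, 24]) for _ in range(2)] for _ in range(6)]
for generation in range(3):
    survivors = sorted(population, key=fitness)[:3]
    population = survivors + [mutate(rng.choice(survivors)) for _ in range(3)]

print("best architecture found:", sorted(population, key=fitness)[0])

Even this crude loop evaluates far fewer architectures than exhaustive search over all layer-size combinations, which is the advantage the paper quantifies.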