@article{COUSO2016129,
  author   = {Couso, In{\'e}s and S{\'a}nchez, Luciano},
  title    = {Machine learning models, epistemic set-valued data and generalized loss functions: An encompassing approach},
  journal  = {Information Sciences},
  volume   = {358-359},
  year     = {2016},
  pages    = {129--150},
  issn     = {0020-0255},
  doi      = {10.1016/j.ins.2016.04.016},
  url      = {http://www.sciencedirect.com/science/article/pii/S0020025516302511},
  keywords = {classification, Generalized stochastic ordering, Loss function, Low-quality data, Regression, Set-valued data},
  abstract = {We study those problems where the goal is to find {\textquotedblleft}optimal{\textquotedblright} models with respect to some specific criterion, in regression and supervised classification problems. Alternatives to the usual expected loss minimization criterion are proposed, and a general framework where this criterion can be seen as a particular instance of a general family of criteria is provided. In the new setting, each model is formally identified with a random variable that associates a loss value to each individual in the population. Based on this identification, different stochastic orderings between random variables lead to different criteria to compare pairs of models. Our general setting encompasses the classical criterion based on the minimization of the expected loss, but also other criteria where a numerical loss function is not available, and therefore the computation of its expectation does not make sense. The presentation of the new framework is divided into two stages. First, we consider the new framework under standard situations about the sample information, where both the collection of attributes and the response variables are observed with precision. Then, we assume that just incomplete information about them (expressed in terms of set-valued data sets) is provided. We cast some comparison criteria from the recent literature on learning methods from low-quality data as particular instances of our general approach.},
}

@article{SANCHEZ20141583,
  author   = {S{\'a}nchez, Luciano},
  title    = {Comments on {\textquotedblleft}Learning from imprecise and fuzzy observations: Data disambiguation through generalized loss minimization{\textquotedblright} by Eyke H{\"u}llermeier},
  journal  = {International Journal of Approximate Reasoning},
  volume   = {55},
  number   = {7},
  year     = {2014},
  pages    = {1583--1587},
  note     = {Special issue: Harnessing the information contained in low-quality data sources},
  issn     = {0888-613X},
  doi      = {10.1016/j.ijar.2014.04.008},
  url      = {http://www.sciencedirect.com/science/article/pii/S0888613X14000607},
  keywords = {classification, fuzzy data, Imprecise data, Loss functions, machine learning, Regression},
  abstract = {The paper by Eyke H{\"u}llermeier introduces a new set of techniques for learning models from imprecise data. The removal of the uncertainty in the training instances through the input{\textendash}output relationship described by the model is also considered. This discussion addresses three points of the paper: extension principle-based models, precedence operators between fuzzy losses and possible connections between data disambiguation and data imputation.},
}