@article{a99f4830a08e11db8ed6000ea68e967b,
  title     = {Classification using {Hierarchical Naive Bayes} models},
  abstract  = {Classification problems have a long history in the machine learning literature. One of the simplest, and yet most consistently well-performing set of classifiers is the Na{\"i}ve Bayes models. However, an inherent problem with these classifiers is the assumption that all attributes used to describe an instance are conditionally independent given the class of that instance. When this assumption is violated (which is often the case in practice) it can reduce classification accuracy due to ``information double-counting'' and interaction omission. In this paper we focus on a relatively new set of models, termed Hierarchical Na{\"i}ve Bayes models. Hierarchical Na{\"i}ve Bayes models extend the modeling flexibility of Na{\"i}ve Bayes models by introducing latent variables to relax some of the independence statements in these models. We propose a simple algorithm for learning Hierarchical Na{\"i}ve Bayes models in the context of classification. Experimental results show that the learned models can significantly improve classification accuracy as compared to other frameworks.},
  keywords  = {Classification, Na{\"i}ve Bayes models},
  author    = {Langseth, Helge and {Dyhre Nielsen}, Thomas},
  year      = {2006},
  doi       = {10.1007/s10994-006-6136-2},
  language  = {English},
  volume    = {63},
  pages     = {135--159},
  journal   = {Machine Learning},
  issn      = {0885-6125},
  publisher = {Springer},
  number    = {2},
}