Machine Learning: Classifying the wine dataset with a normal Bayes classifier, decision tree, and random forest
2017-11-15 21:58
Description of the wine dataset: http://archive.ics.uci.edu/ml/datasets/Wine
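In the wine.data file from that page, each row is one sample: the class label (1, 2, or 3) comes first, followed by 13 numeric attributes, which is why the program below keeps the default response column index of 0. As a quick sanity check, a minimal sketch like the following can confirm that OpenCV parses the file as expected (the filename wine.data and the printed check are illustrative, not part of the original post):

```cpp
// Minimal sketch: verify that wine.data loads with the expected shape.
// Assumes wine.data from the UCI page above is in the working directory.
#include "opencv2/ml/ml.hpp"
#include <cstdio>
using namespace cv;
using namespace cv::ml;

int main()
{
    // Column 0 is the response (class 1/2/3); columns 1..13 are the attributes.
    Ptr<TrainData> data = TrainData::loadFromCSV("wine.data", 0, 0, 1);
    if (data.empty()) { printf("could not read wine.data\n"); return 1; }
    printf("samples: %d, variables: %d\n", data->getNSamples(), data->getNVars());
    return 0;
}
```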
#include "opencv2/ml/ml.hpp" #include "opencv2/core/core.hpp" #include "opencv2/core/utility.hpp" #include <stdio.h> #include <string> #include <map> #include <vector> #include<iostream> using namespace std; using namespace cv; using namespace cv::ml; static void help() { printf( "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees.\n" "Usage:\n\t./tree_engine [-r <response_column>] [-ts type_spec] <csv filename>\n" "where -r <response_column> specified the 0-based index of the response (0 by default)\n" "-ts specifies the var type spec in the form ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\n" "<csv filename> is the name of training data file in comma-separated value format\n\n"); } static void train_and_print_errs(Ptr<StatModel> model, const Ptr<TrainData>& data) { bool ok = model->train(data); if (!ok) { printf("Training failed\n"); } else { printf("train error: %f\n", model->calcError(data, false, noArray())); printf("test error: %f\n\n", model->calcError(data, true, noArray())); } } int main(int argc, char** argv) { if (argc < 2) { help(); return 0; } const char* filename = 0; int response_idx = 0; std::string typespec; for (int i = 1; i < argc; i++) { if (strcmp(argv[i], "-r") == 0) sscanf(argv[++i], "%d", &response_idx); else if (strcmp(argv[i], "-ts") == 0) typespec = argv[++i]; else if (argv[i][0] != '-') filename = argv[i]; else { printf("Error. Invalid option %s\n", argv[i]); help(); return -1; } } printf("\nReading in %s...\n\n", filename); const double train_test_split_ratio = 0.5; //加载训练数据 Ptr<TrainData> data = TrainData::loadFromCSV(filename, 0, response_idx, response_idx + 1, typespec); if (data.empty()) { printf("ERROR: File %s can not be read\n", filename); return 0; } data->setTrainTestSplitRatio(train_test_split_ratio); //预测数据 float test1[] = { 14.23, 1.71, 2.43, 15.6, 127, 2.8, 3.06, .28, 2.29, 5.64, 1.04, 3.92, 1065 }; float test2[] = { 12.37, .94, 1.36, 10.6, 88, 1.98, .57, .28, .42, 1.95, 1.05, 1.82, 520 }; float test3[] = { 12.86, 1.35, 2.32, 18, 122, 1.51, 1.25, .21, .94, 4.1, .76, 1.29, 630 }; Mat test1Map(1, 13, CV_32FC1, test1); Mat test2Map(1, 13, CV_32FC1, test2); Mat test3Map(1, 13, CV_32FC1, test3); printf("============正太贝叶斯分类器================\n"); //创建正态贝叶斯分类器 Ptr<NormalBayesClassifier> bayes = NormalBayesClassifier::create(); //训练模型 train_and_print_errs(bayes, data); //保存模型 bayes->save("bayes_result.xml"); //读取模型,强行使用一下,为了强调这种用法,当然此处完全没必要 Ptr<NormalBayesClassifier> bayes2 = NormalBayesClassifier::load<NormalBayesClassifier>("bayes_result.xml"); cout << bayes2->predict(test1Map) << endl; cout << bayes2->predict(test2Map) << endl; cout << bayes2->predict(test3Map) << endl; cout << "============================================" << endl; printf("======DTREE=====\n"); //创建决策树 Ptr<DTrees> dtree = DTrees::create(); dtree->setMaxDepth(10); //设置决策树的最大深度 dtree->setMinSampleCount(2); //设置决策树叶子节点的最小样本数 dtree->setRegressionAccuracy(0); //设置回归精度 dtree->setUseSurrogates(false); //不使用替代分叉属性 dtree->setMaxCategories(16); //设置最大的类数量 dtree->setCVFolds(0); //设置不交叉验证 dtree->setUse1SERule(false); //不使用1SE规则 dtree->setTruncatePrunedTree(false); //不对分支进行修剪 dtree->setPriors(Mat()); //设置先验概率 train_and_print_errs(dtree, data); dtree->save("dtree_result.xml"); //读取模型,强行使用一下,为了强调这种用法,当然此处完全没必要 Ptr<DTrees> dtree2 = DTrees::load<DTrees>("dtree_result.xml"); cout << dtree2->predict(test1Map) << endl; cout << dtree2->predict(test2Map) << endl; cout << dtree2->predict(test3Map) << endl; cout << 
"============================================" << endl; //if ((int)data->getClassLabels().total() <= 2) // regression or 2-class classification problem //{ // printf("======BOOST=====\n"); // Ptr<Boost> boost = Boost::create(); // boost->setBoostType(Boost::GENTLE); // boost->setWeakCount(100); // boost->setWeightTrimRate(0.95); // boost->setMaxDepth(2); // boost->setUseSurrogates(false); // boost->setPriors(Mat()); // train_and_print_errs(boost, data); //} printf("======RTREES=====\n"); Ptr<RTrees> rtrees = RTrees::create(); rtrees->setMaxDepth(10); rtrees->setMinSampleCount(2); rtrees->setRegressionAccuracy(0); rtrees->setUseSurrogates(false); rtrees->setMaxCategories(16); rtrees->setPriors(Mat()); rtrees->setCalculateVarImportance(false); rtrees->setActiveVarCount(0); rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0)); train_and_print_errs(rtrees, data); cout << rtrees->predict(test1Map) << endl; cout << rtrees->predict(test2Map) << endl; cout << rtrees->predict(test3Map) << endl; cout << "============================================" << endl; return 0; }
As the output shows, for classification of the wine dataset the relative performance is rtrees > dtree > normalbayes.
The wine dataset contains 178 samples.
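Since setTrainTestSplitRatio(0.5) holds out half of the 178 rows, the test error printed by calcError(data, true, ...) is measured on roughly 89 samples the model never saw during training. A minimal sketch of checking such a comparison by hand is shown below; it assumes the same wine.data file and that the CSV loads as a classification problem, as it does in the run above (the variable names are illustrative):

```cpp
// Minimal sketch: train a random forest and compute test-set accuracy manually.
// Assumes wine.data (class label in column 0) is in the working directory.
#include "opencv2/ml/ml.hpp"
#include <cstdio>
using namespace cv;
using namespace cv::ml;

int main()
{
    Ptr<TrainData> data = TrainData::loadFromCSV("wine.data", 0, 0, 1);
    if (data.empty()) { printf("could not read wine.data\n"); return 1; }
    data->setTrainTestSplitRatio(0.5); // ~89 rows for training, ~89 held out for testing

    Ptr<RTrees> rtrees = RTrees::create();
    rtrees->train(data);

    // Predict on the held-out rows and count matches against the true labels.
    Mat testSamples = data->getTestSamples();
    Mat testResponses = data->getTestResponses();
    int correct = 0;
    for (int i = 0; i < testSamples.rows; i++)
    {
        float predicted = rtrees->predict(testSamples.row(i));
        if (cvRound(predicted) == cvRound(testResponses.at<float>(i, 0)))
            correct++;
    }
    printf("test accuracy: %d / %d\n", correct, testSamples.rows);
    return 0;
}
```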