分词统计词频
2008-12-30 20:48
267 查看
package test;
/**
*
* 对文本进行分词,并统计每个词语的词频
*
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
public class Segment {

    /**
     * Entry point: reads a GB2312-encoded text file and prints the frequency
     * of every token produced by the segmenter.
     */
    public static void main(String args[]) throws IOException {
        Segment s = new Segment();
        // NOTE(review): "F://test.txt" relies on Windows tolerating a doubled
        // separator; "F:/test.txt" is the conventional spelling.
        String text = s.ReadFileByBufferdeReader("F://test.txt");
        s.getWordByReader(text);
    }

    /**
     * Reads the whole file as GB2312 text, joining lines with '\n'.
     * (Method name kept as-is, typo included, for backward compatibility.)
     *
     * @param readFileName path of the file to read
     * @return the file contents, or whatever was read before an I/O error
     */
    public String ReadFileByBufferdeReader(String readFileName) {
        // StringBuilder avoids the O(n^2) cost of String += in a loop.
        StringBuilder temp = new StringBuilder();
        BufferedReader reader = null;
        try {
            reader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(new File(readFileName)), "gb2312"));
            String line;
            while ((line = reader.readLine()) != null) {
                // BUG FIX: the original appended the literal two characters "/n"
                // instead of a newline.
                temp.append(line).append('\n');
            }
        } catch (IOException e) {
            // FileNotFoundException and UnsupportedEncodingException are both
            // IOException subclasses, so one handler covers all three originals.
            e.printStackTrace();
        } finally {
            // The original leaked the reader; always close it.
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException ignored) {
                    // best-effort close; nothing useful to do here
                }
            }
        }
        return temp.toString();
    }

    /**
     * Tokenizes the text with MMAnalyzer (forward maximum matching) and prints
     * each distinct token together with its occurrence count.
     *
     * @param text the text to segment
     * @return map from token to frequency (possibly partial on I/O error)
     */
    public Map<String, Integer> getWordByReader(String text) {
        // 采用正向最大匹配的中文分词算法
        MMAnalyzer analyzer = new MMAnalyzer();
        analyzer.addWord("任家坪"); // user-supplied dictionary entry
        Map<String, Integer> map = new HashMap<String, Integer>();
        try {
            System.out.println("Length = " + text.length());
            Reader r = new StringReader(text);
            // Tokenizes all the text in the provided Reader.
            TokenStream ts = analyzer.tokenStream(null, r);
            // BUG FIX: original printed the literal "/n" instead of a newline.
            System.out.println("开始分词...\n");
            for (Token t = ts.next(); t != null; t = ts.next()) {
                String str = t.termText();
                Integer count = map.get(str);
                // Integer.valueOf uses the small-value cache; new Integer is deprecated.
                map.put(str, count == null ? Integer.valueOf(1) : Integer.valueOf(count.intValue() + 1));
            }
            // Generic for-each replaces the raw Iterator of the original.
            for (Map.Entry<String, Integer> entry : map.entrySet()) {
                System.out.println(entry.getKey() + ":" + entry.getValue()); // 输出词汇及频率
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return map;
    }
}
/**
*
* 对文本进行分词,并统计每个词语的词频
*
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import jeasy.analysis.MMAnalyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
public class Segment {

    /**
     * Entry point: reads a GB2312-encoded text file and prints the frequency
     * of every token produced by the segmenter.
     */
    public static void main(String args[]) throws IOException {
        Segment s = new Segment();
        // NOTE(review): "F://test.txt" relies on Windows tolerating a doubled
        // separator; "F:/test.txt" is the conventional spelling.
        String text = s.ReadFileByBufferdeReader("F://test.txt");
        s.getWordByReader(text);
    }

    /**
     * Reads the whole file as GB2312 text, joining lines with '\n'.
     * (Method name kept as-is, typo included, for backward compatibility.)
     *
     * @param readFileName path of the file to read
     * @return the file contents, or whatever was read before an I/O error
     */
    public String ReadFileByBufferdeReader(String readFileName) {
        // StringBuilder avoids the O(n^2) cost of String += in a loop.
        StringBuilder temp = new StringBuilder();
        BufferedReader reader = null;
        try {
            reader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(new File(readFileName)), "gb2312"));
            String line;
            while ((line = reader.readLine()) != null) {
                // BUG FIX: the original appended the literal two characters "/n"
                // instead of a newline.
                temp.append(line).append('\n');
            }
        } catch (IOException e) {
            // FileNotFoundException and UnsupportedEncodingException are both
            // IOException subclasses, so one handler covers all three originals.
            e.printStackTrace();
        } finally {
            // The original leaked the reader; always close it.
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException ignored) {
                    // best-effort close; nothing useful to do here
                }
            }
        }
        return temp.toString();
    }

    /**
     * Tokenizes the text with MMAnalyzer (forward maximum matching) and prints
     * each distinct token together with its occurrence count.
     *
     * @param text the text to segment
     * @return map from token to frequency (possibly partial on I/O error)
     */
    public Map<String, Integer> getWordByReader(String text) {
        // 采用正向最大匹配的中文分词算法
        MMAnalyzer analyzer = new MMAnalyzer();
        analyzer.addWord("任家坪"); // user-supplied dictionary entry
        Map<String, Integer> map = new HashMap<String, Integer>();
        try {
            System.out.println("Length = " + text.length());
            Reader r = new StringReader(text);
            // Tokenizes all the text in the provided Reader.
            TokenStream ts = analyzer.tokenStream(null, r);
            // BUG FIX: original printed the literal "/n" instead of a newline.
            System.out.println("开始分词...\n");
            for (Token t = ts.next(); t != null; t = ts.next()) {
                String str = t.termText();
                Integer count = map.get(str);
                // Integer.valueOf uses the small-value cache; new Integer is deprecated.
                map.put(str, count == null ? Integer.valueOf(1) : Integer.valueOf(count.intValue() + 1));
            }
            // Generic for-each replaces the raw Iterator of the original.
            for (Map.Entry<String, Integer> entry : map.entrySet()) {
                System.out.println(entry.getKey() + ":" + entry.getValue()); // 输出词汇及频率
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return map;
    }
}
相关文章推荐
- Hadoop IK分词 词频统计
- Hadoop的改进实验(中文分词词频统计及英文词频统计)(1/4)
- 【python 编程】网页中文过滤分词及词频统计
- 使用 wordcloud, jieba, PIL, matplotlib, numpy 进行分词,统计词频,并绘制词云的一次尝试
- python结巴分词以及词频统计实例
- 中文分词,词频统计,词云图制作
- 【中文分词】使用IKAnalyzer分词统计词频
- PYTHON3.6对中文文本分词、去停用词以及词频统计
- Python jieba 中文分词与词频统计
- 使用Spark、Ansj分词进行词频统计
- Hadoop的改进实验(中文分词词频统计及英文词频统计)(2/4)
- 利用结巴分词的Python版本实现分词并统计词频
- 使用ES对中文文章进行分词,并进行词频统计排序
- python jieba分词并统计词频后输出结果到Excel和txt文档方法
- Python分词并进行词频统计
- 用R进行文本挖掘与分析--软件分词统计词频
- Hadoop的改进实验(中文分词词频统计及英文词频统计)(3/4)
- hadoop中文分词、词频统计及排序
- IK Analyzer分词及词频统计Java简单实现
- nltk学习之统计词频和分词nltk.word_tokenize nltk.FreqDist