Hbase进行分页显示
2018-03-31 22:40
190 查看
使用 Java API 对 HBase 中存储的数据进行分页显示:
思路:使用 PageFilter 和 RowFilter(配合 BinaryComparator,比较运算符使用 GREATER),前者控制每页显示数目,后者控制当前页的起始元素(即大于上一页最后一个行键)。
分类:1、当输入页数小于等于1时,直接显示第一页,
2、当输入页数为2时,求出第一页的最后一个行键,作为比较值,
3、当输入页数大于等于3时,递归调用方法,求出前一页的最后一个行键即可
实现代码:package lpj.hbase.filter;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import habase.Utils.GetAdminAndTable;
import habase.Utils.PrintUtils;
public class FilterTest {
public static void main(String[] args) throws Exception {
    // Demo entry point: show one page of a table.
    final String tableName = "user_info";
    final int pageIndex = 3; // which page to display (1-based)
    final int pageSize = 2;  // rows per page
    getPageList(tableName, pageIndex, pageSize);
}
/**
 * Prints page {@code pageIndex} (1-based) of {@code tableName}, at most
 * {@code pageSize} rows, by combining a PageFilter (row count cap) with a
 * RowFilter that skips everything up to the previous page's last row key.
 */
public static void getPageList(String tableName, int pageIndex, int pageSize) {
    Table table = GetAdminAndTable.geTable(tableName);
    // Caps the number of rows returned. NOTE(review): PageFilter is applied
    // per region server, so with multiple regions more than pageSize rows
    // can come back — confirm the table is single-region.
    Filter pageFilter = new PageFilter(pageSize);
    // Last row key of the previous page; null means "start from the beginning".
    String pageStartEle = getPageStartEle(tableName, pageIndex, pageSize);
    Filter filter;
    if (pageStartEle == null) {
        // First page: no lower bound. (The original built a RowFilter over
        // BinaryComparator(null), which throws a NullPointerException.)
        filter = pageFilter;
    } else {
        // Decode/encode row keys explicitly as UTF-8 rather than the
        // platform default charset.
        Filter startFilter = new RowFilter(CompareOp.GREATER,
                new BinaryComparator(pageStartEle.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
        filter = new FilterList(pageFilter, startFilter);
    }
    Scan scan = new Scan();
    scan.setFilter(filter);
    ResultScanner scanner = null;
    try {
        scanner = table.getScanner(scan);
        PrintUtils.printResultScanner(scanner);
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        // The original leaked the scanner; close it before the table.
        if (scanner != null) {
            scanner.close();
        }
        try {
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
@SuppressWarnings("deprecation")
/**
 * Returns the last row key of page {@code pageIndex - 1}, i.e. the exclusive
 * lower bound for page {@code pageIndex}; null for the first page (or when
 * the previous page is empty).
 */
private static String getPageStartEle(String tableName, int pageIndex, int pageSize) {
    // Page 1 has no predecessor: scan from the start of the table.
    if (pageIndex <= 1) {
        return null;
    }
    // Page 2: the bound is simply the last row key of page 1.
    if (pageIndex == 2) {
        ResultScanner scanner = getScanner(tableName, pageIndex - 1, pageSize);
        if (scanner == null) {
            return null; // getScanner already reported the failure
        }
        try {
            return getKey(scanner.iterator());
        } finally {
            scanner.close(); // was leaked in the original code
        }
    }
    // Page >= 3: recurse for the bound of the previous page, then scan one
    // page past it and take that page's last row key.
    String pageStartEle = getPageStartEle(tableName, pageIndex - 1, pageSize);
    if (pageStartEle == null) {
        // Previous page is empty, so this page is empty too (the original
        // dereferenced null here and threw a NullPointerException).
        return null;
    }
    Filter rowFilter = new RowFilter(CompareOp.GREATER,
            new BinaryComparator(pageStartEle.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
    Filter pageFilter = new PageFilter(pageSize);
    Filter filter = new FilterList(pageFilter, rowFilter);
    Scan scan = new Scan();
    scan.setFilter(filter);
    Table table = GetAdminAndTable.geTable(tableName);
    ResultScanner scanner = null;
    try {
        scanner = table.getScanner(scan);
        return getKey(scanner.iterator());
    } catch (IOException e) {
        // The original swallowed this and then NPE'd on the null scanner;
        // surface the real cause instead.
        throw new RuntimeException("failed to scan table " + tableName, e);
    } finally {
        if (scanner != null) {
            scanner.close(); // was leaked in the original code
        }
        try {
            table.close(); // was leaked in the original code
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
/**
 * Drains the iterator and returns the row key of its last Result, decoded
 * as UTF-8; returns null when the iterator is empty (the original threw a
 * NullPointerException in that case).
 */
private static String getKey(Iterator<Result> iterator) {
    Result lastResult = null;
    while (iterator.hasNext()) {
        lastResult = iterator.next();
    }
    if (lastResult == null) {
        return null;
    }
    // Every cell of a Result shares the same row key, so there is no need
    // to loop over listCells(); Result.getRow() returns it directly.
    return new String(lastResult.getRow(), java.nio.charset.StandardCharsets.UTF_8);
}
/**
 * Opens a scanner over the first {@code pageSize} rows of {@code tableName}.
 * NOTE(review): pageIndex is unused here; the Table handle is never closed
 * and the returned scanner must be closed by the caller — consider reworking
 * this interface. On I/O failure the method returns null, which callers
 * will dereference.
 */
public static ResultScanner getScanner(String tableName, int pageIndex, int pageSize) {
    Table table = GetAdminAndTable.geTable(tableName);
    Scan scan = new Scan();
    scan.setFilter(new PageFilter(pageSize));
    try {
        return table.getScanner(scan);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
}
}辅助类:package habase.Utils;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
public class PrintUtils {
/** Prints every row produced by the scanner, one row at a time. */
public static void printResultScanner(ResultScanner scanner) {
    // ResultScanner is Iterable<Result>, so a for-each walks all rows.
    for (Result result : scanner) {
        printResult(result);
    }
}
/** Prints every cell of one row, one line per cell. */
public static void printResult(Result result) {
    // NOTE(review): listCells() can return null for an empty Result —
    // confirm callers never pass one.
    List<Cell> cellList = result.listCells();
    for (Cell c : cellList) {
        printCell(c);
    }
}
/** Prints one cell as: rowkey \t family \t qualifier \t value \t timestamp. */
public static void printCell(Cell cell) {
    // Decode explicitly as UTF-8: new String(byte[]) uses the platform
    // default charset and can garble multi-byte row keys or values.
    final java.nio.charset.Charset utf8 = java.nio.charset.StandardCharsets.UTF_8;
    System.out.println(new String(cell.getRow(), utf8) + "\t"
            + new String(cell.getFamily(), utf8) + "\t"
            + new String(cell.getQualifier(), utf8) + "\t"
            + new String(cell.getValue(), utf8) + "\t"
            + cell.getTimestamp());
}
}package habase.Utils;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
public class GetAdminAndTable {
private static final String CONNECT_KEY = "hbase.zookeeper.quorum";
private static final String CONNECT_VALUE = "hadoop02:2181,hadoop03:2181,hadoop04:2181";
// Single shared Connection for the JVM: HBase Connections are heavyweight
// (they own the ZooKeeper session and thread pools) and are documented to
// be created once and reused. The original code opened a brand-new
// Connection on every call and never closed any of them — a severe leak.
private static volatile Connection connection;

/**
 * Lazily creates and caches the shared cluster Connection.
 *
 * @throws RuntimeException if the connection cannot be established. The
 *     original printed "连接超时" (misleading: any IOException, not only a
 *     timeout) and returned null, which made every caller NPE.
 */
private static synchronized Connection getConnect() {
    if (connection == null) {
        Configuration config = HBaseConfiguration.create();
        config.set(CONNECT_KEY, CONNECT_VALUE);
        try {
            connection = ConnectionFactory.createConnection(config);
        } catch (IOException e) {
            throw new RuntimeException("cannot connect to HBase at " + CONNECT_VALUE, e);
        }
    }
    return connection;
}
/** Returns an Admin for the cluster, or null if obtaining it fails. */
public static Admin getAdmin() {
    Admin admin = null;
    try {
        admin = getConnect().getAdmin();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return admin;
}
/** Returns a Table handle for {@code tableName}, or null if obtaining it fails. */
public static Table geTable(String tableName) {
    Table table = null;
    try {
        table = getConnect().getTable(TableName.valueOf(tableName));
    } catch (IOException e) {
        e.printStackTrace();
    }
    return table;
}
}测试成功
思路:使用 PageFilter 和 RowFilter(配合 BinaryComparator,比较运算符使用 GREATER),前者控制每页显示数目,后者控制当前页的起始元素(即大于上一页最后一个行键)。
分类:1、当输入页数小于等于1时,直接显示第一页,
2、当输入页数为2时,求出第一页的最后一个行键,作为比较值,
3、当输入页数大于等于3时,递归调用方法,求出前一页的最后一个行键即可
实现代码:package lpj.hbase.filter;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import habase.Utils.GetAdminAndTable;
import habase.Utils.PrintUtils;
public class FilterTest {
public static void main(String[] args) throws Exception {
    // Demo entry point: show one page of a table.
    final String tableName = "user_info";
    final int pageIndex = 3; // which page to display (1-based)
    final int pageSize = 2;  // rows per page
    getPageList(tableName, pageIndex, pageSize);
}
/**
 * Prints page {@code pageIndex} (1-based) of {@code tableName}, at most
 * {@code pageSize} rows, by combining a PageFilter (row count cap) with a
 * RowFilter that skips everything up to the previous page's last row key.
 */
public static void getPageList(String tableName, int pageIndex, int pageSize) {
    Table table = GetAdminAndTable.geTable(tableName);
    // Caps the number of rows returned. NOTE(review): PageFilter is applied
    // per region server, so with multiple regions more than pageSize rows
    // can come back — confirm the table is single-region.
    Filter pageFilter = new PageFilter(pageSize);
    // Last row key of the previous page; null means "start from the beginning".
    String pageStartEle = getPageStartEle(tableName, pageIndex, pageSize);
    Filter filter;
    if (pageStartEle == null) {
        // First page: no lower bound. (The original built a RowFilter over
        // BinaryComparator(null), which throws a NullPointerException.)
        filter = pageFilter;
    } else {
        // Decode/encode row keys explicitly as UTF-8 rather than the
        // platform default charset.
        Filter startFilter = new RowFilter(CompareOp.GREATER,
                new BinaryComparator(pageStartEle.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
        filter = new FilterList(pageFilter, startFilter);
    }
    Scan scan = new Scan();
    scan.setFilter(filter);
    ResultScanner scanner = null;
    try {
        scanner = table.getScanner(scan);
        PrintUtils.printResultScanner(scanner);
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        // The original leaked the scanner; close it before the table.
        if (scanner != null) {
            scanner.close();
        }
        try {
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
@SuppressWarnings("deprecation")
/**
 * Returns the last row key of page {@code pageIndex - 1}, i.e. the exclusive
 * lower bound for page {@code pageIndex}; null for the first page (or when
 * the previous page is empty).
 */
private static String getPageStartEle(String tableName, int pageIndex, int pageSize) {
    // Page 1 has no predecessor: scan from the start of the table.
    if (pageIndex <= 1) {
        return null;
    }
    // Page 2: the bound is simply the last row key of page 1.
    if (pageIndex == 2) {
        ResultScanner scanner = getScanner(tableName, pageIndex - 1, pageSize);
        if (scanner == null) {
            return null; // getScanner already reported the failure
        }
        try {
            return getKey(scanner.iterator());
        } finally {
            scanner.close(); // was leaked in the original code
        }
    }
    // Page >= 3: recurse for the bound of the previous page, then scan one
    // page past it and take that page's last row key.
    String pageStartEle = getPageStartEle(tableName, pageIndex - 1, pageSize);
    if (pageStartEle == null) {
        // Previous page is empty, so this page is empty too (the original
        // dereferenced null here and threw a NullPointerException).
        return null;
    }
    Filter rowFilter = new RowFilter(CompareOp.GREATER,
            new BinaryComparator(pageStartEle.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
    Filter pageFilter = new PageFilter(pageSize);
    Filter filter = new FilterList(pageFilter, rowFilter);
    Scan scan = new Scan();
    scan.setFilter(filter);
    Table table = GetAdminAndTable.geTable(tableName);
    ResultScanner scanner = null;
    try {
        scanner = table.getScanner(scan);
        return getKey(scanner.iterator());
    } catch (IOException e) {
        // The original swallowed this and then NPE'd on the null scanner;
        // surface the real cause instead.
        throw new RuntimeException("failed to scan table " + tableName, e);
    } finally {
        if (scanner != null) {
            scanner.close(); // was leaked in the original code
        }
        try {
            table.close(); // was leaked in the original code
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
/**
 * Drains the iterator and returns the row key of its last Result, decoded
 * as UTF-8; returns null when the iterator is empty (the original threw a
 * NullPointerException in that case).
 */
private static String getKey(Iterator<Result> iterator) {
    Result lastResult = null;
    while (iterator.hasNext()) {
        lastResult = iterator.next();
    }
    if (lastResult == null) {
        return null;
    }
    // Every cell of a Result shares the same row key, so there is no need
    // to loop over listCells(); Result.getRow() returns it directly.
    return new String(lastResult.getRow(), java.nio.charset.StandardCharsets.UTF_8);
}
/**
 * Opens a scanner over the first {@code pageSize} rows of {@code tableName}.
 * NOTE(review): pageIndex is unused here; the Table handle is never closed
 * and the returned scanner must be closed by the caller — consider reworking
 * this interface. On I/O failure the method returns null, which callers
 * will dereference.
 */
public static ResultScanner getScanner(String tableName, int pageIndex, int pageSize) {
    Table table = GetAdminAndTable.geTable(tableName);
    Scan scan = new Scan();
    scan.setFilter(new PageFilter(pageSize));
    try {
        return table.getScanner(scan);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
}
}辅助类:package habase.Utils;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
public class PrintUtils {
/** Prints every row produced by the scanner, one row at a time. */
public static void printResultScanner(ResultScanner scanner) {
    // ResultScanner is Iterable<Result>, so a for-each walks all rows.
    for (Result result : scanner) {
        printResult(result);
    }
}
/** Prints every cell of one row, one line per cell. */
public static void printResult(Result result) {
    // NOTE(review): listCells() can return null for an empty Result —
    // confirm callers never pass one.
    List<Cell> cellList = result.listCells();
    for (Cell c : cellList) {
        printCell(c);
    }
}
/** Prints one cell as: rowkey \t family \t qualifier \t value \t timestamp. */
public static void printCell(Cell cell) {
    // Decode explicitly as UTF-8: new String(byte[]) uses the platform
    // default charset and can garble multi-byte row keys or values.
    final java.nio.charset.Charset utf8 = java.nio.charset.StandardCharsets.UTF_8;
    System.out.println(new String(cell.getRow(), utf8) + "\t"
            + new String(cell.getFamily(), utf8) + "\t"
            + new String(cell.getQualifier(), utf8) + "\t"
            + new String(cell.getValue(), utf8) + "\t"
            + cell.getTimestamp());
}
}package habase.Utils;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
public class GetAdminAndTable {
private static final String CONNECT_KEY = "hbase.zookeeper.quorum";
private static final String CONNECT_VALUE = "hadoop02:2181,hadoop03:2181,hadoop04:2181";
// Single shared Connection for the JVM: HBase Connections are heavyweight
// (they own the ZooKeeper session and thread pools) and are documented to
// be created once and reused. The original code opened a brand-new
// Connection on every call and never closed any of them — a severe leak.
private static volatile Connection connection;

/**
 * Lazily creates and caches the shared cluster Connection.
 *
 * @throws RuntimeException if the connection cannot be established. The
 *     original printed "连接超时" (misleading: any IOException, not only a
 *     timeout) and returned null, which made every caller NPE.
 */
private static synchronized Connection getConnect() {
    if (connection == null) {
        Configuration config = HBaseConfiguration.create();
        config.set(CONNECT_KEY, CONNECT_VALUE);
        try {
            connection = ConnectionFactory.createConnection(config);
        } catch (IOException e) {
            throw new RuntimeException("cannot connect to HBase at " + CONNECT_VALUE, e);
        }
    }
    return connection;
}
/** Returns an Admin for the cluster, or null if obtaining it fails. */
public static Admin getAdmin() {
    Admin admin = null;
    try {
        admin = getConnect().getAdmin();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return admin;
}
/** Returns a Table handle for {@code tableName}, or null if obtaining it fails. */
public static Table geTable(String tableName) {
    Table table = null;
    try {
        table = getConnect().getTable(TableName.valueOf(tableName));
    } catch (IOException e) {
        e.printStackTrace();
    }
    return table;
}
}测试成功
相关文章推荐
- thinkphp中对数组元素进行分页显示
- ExtJs4.1将服务器中的数据进行分页显示
- 使用MVCPager进行博客园首页列表数据的分页显示功能
- js 将很长的内容进行页面分页显示
- thinkphp中对数组元素进行分页显示
- jeesite框架学习——对查询结果为Map的集合进行分页显示
- Java连接HBASE数据库,创建一个表,删除一张表,修改表,输出插入,修改,数据删除,数据获取,显示表信息,过滤查询,分页查询,地理hash
- Linux less --对文件或其他输出进行分页显示
- 文章,记录按内容分页显示,根据文章内容按字数进行分页
- 制作分页显示时用paginate()函数获取对象后如何将获取到的某一键值进行修改
- 如何在WinForm中对DataGrid进行分页显示
- java中对list进行分页显示数据到页面
- 分页(对查询出的数据进行分页显示)
- 基于hbase仿百度的分页显示
- 实现对过长文字内容进行分页显示
- 将实时取到的数据进行分页显示的插件
- 对数据进行分页,显示到table中。
- 关于分页插件pagination的应用及遇到的问题(用ajax请求数据进行拼接后分页显示)
- json一次取所有数据,然后在前后进行分页显示
- 对结果集进行分页显示