Lucene full-text search: a worked example with a brief code walkthrough

  Lucene is a subproject of the Apache Software Foundation's Jakarta project: an open-source full-text search toolkit and framework that provides a complete query engine and indexing engine, implements a number of common text-analysis algorithms, and leaves many analyzer interfaces open for extension. Using the code with which the myrss.easyjf.com site implements full-text search, this article gives a simple demonstration of how Lucene is applied in a real project.
  Implementing full-text search with Lucene involves three main steps (a minimal end-to-end sketch follows the list):
  1. Build the index: create the Lucene index files from the data already held in the site's news database.
  2. Search the index: once the index exists, run full-text queries against it with the standard analyzer or an analyzer implementation of your own.
  3. Maintain the index: records in the news database are continually added, modified, and deleted, and every such change has to be propagated into the Lucene index files.
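  Before looking at the project code, here is a minimal, self-contained sketch of those three steps against the same generation of the Lucene API the article uses (roughly Lucene 1.9/2.0). The package name, index path, field names, and sample text are placeholders of mine, not part of the myrss.easyjf.com code.
package com.easyjf.lucene.demo; // hypothetical demo package
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
public class LuceneQuickStart {
    public static void main(String[] args) throws Exception {
        String indexDir = "/tmp/demo-index"; // placeholder index directory

        // 1. Build the index: the third argument true creates a brand-new index.
        IndexWriter writer = new IndexWriter(indexDir, new StandardAnalyzer(), true);
        Document doc = new Document();
        doc.add(new Field("title", "Lucene quick start", Field.Store.YES, Field.Index.TOKENIZED));
        doc.add(new Field("content", "A tiny indexing and searching example.", Field.Store.YES, Field.Index.TOKENIZED));
        writer.addDocument(doc);
        writer.optimize();
        writer.close();

        // 2. Search the index: parse the user's keyword against the "content" field.
        IndexSearcher searcher = new IndexSearcher(indexDir);
        Query query = new QueryParser("content", new StandardAnalyzer()).parse("searching");
        Hits hits = searcher.search(query);
        for (int i = 0; i < hits.length(); i++) {
            System.out.println(hits.doc(i).get("title"));
        }
        searcher.close();

        // 3. Maintain the index: re-open the writer with create=false to append documents
        //    for records added or changed since the last run.
        IndexWriter appender = new IndexWriter(indexDir, new StandardAnalyzer(), false);
        // appender.addDocument(...);
        appender.optimize();
        appender.close();
    }
}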
The relevant code from myrss.easyjf.com follows.
I. Index management (building and maintenance)
  The index management class MyRssIndexManage builds the Lucene index from the records in the site's news database and keeps it up to date. Because indexing takes a noticeable amount of time, the class implements Runnable so that callers can run it in a thread of its own (a usage sketch follows the class).
package com.easyjf.lucene;
import java.util.Date;
import java.util.List;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import com.easyjf.dbo.EasyJDB;
import com.easyjf.news.business.NewsDir;
import com.easyjf.news.business.NewsDoc;
import com.easyjf.news.business.NewsUtil;
import com.easyjf.web.tools.IPageList;
public class MyRssIndexManage implements Runnable {
private String indexDir;
private String indexType="add";
public void run() {
// TODO Auto-generated method stub
if("add".equals(indexType))
normalIndex();
else if ("init".equals(indexType)) reIndexAll();
}
public void normalIndex()
{
try{
Date start = new Date();
int num=0;
IndexWriter writer=new IndexWriter(indexDir,new StandardAnalyzer(),false);
//NewsDir dir=NewsDir.readBySn();
String scope="(needIndex<2)";//select only the records that have not been indexed yet
IPageList pList=NewsUtil.pageList(scope,1,50);
for(int p=0;p<pList.getPages();p++){
pList=NewsUtil.pageList(scope,p,100);
List list=pList.getResult();
for(int i=0;i<list.size();i++){
NewsDoc doc=(NewsDoc)list.get(i);
writer.addDocument(newsdoc2lucenedoc(doc));
num++;
}
}
writer.optimize();
writer.close();
EasyJDB.getInstance().execute("update NewsDoc set needIndex=2 where "+scope);
Date end = new Date();
System.out.print("Indexed "+num+" new records, which took "+(end.getTime() - start.getTime())/60000+" minutes.");
}
catch(Exception e)
{
e.printStackTrace();
}
}
public void reIndexAll()
{
try{
Date start = new Date();
int num=0;
IndexWriter writer=new IndexWriter(indexDir,new StandardAnalyzer(),true);
NewsDir dir=NewsDir.readBySn("easyjf");
IPageList pList=NewsUtil.pageList(dir,1,50);
for(int p=0;p<pList.getPages();p++){
pList=NewsUtil.pageList(dir,p,100);
List list=pList.getResult();
for(int i=0;i<list.size();i++){
NewsDoc doc=(NewsDoc)list.get(i);
writer.addDocument(newsdoc2lucenedoc(doc));
num++;
}
}
writer.optimize();
writer.close();
EasyJDB.getInstance().execute("update NewsDoc set needIndex=2 where dirPath like 'easyjf%'");
Date end = new Date();
System.out.print("Rebuilt the entire index: processed "+num+" records in "+(end.getTime() - start.getTime())/60000+" minutes.");
}
catch(Exception e)
{
e.printStackTrace();
}
}
private Document newsdoc2lucenedoc(NewsDoc doc)
{
Document lDoc=new Document();
lDoc.add(new Field("title",doc.getTitle(),Field.Store.YES,Field.Index.TOKENIZED));
lDoc.add(new Field("content",doc.getContent(),Field.Store.YES,Field.Index.TOKENIZED));
lDoc.add(new Field("url",doc.getRemark(),Field.Store.YES,Field.Index.NO));
lDoc.add(new Field("cid",doc.getCid(),Field.Store.YES,Field.Index.NO));
lDoc.add(new Field("source",doc.getSource(),Field.Store.YES,Field.Index.NO));
lDoc.add(new Field("inputTime",doc.getInputTime().toString(),Field.Store.YES,Field.Index.NO));
return lDoc;
}
public String getIndexDir() {
return indexDir;
}
public void setIndexDir(String indexDir) {
this.indexDir = indexDir;
}

public String getIndexType() {
return indexType;
}
public void setIndexType(String indexType) {
this.indexType = indexType;
}
}
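  Because MyRssIndexManage implements Runnable, indexing can be kicked off in a background thread. A hypothetical invocation (the index path is a placeholder):
MyRssIndexManage indexManage = new MyRssIndexManage();
indexManage.setIndexDir("/path/to/WEB-INF/index"); // placeholder path
indexManage.setIndexType("init"); // "init" rebuilds everything, "add" only indexes records not yet indexed
new Thread(indexManage).start();  // run the time-consuming indexing off the calling thread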
II. Full-text search with Lucene
Below is the source of the MyRssSearch class, which uses Lucene's Searcher and QueryParser to look keywords up in the index (a hypothetical call site follows the class).
package com.easyjf.lucene;
import java.util.List;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import com.easyjf.search.MyRssUtil;
import com.easyjf.search.SearchContent;
import com.easyjf.web.tools.IPageList;
import com.easyjf.web.tools.PageList;
public class MyRssSearch {
private String indexDir;
IndexReader ir;
Searcher search;
public IPageList search(String key,int pageSize,int currentPage)
{
IPageList pList=new PageList(new HitsQuery(doSearch(key)));
pList.doList(pageSize,currentPage,"","",null);
if(pList!=null)
{
List list=pList.getResult();
if(list!=null){
for(int i=0;i<list.size();i++){
list.set(i,lucene2searchObj((Document)list.get(i),key));
}
}
}
try{
if(search!=null)search.close();
if(ir!=null)ir.close();
}
catch(Exception e)
{
e.printStackTrace();
}
return pList;
}
private SearchContent lucene2searchObj(Document doc,String key)
{
SearchContent searchObj=new SearchContent();
String title=doc.getField("title").stringValue();
searchObj.setTitle(title.replaceAll(key,"<font color='red'>"+key+"</font>"));//wrap the matched keyword in highlight markup
searchObj.setTvalue(doc.getField("cid").stringValue());
searchObj.setUrl(doc.getField("url").stringValue());
searchObj.setSource(doc.getField("source").stringValue());
searchObj.setLastUpdated(doc.getField("inputTime").stringValue());
searchObj.setIntro(MyRssUtil.content2intro(doc.getField("content").stringValue(),key));
return searchObj;
}
public Hits doSearch(String key)
{
Hits hits=null;
try{
ir=IndexReader.open(indexDir);
search=new IndexSearcher(ir);
String fields[]={"title","content"};
QueryParser parser=new MultiFieldQueryParser(fields,new StandardAnalyzer());
Query query=parser.parse(key);
hits=search.search(query);
}
catch(Exception e)
{
e.printStackTrace();
}
//System.out.println("Search results: "+hits.length());
return hits;
}

public String getIndexDir() {
return indexDir;
}
public void setIndexDir(String indexDir) {
this.indexDir = indexDir;
}
}
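  A hypothetical call site, assuming the same index directory that MyRssIndexManage wrote to:
MyRssSearch myRssSearch = new MyRssSearch();
myRssSearch.setIndexDir("/path/to/WEB-INF/index"); // placeholder path
IPageList pList = myRssSearch.search("java", 20, 1); // keyword, page size, current page
List result = pList.getResult(); // one page of SearchContent objects, ready for the view layer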
  In the code above, the search method returns an IPageList wrapping the paged query results. IPageList is the paging engine of the EasyJWeb Tools business engine; for details on how it is used, see my earlier article 《EasyJWeb Tools中业务引擎分页的设计实现》 (on the design and implementation of paging in the EasyJWeb Tools business engine).

  To plug Lucene's Hits result structure into that paging engine, we wrote a query adapter, HitsQuery, shown below:
package com.easyjf.lucene;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.lucene.search.Hits;
import com.easyjf.web.tools.IQuery;
public class HitsQuery implements IQuery {
private int begin=0;
private int max=0;
private Hits hits;
public HitsQuery()
{

}
public HitsQuery(Hits hits)
{
if(hits!=null)
{
this.hits=hits;
this.max=hits.length();
}
}
public int getRows(String arg0) {
// TODO Auto-generated method stub
return (hits==null?0:hits.length());
}
public List getResult(String arg0) {
// TODO Auto-generated method stub
List list=new ArrayList();
for(int i=begin;i<max;i++){
try{
list.add(hits.doc(i));
}
catch(Exception e)
{
e.printStackTrace();
}
}
return list;
}
public void setFirstResult(int begin) {
// TODO Auto-generated method stub
this.begin=begin;
}
public void setMaxResults(int max) {
// TODO Auto-generated method stub
this.max=max;
}
public void setParaValues(Collection arg0) {
// TODO Auto-generated method stub

}
public List getResult(String condition, int begin, int max) {
// TODO Auto-generated method stub
if((begin>=0)&&(begin<max))this.begin=begin;
if(!(max>hits.length()))this.max=max;
return getResult(condition);
}
}
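  HitsQuery can also be exercised directly. A hypothetical snippet, which assumes (as the loop in getResult suggests) that setMaxResults takes an absolute end offset rather than a row count:
Hits hits = myRssSearch.doSearch("java"); // reuse the MyRssSearch instance from the previous sketch
HitsQuery query = new HitsQuery(hits);
query.setFirstResult(0);              // start of the first page
query.setMaxResults(20);              // end offset of the first page (page size 20)
List firstPage = query.getResult(""); // up to 20 Lucene Document objects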
III. Invoking the search from the web layer
  Now let's see how the web layer calls the full-text search exposed by the business logic layer. Below is the search-related part of the Action that handles user requests:
package com.easyjf.news.action;
import java.net.URLDecoder;
import java.net.URLEncoder;
//(the EasyJWeb framework imports are omitted in the original excerpt)
public class SearchAction implements IWebAction {
public Page doSearch(WebForm form,Module module)throws Exception
{
String key=CommUtil.null2String(form.get("v"));
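// The servlet container decodes GET parameters as ISO-8859-1 by default, so re-encode the raw bytes and decode them again as UTF-8 to recover the keyword correctly.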
key=URLDecoder.decode(URLEncoder.encode(key,"ISO8859_1"),"utf-8");
form.set("v",key);
form.addResult("v2",URLEncoder.encode(key,"utf-8"));
if(key.getBytes().length>2){
String orderBy=CommUtil.null2String(form.get("order"));
int currentPage=CommUtil.null2Int(form.get("page"));
int pageSize=CommUtil.null2Int(form.get("pageSize"));
if(currentPage<1)currentPage=1;//fall back to sensible defaults when the paging parameters are missing
if(pageSize<1)pageSize=20;
SearchEngine search=new SearchEngine(key,orderBy,pageSize,currentPage);
search.getLuceneSearch().setIndexDir(Globals.APP_BASE_DIR+"/WEB-INF/index");
search.doSearchByLucene();
IPageList pList=search.getResult();
if(pList!=null && pList.getRowCount()>0){
form.addResult("list",pList.getResult());
form.addResult("pages",new Integer(pList.getPages()));
form.addResult("rows",new Integer(pList.getRowCount()));
form.addResult("page",new Integer(pList.getCurrentPage()));
form.addResult("gotoPageHTML",CommUtil.showPageHtml(pList.getCurrentPage(),pList.getPages()));
}
else
{
form.addResult("notFound","true");//no matching data found
}
}
else
form.addResult("errMsg","The keyword you entered is too short!");
form.addResult("hotSearch",SearchEngine.getHotSearch(20));
return null;
}
}
The Lucene-related part of the SearchEngine class invoked above:
public class SearchEngine {
private MyRssSearch luceneSearch=new MyRssSearch();
public void doSearchByLucene()
{
SearchKey keyObj=readCache();
if(keyObj!=null){
result=luceneSearch.search(key,pageSize,currentPage);
if(updateStatus){
keyObj.setReadTimes(new Integer(keyObj.getReadTimes().intValue()+1));
keyObj.update();
}
}
else//the keyword is not cached yet: record it and generate the search result
{
keyObj=new SearchKey();
keyObj.setTitle(key);
keyObj.setLastUpdated(new Date());
keyObj.setReadTimes(new Integer(1));
keyObj.setStatus(new Integer(0));
keyObj.setSequence(new Integer(1));
keyObj.setVdate(new Date());
keyObj.save();
result=luceneSearch.search(key,pageSize,currentPage);

}
}
}
IV. The application in action
  This is the running search at myrss.easyjf.com, the Java information search service provided on the EasyJF team's official site.


