A Multi-threaded Java Web Crawler

Below is a Java crawler that starts from a given homepage, fetches the pages under that site's domain up to a specified depth, and maintains a simple index of what it saved. I have added a flowchart of the program to help readers follow it; note that the flowchart leaves out the steps that generate the crawl report and the index file.

[Flowchart: overall crawl flow (image omitted here)]

Parameters:

private int webDepth = 2;      // crawl depth: the homepage is at depth 1, and pages beyond this depth are not fetched
private int intThreadNum = 10; // number of crawler threads to start

While it runs, the program also writes a report.txt file in the working directory recording the crawl's progress, and when the crawl finishes it writes a fileindex.txt file indexing the saved pages (one entry per URL with its depth and the path of its saved file).
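For illustration, one entry in fileindex.txt would look like the following (a line I constructed from the code's format string, not captured output):

Web depth:2   Filepath: web/web1.htm   url:http://news.baidu.com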

The program exercises multithreading (shared state and synchronized methods), generics, file I/O, the URL and URLConnection classes, the Hashtable associative array, and regular expressions with their related classes. It takes command-line arguments: the first must be a valid URL string beginning with http://, used as the crawl's homepage; the optional second argument is a string convertible to int (i.e. one that Integer.parseInt(String s) accepts, such as 3), giving the crawl depth. If it is omitted, the depth defaults to 2.

The program's known shortcomings: only the three forms href=, href=' and href=" followed by an absolute URL are handled (URL forms in page source are messy, so even these are occasionally mis-parsed), while relative URLs and window.open(' links are not considered; a sketch of one way to handle relative links follows below. Exception handling is likewise rudimentary. If readers have improvements, please post your source code; I would be most grateful.
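One direction for the relative-URL gap: java.net.URL can resolve a relative reference against the URL of the page it was found on, after which the result can go through the same domain filter as absolute links. A minimal standalone sketch (my addition, with hypothetical example URLs; not part of the original program):

import java.net.MalformedURLException;
import java.net.URL;

public class ResolveRelative {
    public static void main(String[] args) throws MalformedURLException {
        // The URL of the page the link was found on (hypothetical example):
        URL base = new URL("http://www.example.com/a/index.htm");
        // new URL(context, spec) resolves a relative spec against the base URL:
        URL abs = new URL(base, "../img/logo.gif");
        System.out.println(abs); // prints http://www.example.com/img/logo.gif
    }
}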

The source code follows (save it as GetWeb.java):

import java.io.File;
import java.io.BufferedReader;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.Hashtable;

public class GetWeb {
private int webDepth = 2;               // crawl depth
private int intThreadNum = 10;          // number of crawler threads
private String strHomePage = "";        // homepage URL
private String myDomain;                // site domain
private String fPath = "web";           // directory for saved page files
private ArrayList<String> arrUrls = new ArrayList<String>();    // queue of URLs not yet fetched
private ArrayList<String> arrUrl = new ArrayList<String>();     // every URL seen, kept for building the index
private Hashtable<String,Integer> allUrls = new Hashtable<String,Integer>();     // URL -> page file number
private Hashtable<String,Integer> deepUrls = new Hashtable<String,Integer>();    // URL -> crawl depth
private int intWebIndex = 0;            // page file number, starting from 0
private String charset = "GB2312";
private String report = "";
private long startTime;
private int webSuccessed = 0;
private int webFailed = 0;


public GetWeb(String s){
this.strHomePage = s;
}

public GetWeb(String s,int i){
this.strHomePage = s;
this.webDepth = i;
}

public synchronized void addWebSuccessed(){
webSuccessed++;
}

public synchronized void addWebFailed(){
webFailed++;
}

public synchronized void addReport(String s){
// Appends to the in-memory report and rewrites report.txt on every call.
try{
    report += s;
    PrintWriter pwReport = new PrintWriter(new FileOutputStream("report.txt"));
    pwReport.println(report);
    pwReport.close();
}catch(Exception e){
    System.out.println("Failed to write report file!");
}
}

public synchronized String getAUrl(){
// Take the next unfetched URL; returns null when the queue is empty, so
// worker threads can test and take in one atomic step.
if (arrUrls.isEmpty()) return null;
return arrUrls.remove(0);
}

public synchronized String getUrl(){
// Take the next URL from the index list (only used single-threaded at the end).
return arrUrl.remove(0);
}

public synchronized Integer getIntWebIndex(){
intWebIndex++;
return intWebIndex;
}

public static void main(String[] args){
if (args.length == 0 || args[0].equals("")){
   System.out.println("No input!");
   System.exit(1);
}
else if(args.length == 1){
   GetWeb gw = new GetWeb(args[0]);
   gw.getWebByHomePage();
}
else{
   GetWeb gw = new GetWeb(args[0],Integer.parseInt(args[1]));
   gw.getWebByHomePage();
}
}

public void getWebByHomePage(){ // crawl all reachable pages, starting from the user-supplied homepage
startTime = System.currentTimeMillis();
this.myDomain = getDomain();
if (myDomain == null){
   System.out.println("Wrong input!");
   //System.exit(1);
   return;
}

System.out.println("Homepage = " + strHomePage);
addReport("Homepage = " + strHomePage + "!\n");
System.out.println("Domain = " + myDomain);
addReport("Domain = " + myDomain + "!\n");
arrUrls.add(strHomePage);
arrUrl.add(strHomePage);
allUrls.put(strHomePage,0);
deepUrls.put(strHomePage,1);

File fDir = new File(fPath);
if(!fDir.exists()){fDir.mkdir();}

System.out.println("Start!");
this.addReport("Start!\n");
String tmp = getAUrl();                                 // take the homepage off the queue
this.getWebByUrl(tmp,charset,allUrls.get(tmp)+"");      // fetch it (depth 1) to seed the queue
for (int i = 0; i < intThreadNum; i++){
   new Thread(new Processer(this)).start();
}
while (true){
   if(arrUrls.isEmpty() && Thread.activeCount() == 1){
    long finishTime = System.currentTimeMillis();
    long costTime = finishTime - startTime;
    System.out.println("\n\n\n\n\nFinished!");
    addReport("\n\n\n\n\nFinished!\n");
    System.out.println("Start time = " + startTime + "   " + "Finish time = " + finishTime + "   " + "Cost time = " + costTime + "ms");
    addReport("Start time = " + startTime + "   " + "Finish time = " + finishTime + "   " + "Cost time = " + costTime + "ms" + "\n");
    System.out.println("Total url number = " + (webSuccessed + webFailed) + "   Successed: " + webSuccessed + "   Failed: " + webFailed);
    addReport("Total url number = " + (webSuccessed + webFailed) + "   Successed: " + webSuccessed + "   Failed: " + webFailed + "\n");

    String strIndex = "";
    String tmpUrl = "";
    while (!arrUrl.isEmpty()){
     tmpUrl = getUrl();
     strIndex += "Web depth:" + deepUrls.get(tmpUrl) + "   Filepath: " + fPath + "/web" + allUrls.get(tmpUrl) + ".htm" + "   url:" + tmpUrl + "\n\n";
    }
    System.out.println(strIndex);
    try{
     PrintWriter pwIndex = new PrintWriter(new FileOutputStream("fileindex.txt"));
     pwIndex.println(strIndex);
     pwIndex.close();
    }catch(Exception e){
     System.out.println("Failed to write index file!");
    }
    break;
   }
}
}

public void getWebByUrl(String strUrl,String charset,String fileIndex)
{   // fetch the page at strUrl, save it to fPath/web<fileIndex>.htm, and
    // extract links from each line while the page is above the depth limit
try
{
   System.out.println("Getting web by url: " + strUrl);
   addReport("Getting web by url: " + strUrl + "\n");

   URL url = new URL(strUrl);
   URLConnection conn = url.openConnection();
   InputStream is = conn.getInputStream();

   String filePath = fPath + "/web" + fileIndex + ".htm";
   FileOutputStream fos = new FileOutputStream(filePath);
   OutputStreamWriter writer = new OutputStreamWriter(fos,charset);   // write with the same charset we read
   PrintWriter pw = new PrintWriter(writer);
   BufferedReader bReader = new BufferedReader(new InputStreamReader(is,charset));
   String rLine = null;
   while ((rLine = bReader.readLine()) != null){
    if (rLine.length() > 0){
     pw.println(rLine);
     pw.flush();
     if (deepUrls.get(strUrl) < webDepth)getUrlByString(rLine,strUrl);
    }
   }
   is.close();
   pw.close();
   System.out.println("Get web successfully! " + strUrl);
   addReport("Get web successfully! " + strUrl + "\n");
   addWebSuccessed();
}catch (Exception e){
   System.out.println("Get web failed!       " + strUrl);
   addReport("Get web failed!       " + strUrl + "\n");
   addWebFailed();
}
}

public String getDomain(){ // extract the site domain (e.g. baidu.com) from the homepage URL; null means invalid input
String reg = "(?<=http\\://[a-zA-Z0-9]{0,100}[.]{0,1})[^.\\s]*?\\.(com|cn|net|org|biz|info|cc|tv)";
Pattern p = Pattern.compile(reg,Pattern.CASE_INSENSITIVE);
Matcher m = p.matcher(strHomePage);
if (m.find()){
   return m.group(0);
}
return null;
}

public synchronized void getUrlByString(String inputArgs,String strUrl){   // extract links from one line of page source
// Only href= / href=' / href=" followed by an absolute http:// URL inside this
// site's domain are recognized. (The original pattern began with [http://], a
// single-character class; a literal http:// is what was intended.)
// synchronized: worker threads share the unsynchronized ArrayLists below.
String regUrl = "(?<=(href=)[\"]?[\']?)http://[^\\s\"\'\\?]*(" + myDomain + ")[^\\s\"\'>]*";
Pattern p = Pattern.compile(regUrl,Pattern.CASE_INSENSITIVE);
Matcher m = p.matcher(inputArgs);
while (m.find()){
   String newUrl = m.group(0);
   if (!allUrls.containsKey(newUrl)){
    System.out.println("Find a new url,depth:" + (deepUrls.get(strUrl)+1) + " " + newUrl);
    addReport("Find a new url,depth:" + (deepUrls.get(strUrl)+1) + " " + newUrl + "\n");
    arrUrls.add(newUrl);
    arrUrl.add(newUrl);
    allUrls.put(newUrl,getIntWebIndex());
    deepUrls.put(newUrl,(deepUrls.get(strUrl)+1));
   }
}
}

class Processer implements Runnable{ // a crawler worker thread
GetWeb gw;
public Processer(GetWeb g){
    this.gw = g;
}
public void run(){
    // getAUrl() returns null once the queue is empty, which both ends the
    // thread and closes the check-then-take race between workers.
    String tmp;
    while ((tmp = getAUrl()) != null){
      getWebByUrl(tmp,charset,allUrls.get(tmp)+"");
    }
}
}
}
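A note on the shutdown logic: the busy-wait loop that tests arrUrls.isEmpty() && Thread.activeCount() == 1 burns CPU, can be confused by unrelated live threads, and workers may exit while another worker is still fetching a page that would yield new URLs. In the spirit of the improvements the article invites, here is a minimal sketch (my own addition, not the original design; all names in it are illustrative) of a sturdier pattern: count tasks in flight with an AtomicInteger and signal completion with a CountDownLatch, so the crawl is finished exactly when the count reaches zero.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

class CrawlScheduler {
    private final ExecutorService pool = Executors.newFixedThreadPool(10); // plays the role of intThreadNum
    private final AtomicInteger pending = new AtomicInteger();             // tasks submitted but not yet done
    private final CountDownLatch done = new CountDownLatch(1);

    void submit(final String url){
        pending.incrementAndGet();
        pool.execute(new Runnable(){
            public void run(){
                try{
                    // fetch url here; call submit(child) for every new link found,
                    // so children are counted before this task finishes
                }finally{
                    if (pending.decrementAndGet() == 0) done.countDown();
                }
            }
        });
    }

    void awaitFinish() throws InterruptedException {
        done.await();   // wakes exactly when the last task has finished
        pool.shutdown();
    }
}

Because every child URL is submitted (and counted) before its parent task decrements pending, the counter can only reach zero when no page remains to fetch.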

How to run:

Pass command-line arguments: the first is the start URL, which must include the http:// prefix. For example:
D:\tmp>javac GetWeb.java

D:\tmp>java GetWeb http://www.baidu.com 3

Sample output:

Homepage = http://www.baidu.com!
Domain = baidu.com!
Start!


Getting web by url: http://www.baidu.com

Find a new url,depth:2 http://passport.baidu.com/?login&tpl=mn

Find a new url,depth:2 http://news.baidu.com

Find a new url,depth:2 http://tieba.baidu.com

Find a new url,depth:2 http://zhidao.baidu.com

Find a new url,depth:2 http://mp3.baidu.com

Find a new url,depth:2 http://image.baidu.com

Find a new url,depth:2 http://video.baidu.com

Find a new url,depth:2 http://hi.baidu.com

Find a new url,depth:2 http://utility.baidu.com/traf/click.php?id=215&url=http://www.baidu.com

Find a new url,depth:2 http://bar.baidu.com/sobar/prom23.html

Find a new url,depth:2 http://e.baidu.com

Find a new url,depth:2 http://top.baidu.com

Find a new url,depth:2 http://ir.baidu.com

Find a new url,depth:2 http://www.baidu.com/duty/

Find a new url,depth:2 http://hi.baidu.com/baidu/

Get web successfully! http://www.baidu.com


Getting web by url: http://passport.baidu.com/?login&tpl=mn

Getting web by url: http://news.baidu.com

Getting web by url: http://tieba.baidu.com

Getting web by url: http://zhidao.baidu.com

Getting web by url: http://mp3.baidu.com

Getting web by url: http://image.baidu.com

Getting web by url: http://video.baidu.com

Getting web by url: http://utility.baidu.com/traf/click.php?id=215&url=http://www.baidu.com


Find a new url,depth:3 http://news.baidu.com/

Find a new url,depth:3 http://www.baidu.com/

Find a new url,depth:3 http://tieba.baidu.com/

Find a new url,depth:3 http://hi.baidu.com/

Find a new url,depth:3 http://tieba.baidu.com/

Find a new url,depth:3 http://zhidao.baidu.com/

Find a new url,depth:3 http://mp3.baidu.com/

Getting web by url: http://hi.baidu.com

Find a new url,depth:3 http://hi.baidu.com/

Find a new url,depth:3 http://image.baidu.com/

Find a new url,depth:3 http://video.baidu.com/

Find a new url,depth:3 https://passport.baidu.com/?getpass
