一种提取HTML网页正文的方法

package getContent;

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Stack;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class getContent {

    /**
     * Demo entry point: fetches one fixed news page and prints the extracted
     * body text, title, keywords and description.
     *
     * @param args unused
     */
    public static void main(String[] args){
        String url = "http://view.news.qq.com/original/intouchtoday/n3702.html";
        HashMap<String, String> hm = parseHtml(url);
        System.out.println("网页正文如下:\n"+hm.get("content"));
        System.out.println("标题:\n"+hm.get("title"));
        System.out.println("关键字:"+hm.get("keywords"));
        System.out.println("描述:"+ hm.get("description"));
    }

    /**
     * Downloads the page at {@code url} and extracts its main text content
     * plus basic head metadata.
     *
     * @param url the page URL to fetch and parse
     * @return a map with keys {@code "content"}, {@code "title"},
     *         {@code "keywords"} and {@code "description"}; an empty map if
     *         the page could not be fetched
     */
    public static HashMap<String,String> parseHtml(String url) {
        Document doc;
        try {
            doc = Jsoup.connect(url)
                    .userAgent("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31")
                    .get();
        } catch (IOException e) {
            // BUG FIX: the original fell through with doc == null and threw a
            // NullPointerException below; report the failure and bail out.
            e.printStackTrace();
            return new HashMap<String, String>();
        }

        HashMap<String, String> hm = new HashMap<String, String>();
        hm.put("content", getDocContent(doc));
        hm.put("title", doc.head().select("title").text());
        // Duplicate hm.put("keywords", ...) from the original removed.
        hm.put("keywords", doc.head().select("meta[name=Keywords]").attr("content"));
        hm.put("description", doc.head().select("meta[name=description]").attr("content"));
        return hm;
    }

    /**
     * Heuristic body detection: the {@code <div>} whose collected text (see
     * {@link #getDivContent}) is longest is assumed to hold the article body.
     *
     * @return the longest div text, or {@code null} if the page has no divs
     */
    private static String getDocContent(Document doc) {
        Elements divs = doc.body().getElementsByTag("div");
        int max = -1;
        String content = null; // stays null only when the page has no <div>
        for (Element div : divs) {
            String divContent = getDivContent(div);
            if (divContent.length() > max) {
                max = divContent.length();
                content = divContent;
            }
        }
        return content;
    }

    /**
     * Collects the visible text of a single div, walking its subtree
     * depth-first so paragraph order is preserved. Nested divs are skipped
     * (each is scored separately by {@link #getDocContent}); {@code <p>}
     * elements are taken only when they contain no links and are not
     * "pictext" picture captions; {@code <td>} cells are taken whole unless
     * they themselves contain a div.
     */
    private static String getDivContent(Element div) {
        StringBuilder sb = new StringBuilder();
        Deque<Element> stack = new ArrayDeque<Element>();
        stack.push(div);
        while (!stack.isEmpty()) {
            Element e = stack.pop();
            // Nested divs get their own getDivContent pass; skip them here.
            if (e != div && e.tagName().equals("div")) continue;
            if (e.tagName().equals("p") && e.getElementsByTag("a").isEmpty()) {
                // Body text in <p>; skip picture captions (class "pictext").
                if (e.className().equals("pictext")) continue;
                sb.append(e.text()).append('\n');
                continue;
            }
            if (e.tagName().equals("td")) {
                // Body text in a table cell, unless the cell wraps more divs.
                if (!e.getElementsByTag("div").isEmpty()) continue;
                sb.append(e.text()).append('\n');
                continue;
            }
            // Push children in reverse so they pop in document order (DFS).
            Elements children = e.children();
            for (int i = children.size() - 1; i >= 0; i--) {
                stack.push(children.get(i));
            }
        }
        return sb.toString();
    }

}

上面的代码使用了 JSoup 这个 HTML 解析包。
转自(参考)博客:blog.csdn.net/zhihaoma/article/details/53207100

猜你喜欢

转载自blog.csdn.net/zhihaoma/article/details/53207100