package com.common;

// http://127.0.0.1:8080/zz3zcwbwebhome/index.jsp
// http://127.0.0.1:8080/zz3zcwbwebhome/reply.jsp

import java.util.*;
import java.net.*;
import java.io.*;
import java.util.regex.*;

public class SearchCrawler implements Runnable {

    // List of error messages.
    ArrayList<String> errorList = new ArrayList<String>();
    // List of search results: URLs of pages that contain the search string.
    ArrayList<String> resultList = new ArrayList<String>();
    String startUrl;     // starting point (base URL) of the search
    String searchString; // string to search for
    int maxUrl;          // maximum number of URLs to process
    boolean caseSensitive = false; // whether matching is case sensitive
    boolean limitHost = false;     // whether to restrict the search to the start host
    // Cache of disallowed paths from robots.txt, keyed by host.
    HashMap<String, ArrayList<String>> disallowListCache = new HashMap<String, ArrayList<String>>();

    public SearchCrawler() {
    }

    public SearchCrawler(String startUrl, int maxUrl, String searchString) {
        this.startUrl = startUrl;
        this.maxUrl = maxUrl;
        this.searchString = searchString;
    }

    public ArrayList<String> getResultList() {
        return resultList;
    }

    // Entry point of the crawler thread: start the search.
    public void run() {
        crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
    }

    // Check the URL format; only HTTP URLs are handled.
    private URL verifyUrl(String url) {
        if (!url.toLowerCase().startsWith("http://"))
            return null;
        try {
            return new URL(url);
        } catch (Exception e) {
            return null;
        }
    }

    // Check whether robots are allowed to access the given URL, per the host's
    // robots.txt. (User-agent sections are ignored; every Disallow line is collected.)
    private boolean isRobotAllowed(URL urlToCheck) {
        // Get the host of the URL.
        String host = urlToCheck.getHost().toLowerCase();

        // Look up the cached list of paths this host disallows.
        ArrayList<String> disallowList = disallowListCache.get(host);

        // If there is no cached list yet, download robots.txt and cache it.
        if (disallowList == null) {
            disallowList = new ArrayList<String>();
            try {
                URL robotsFileUrl = new URL("http://" + host + "/robots.txt");
                BufferedReader reader = new BufferedReader(new InputStreamReader(robotsFileUrl.openStream()));

                // Read the robots file and build the list of disallowed paths.
                String line;
                String disallowPath;
                while ((line = reader.readLine()) != null) {
                    if (line.indexOf("Disallow:") == 0) {
                        disallowPath = line.substring("Disallow:".length());
                        // Strip a trailing comment, if any.
                        int commentIndex = disallowPath.indexOf("#");
                        if (commentIndex != -1) {
                            disallowPath = disallowPath.substring(0, commentIndex);
                        }
                        disallowPath = disallowPath.trim();
                        // An empty Disallow line means "allow everything"; skip it,
                        // or file.startsWith("") below would match every URL.
                        if (disallowPath.length() > 0) {
                            disallowList.add(disallowPath);
                        }
                    }
                }
                reader.close();
                disallowListCache.put(host, disallowList);
            } catch (Exception e) {
                // No robots.txt (or it is unreadable): assume access is allowed.
                return true;
            }
        }

        String file = urlToCheck.getFile();
        for (int i = 0; i < disallowList.size(); i++) {
            String disallow = disallowList.get(i);
            if (file.startsWith(disallow)) {
                return false;
            }
        }
        return true;
    }

    // Remove "www" from a URL, so http://www.host and http://host are
    // treated as the same page.
    private String removeWwwFromUrl(String url) {
        int index = url.indexOf("://www.");
        if (index != -1) {
            return url.substring(0, index + 3) + url.substring(index + 7);
        }
        return url;
    }

    // Parse the page and collect its links.
    private ArrayList<String> retrieveLinks(URL pageUrl, String pageContents, HashSet<String> crawledList, boolean limitHost) {
        // Compile the link-matching pattern. Inside a character class '|' is a
        // literal, so the terminator class is just [">]: a quote or '>'.
        Pattern p = Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\">]", Pattern.CASE_INSENSITIVE);
        Matcher m = p.matcher(pageContents);

        // getPort() returns -1 when the URL has no explicit port; in that case
        // no ":port" part may be appended when rebuilding links.
        String portPart = (pageUrl.getPort() == -1) ? "" : ":" + pageUrl.getPort();

        ArrayList<String> linkList = new ArrayList<String>();
        while (m.find()) {
            String link = m.group(1).trim();

            if (link.length() < 1) {
                continue;
            }
            // Skip in-page anchors.
            if (link.charAt(0) == '#') {
                continue;
            }
            if (link.indexOf("mailto:") != -1) {
                continue;
            }
            if (link.toLowerCase().indexOf("javascript") != -1) {
                continue;
            }

            if (link.indexOf("://") == -1) {
                if (link.charAt(0) == '/') {
                    // Absolute path on the same host.
                    link = "http://" + pageUrl.getHost() + portPart + link;
                } else {
                    // Relative path: resolve against the current page's directory.
                    String file = pageUrl.getFile();
                    if (file.indexOf('/') == -1) {
                        link = "http://" + pageUrl.getHost() + portPart + "/" + link;
                    } else {
                        String path = file.substring(0, file.lastIndexOf('/') + 1);
                        link = "http://" + pageUrl.getHost() + portPart + path + link;
                    }
                }
            }

            // Strip any fragment.
            int index = link.indexOf('#');
            if (index != -1) {
                link = link.substring(0, index);
            }

            link = removeWwwFromUrl(link);

            URL verifiedLink = verifyUrl(link);
            if (verifiedLink == null) {
                continue;
            }

            // If restricting to one host, exclude URLs on other hosts.
            if (limitHost && !pageUrl.getHost().toLowerCase().equals(verifiedLink.getHost().toLowerCase())) {
                continue;
            }

            // Skip links that have already been crawled.
            if (crawledList.contains(link)) {
                continue;
            }

            linkList.add(link);
        }
        return linkList;
    }

    // Determine whether the page contains the search string: split the search
    // string on whitespace and require every term to occur in the page.
    private boolean searchStringMatches(String pageContents, String searchString, boolean caseSensitive) {
        String searchContents = pageContents;
        if (!caseSensitive) {
            searchContents = pageContents.toLowerCase();
        }

        Pattern p = Pattern.compile("[\\s]+");
        String[] terms = p.split(searchString);
        for (int i = 0; i < terms.length; i++) {
            if (caseSensitive) {
                if (searchContents.indexOf(terms[i]) == -1) {
                    return false;
                }
            } else {
                if (searchContents.indexOf(terms[i].toLowerCase()) == -1) {
                    return false;
                }
            }
        }
        return true;
    }

    // Perform the actual search: a breadth-first crawl from startUrl that
    // collects the URLs of pages containing the search string.
    public ArrayList<String> crawl(String startUrl, int maxUrls, String searchString, boolean limitHost, boolean caseSensitive) {
        System.out.println("searchString=" + searchString);
        HashSet<String> crawledList = new HashSet<String>();
        LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>();

        if (maxUrls < 1) {
            errorList.add("Invalid Max URLs value.");
            System.out.println("Invalid Max URLs value.");
        }

        if (searchString.length() < 1) {
            errorList.add("Missing Search String.");
            System.out.println("Missing Search String.");
        }

        if (errorList.size() > 0) {
            System.out.println("err!!!");
            return errorList;
        }

        // Remove "www" from the start URL and seed the crawl list with it.
        startUrl = removeWwwFromUrl(startUrl);
        toCrawlList.add(startUrl);

        while (toCrawlList.size() > 0) {
            if (maxUrls != -1) {
                if (crawledList.size() == maxUrls) {
                    break;
                }
            }

            // Get the URL at the head of the list and remove it.
            String url = toCrawlList.iterator().next();
            toCrawlList.remove(url);

            // Convert the string url to a URL object; skip it if malformed.
            URL verifiedUrl = verifyUrl(url);
            if (verifiedUrl == null) {
                continue;
            }

            // Skip the URL if robots are not allowed to access it.
            if (!isRobotAllowed(verifiedUrl)) {
                continue;
            }

            // Mark the URL as crawled.
            crawledList.add(url);

            String pageContents = downloadPage(verifiedUrl, "gb2312");

            if (pageContents != null && pageContents.length() > 0) {
                // Collect the valid links from the page and queue them.
                ArrayList<String> links = retrieveLinks(verifiedUrl, pageContents, crawledList, limitHost);
                toCrawlList.addAll(links);

                if (searchStringMatches(pageContents, searchString, caseSensitive)) {
                    resultList.add(url);
                    System.out.println(url);
                }
            }
        }
        return resultList;
    }

    public String downloadPage(URL pageUrl) {
        try {
            HttpURLConnection conn = (HttpURLConnection) pageUrl.openConnection();
            conn.setUseCaches(false);
            conn.setRequestMethod("GET");
            conn.setRequestProperty("User-agent", "Mozilla/5.0 Chrome/18.0.1025.166 Safari/535.19");
            conn.connect();
            BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));

            // Read the page into a buffer, restoring the line breaks that
            // readLine() strips.
            String line;
            StringBuffer pageBuffer = new StringBuffer();
            while ((line = reader.readLine()) != null) {
                pageBuffer.append(line).append('\n');
            }
            reader.close();
            return pageBuffer.toString();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }

    public String downloadPage(URL pageUrl, String codingPattern) {
        try {
            HttpURLConnection conn = (HttpURLConnection) pageUrl.openConnection();
            conn.setUseCaches(false);
            conn.setRequestMethod("GET");
            conn.setRequestProperty("User-agent", "Mozilla/5.0 (Linux; Android 4.2.1; Nexus 7 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19");
            conn.connect();

            // Read the response body from the configured connection with the
            // given character encoding (opening a second connection via
            // pageUrl.openStream() would bypass the settings above).
            BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), codingPattern));

            // Read the page into a buffer, restoring the line breaks that
            // readLine() strips.
            String line;
            StringBuffer pageBuffer = new StringBuffer();
            while ((line = reader.readLine()) != null) {
                pageBuffer.append(line).append('\n');
            }
            reader.close();
            return pageBuffer.toString();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
}
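
// Usage sketch (an assumption, not part of the original class): a minimal
// driver showing how SearchCrawler is meant to be invoked. The start URL is
// taken from the test URLs noted at the top of this file; substitute any
// reachable HTTP site, and pick your own search term and URL limit.
class SearchCrawlerDemo {
    public static void main(String[] args) throws InterruptedException {
        SearchCrawler crawler = new SearchCrawler("http://127.0.0.1:8080/zz3zcwbwebhome/index.jsp", 20, "java");

        // Run on a separate thread: run() delegates to crawl(...).
        Thread t = new Thread(crawler);
        t.start();
        t.join(); // wait for the crawl to finish before reading the results
        System.out.println("hits: " + crawler.getResultList());

        // Alternatively, search synchronously on the current thread:
        // ArrayList<String> hits = crawler.crawl("http://127.0.0.1:8080/zz3zcwbwebhome/index.jsp", 20, "java", false, false);
    }
}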