A simple web-crawler implementation in Java

Example 1: a site that can be fetched directly, with no extra request headers

/**
 * Minimal crawler example: fetches https://www.baidu.com over a plain
 * URL stream and prints the response body line by line.
 */
public class SpiderTest {
  public static void main(String[] args) throws IOException {
    URL url = new URL("https://www.baidu.com");
    // try-with-resources guarantees the stream and reader are closed even if
    // readLine() throws; the original only closed them on the success path.
    // StandardCharsets.UTF_8 avoids the charset-name lookup (and its possible
    // UnsupportedEncodingException) of the "utf-8" string form.
    try (BufferedReader br = new BufferedReader(
        new InputStreamReader(url.openStream(), java.nio.charset.StandardCharsets.UTF_8))) {
      String msg;
      while ((msg = br.readLine()) != null) {
        System.out.println(msg);
      }
    }
  }
}

Example 2: a site that rejects plain requests — a browser User-Agent header must be sent

/**
 * Crawler example for a site (https://www.jd.com) that refuses requests
 * without a browser-like User-Agent header: opens an HttpURLConnection,
 * sets the header, and prints the response body line by line.
 */
public class SpiderTest2 {
  public static void main(String[] args) throws IOException {
    URL url = new URL("https://www.jd.com");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    // Spoof a browser UA; without it the server rejects the request.
    conn.setRequestProperty("User-Agent",
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0");
    // BUG fixed: the original also called url.openStream(), which opened a
    // SECOND, header-less connection that was never read and never closed
    // (resource leak). Only conn.getInputStream() — which carries the
    // User-Agent header — should be used.
    try (BufferedReader br = new BufferedReader(new InputStreamReader(
        conn.getInputStream(), java.nio.charset.StandardCharsets.UTF_8))) {
      String msg;
      while ((msg = br.readLine()) != null) {
        System.out.println(msg);
      }
    } finally {
      // Release the underlying connection even if reading failed.
      conn.disconnect();
    }
  }
}

 

Related posts

Origin www.cnblogs.com/5aixin/p/11094702.html