get.addHeader("Accept", "text/html");
get.addHeader("Accept-Charset", "utf-8");
get.addHeader("Accept-Encoding", "gzip");
get.addHeader("Accept-Language", "en-US,en");
get.addHeader("User-Agent", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.160 Safari/537.22");
response = client.execute(get);
HttpEntity entity = response.getEntity();
Header header = entity.getContentEncoding();
if (header != null)
{
HeaderElement[] codecs = header.getElements();
for (int i = 0; i < codecs.length; i++)
{
if (codecs[i].getName().equalsIgnoreCase("gzip"))
{
response.setEntity(new GzipDecompressingEntity(entity));
}
}
}
return response;
HttpResponse response = null;
HttpGet get = new HttpGet(url);
get.addHeader("Accept-Charset", "utf-8");
get.addHeader("Accept-Encoding", "gzip");
get.addHeader("Accept-Language", "en-US,en");
get.addHeader("User-Agent", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.160 Safari/537.22");
response = client.execute(get);
HttpEntity entity = response.getEntity();
Header header = entity.getContentEncoding();
if (header != null)
{
HeaderElement[] codecs = header.getElements();
for (int i = 0; i < codecs.length; i++)
{
if (codecs[i].getName().equalsIgnoreCase("gzip"))
{
response.setEntity(new GzipDecompressingEntity(entity));
}
}
}
return response;
Just set whichever headers you need. If you want a number of different User-Agents to rotate through (hitting the same site frequently with a single User-Agent makes it easy to be identified as a crawler and blocked), you can find lists online, copy the string from your own Chrome browser, or capture one with a packet-sniffing tool. Note that once Accept-Encoding is set to gzip, you have to check whether the content the site sends back is compressed and, if it is, decompress it, as the code after the client.execute(get) call in the example above does.
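If you do collect a pool of User-Agent strings, one simple way to rotate them is to keep them in an array and pick one at random for each request. Below is a minimal sketch; the USER_AGENTS array and the setRandomUserAgent helper are made-up names for illustration, and the array should be filled with strings gathered as described above:

import java.util.Random;

import org.apache.http.client.methods.HttpGet;

public class UserAgentRotator
{
    // Hypothetical pool; fill it with User-Agent strings copied from your
    // browser, captured with a sniffer, or found online.
    private static final String[] USER_AGENTS = {
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.160 Safari/537.22",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0"
    };

    private static final Random RANDOM = new Random();

    // Overwrite the User-Agent header with a randomly chosen entry so that
    // repeated requests to the same site do not all carry the same value.
    public static void setRandomUserAgent(HttpGet get)
    {
        get.setHeader("User-Agent", USER_AGENTS[RANDOM.nextInt(USER_AGENTS.length)]);
    }
}

Using setHeader rather than addHeader means any previously set User-Agent is replaced instead of being sent twice.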