Looking for a recommendation: a small tool that extracts all URLs from a website in one click and deduplicates them by domain. For example:
http://a.com/b
http://a.com/bb.html
should be deduplicated automatically.
using System;
using System.Collections.Generic;
using System.Text.RegularExpressions;

#region Crawl all links on a page
public static List<string> GetAllHref(string url)
{
    List<string> allHref = new List<string>();
    try
    {
        // soso.getHtml is the helper posted further down in this thread
        string strhtml = soso.getHtml(url, "", true);
        if (strhtml != "error")
        {
            Regex reg = new Regex(@"(?is)<a[^>]*?href=(['""]?)(?<url>[^'""\s>]+)\1[^>]*>(?<text>(?:(?!</?a\b).)*)</a>");
            MatchCollection mc = reg.Matches(strhtml);
            Uri baseUri = new Uri(url);
            foreach (Match m in mc)
            {
                string href = m.Groups["url"].Value;
                string fullUrl;
                if (href.StartsWith("http"))
                {
                    // Already absolute, use it as-is
                    fullUrl = href;
                }
                else
                {
                    // Resolve relative links against the page URL
                    fullUrl = new Uri(baseUri, href).ToString();
                }
                allHref.Add(fullUrl);
                //Console.WriteLine("Original link: " + href);
                //Console.WriteLine("Anchor text: " + m.Groups["text"].Value);
                //Console.WriteLine("Resolved link: " + fullUrl);
            }
        }
    }
    catch (Exception)
    {
        // Swallow errors and return whatever was collected so far
    }
    return allHref;
}
#endregion
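For a quick sanity check of the href regex and the relative-link resolution, here is a minimal standalone sketch; the HTML snippet and page URL are made up for illustration:

Regex reg = new Regex(@"(?is)<a[^>]*?href=(['""]?)(?<url>[^'""\s>]+)\1[^>]*>(?<text>(?:(?!</?a\b).)*)</a>");
string html = "<a href='/b'>B</a> <a href=\"http://a.com/bb.html\">BB</a>";
Uri baseUri = new Uri("http://a.com/");
foreach (Match m in reg.Matches(html))
{
    string href = m.Groups["url"].Value;
    // Relative links resolve against the base URI; absolute ones pass through
    Console.WriteLine(new Uri(baseUri, href)); // prints http://a.com/b, then http://a.com/bb.html
}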
#region Deduplicate results
/// <summary>
/// Removes duplicates from a List&lt;string&gt;, preserving order
/// </summary>
/// <param name="list">the list to deduplicate</param>
/// <returns>a new list without duplicates</returns>
public static List<string> getUnqueList(List<string> list)
{
    List<string> list1 = new List<string>();
    HashSet<string> seen = new HashSet<string>();
    foreach (string s in list)
    {
        // HashSet.Add returns false when the value is already present
        if (seen.Add(s))
        {
            list1.Add(s);
        }
    }
    return list1;
}
#endregion
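Note that getUnqueList deduplicates by the full URL, while the original question asked for deduplication by domain. A minimal sketch of that using LINQ (GetUniquePerDomain is a made-up name; it keeps the first URL seen per host):

using System;
using System.Collections.Generic;
using System.Linq;

public static List<string> GetUniquePerDomain(List<string> urls)
{
    return urls
        .Where(u => Uri.IsWellFormedUriString(u, UriKind.Absolute))
        // Group by host name, ignoring case, and keep the first URL per domain
        .GroupBy(u => new Uri(u).Host, StringComparer.OrdinalIgnoreCase)
        .Select(g => g.First())
        .ToList();
}

With the two example URLs from the question, this returns just http://a.com/b.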
李小冲 posted at 2016-2-26 16:17
Thank you so much!! Really appreciated!!

guys posted at 2016-2-26 16:20
Thank you so much!! Really appreciated!!
A getHtml method is missing from the code above; use this one:
using System;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;

// url is the page to fetch; charSet is the target page's encoding.
// Pass null or "" for charSet to have the encoding detected from the page itself.
public string getHtml(string url, string charSet, bool UseUTF8CharSet)
{
    string strWebData = "error";
    try
    {
        WebClient myWebClient = new WebClient(); // create the WebClient instance
        // Note: some pages may fail to download for various reasons,
        // e.g. they need a cookie or have encoding quirks. Handle those
        // case by case, for example by adding a cookie header:
        // myWebClient.Headers.Add("Cookie", cookie);
        myWebClient.Headers.Add("User-agent", "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)");
        //myWebClient.Headers.Add("User-agent", "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)");
        // Network credentials used to authenticate the request against the resource
        myWebClient.Credentials = CredentialCache.DefaultCredentials;
        // If the server requires a username and password:
        //NetworkCredential mycred = new NetworkCredential(struser, strpassword);
        //myWebClient.Credentials = mycred;
        // Download the resource as a byte array
        byte[] myDataBuffer = myWebClient.DownloadData(url);
        strWebData = Encoding.Default.GetString(myDataBuffer);
        // Read the page's declared character encoding from its <meta> tag
        Match charSetMatch = Regex.Match(strWebData, "<meta([^<]*)charset=([^<]*)\"", RegexOptions.IgnoreCase | RegexOptions.Multiline);
        string webCharSet = charSetMatch.Groups[2].Value;
        if (string.IsNullOrEmpty(charSet))
            charSet = webCharSet;
        if (charSet.Length > 0)
        {
            charSet = charSet.Replace("\"", "");
        }
        if (UseUTF8CharSet && charSet.Length == 0)
        {
            // Fall back to UTF-8 when no encoding was declared
            charSet = "utf-8";
        }
        if (charSet.Length > 0 && Encoding.GetEncoding(charSet) != Encoding.Default)
            strWebData = Encoding.GetEncoding(charSet).GetString(myDataBuffer);
    }
    catch (Exception)
    {
        strWebData = "error";
    }
    return strWebData;
}
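Putting it all together, a minimal usage sketch (assuming GetAllHref and getUnqueList live in the same class, and that soso is whatever class or instance exposes the getHtml above):

List<string> all = GetAllHref("http://a.com/");
List<string> unique = getUnqueList(all);
foreach (string link in unique)
{
    Console.WriteLine(link);
}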