
Python Crawler: Collecting All External Links on a Site (Code Example)

Author: 土肥圆的猿  Published: 2021-03-01 09:52:39

Tags: python, crawler

[Flowchart: web crawler that collects all external links on a site]

The example below starts from this site's article "python绘制条形图方法代码详解" and crawls outward from there; use it as a reference.

Full code:


#! /usr/bin/env python
# coding=utf-8

from urllib.request import urlopen
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import re
import datetime
import random

random.seed(datetime.datetime.now().timestamp())

# Retrieves a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    internalLinks = []
    # Finds all links that begin with a "/" or contain the current domain
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLinks:
                internalLinks.append(link.attrs['href'])
    return internalLinks

# Retrieves a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Finds all links that start with "http" or "www" and do
    # not contain the current domain
    for link in bsObj.findAll("a",
                              href=re.compile("^(http|www)((?!" + excludeUrl + ").)*$")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLinks:
                externalLinks.append(link.attrs['href'])
    return externalLinks

# Strips the scheme and splits the address, so element [0] is the domain
def splitAddress(address):
    addressParts = address.replace("http://", "").replace("https://", "").split("/")
    return addressParts

def getRandomExternalLink(startingPage):
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html, "html.parser")
    externalLinks = getExternalLinks(bsObj, splitAddress(startingPage)[0])
    if len(externalLinks) == 0:
        # No external links on this page: fall back to a random internal link
        internalLinks = getInternalLinks(bsObj, splitAddress(startingPage)[0])
        return internalLinks[random.randint(0, len(internalLinks) - 1)]
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]

def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print("Random external link is: " + externalLink)
    followExternalOnly(externalLink)

# Collects a list of all external URLs found on the site
allExtLinks = set()
allIntLinks = set()
def getAllExternalLinks(siteUrl):
    html = urlopen(siteUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    internalLinks = getInternalLinks(bsObj, splitAddress(siteUrl)[0])
    externalLinks = getExternalLinks(bsObj, splitAddress(siteUrl)[0])
    for link in externalLinks:
        if link not in allExtLinks:
            allExtLinks.add(link)
            print(link)
    for link in internalLinks:
        fullLink = urljoin(siteUrl, link)  # resolve relative links such as "/article/..."
        if fullLink not in allIntLinks:
            print("About to get link: " + fullLink)
            allIntLinks.add(fullLink)
            getAllExternalLinks(fullLink)

getAllExternalLinks("https://www.jb51.net/article/130968.htm")

Running the program produces output like the following:

[Screenshot: external links printed during the crawl]
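Note that getAllExternalLinks calls itself once for every newly discovered internal link, so a large site can exceed Python's default recursion limit (about 1000 frames). Below is a possible iterative variant using a breadth-first queue; it reuses getInternalLinks, getExternalLinks, and splitAddress from the listing above, and the function name is illustrative rather than from the original article:

from collections import deque
from urllib.request import urlopen
from urllib.parse import urljoin
from bs4 import BeautifulSoup

def getAllExternalLinksIterative(siteUrl):
    # Breadth-first crawl: an explicit queue replaces the recursive calls
    domain = splitAddress(siteUrl)[0]
    toVisit = deque([siteUrl])
    seenInternal = {siteUrl}
    seenExternal = set()
    while toVisit:
        page = toVisit.popleft()
        try:
            bsObj = BeautifulSoup(urlopen(page), "html.parser")
        except Exception:
            continue  # skip pages that fail to load or parse
        for link in getExternalLinks(bsObj, domain):
            if link not in seenExternal:
                seenExternal.add(link)
                print(link)
        for link in getInternalLinks(bsObj, domain):
            fullLink = urljoin(page, link)
            if fullLink not in seenInternal:
                seenInternal.add(fullLink)
                toVisit.append(fullLink)
    return seenExternal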

Source: http://blog.csdn.net/qq_16103331/article/details/52690558
