对Python3 解析html的几种操作方式小结
作者:极客点儿 发布时间:2021-04-07 20:13:12
解析html是爬虫获取数据后的一个重要处理环节。以下记录解析html的几种方式。
先介绍基础的辅助函数,主要用于获取html并输出解析后的结果
# The parser is passed in as a parameter so it is easy to swap below.
def get_html(url, paraser=bs4_paraser):
    """Download *url* and return the result of ``paraser`` applied to the page.

    Args:
        url: Page URL to fetch.
        paraser: Callable that takes the raw HTML bytes and returns the
            parsed data. Defaults to ``bs4_paraser``.

    Returns:
        Whatever ``paraser`` returns, or ``None`` when the HTTP status
        is not 200 (the original silently fell through with ``pass``).
    """
    # Python 3 stdlib replacements for the Python-2 urllib2/StringIO
    # used in the original article.
    import gzip
    import io
    import urllib.request

    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Host': 'www.360kan.com',
        'Proxy-Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    request = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(request)
    if response.getcode() != 200:
        return None
    data = response.read()
    # Only gunzip when the server actually gzip-encoded the body; the
    # original decompressed unconditionally and crashed on plain bodies.
    # (Setting response.encoding, as the original did, has no effect on
    # a urllib response object and was dropped.)
    if response.headers.get('Content-Encoding') == 'gzip':
        data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
    return paraser(data)
# Example driver: fetch the review page and dump each parsed review.
value = get_html('http://www.360kan.com/m/haPkY0osd0r5UB.html', paraser=lxml_parser)
for row in value:
    # Python 3: print is a function, not a statement.
    print(row)
1,使用lxml.html的方式进行解析。
The lxml XML toolkit is a Pythonic binding for the C libraries libxml2 and libxslt. It is unique in that it combines the speed and XML feature completeness of these libraries with the simplicity of a native Python API, mostly compatible but superior to the well-known ElementTree API. The latest release works with all CPython versions from 2.6 to 3.5. See the introduction for more information about background and goals of the lxml project. Some common questions are answered in the FAQ. [官网](http://lxml.de/)
def lxml_parser(page):
    """Parse the review page with lxml/XPath and return a list of dicts.

    Each dict carries the review title, its link, a 0-5 score, the post
    time and the number of people who liked it. Items missing any of
    these nodes raise IndexError, exactly as the direct indexing implies.
    """
    results = []
    root = etree.HTML(page)
    for wrap in root.xpath('//div[@class="yingping-list-wrap"]'):
        # One <div class="item"> per individual review.
        for item in wrap.xpath('.//div[@class="item"]'):
            # Title block of the review.
            head = item.xpath('.//div[@class="g-clear title-wrap"][1]')[0]
            entry = {
                'title': head.xpath('./a/text()')[0],
                'title_href': head.xpath('./a/@href')[0],
            }
            # The score is encoded as a width percentage in a style attr.
            style = head.xpath('./div/span/span/@style')[0]
            entry['score'] = int(re.search(r'\d+', style).group()) / 20
            # Post time.
            entry['time'] = head.xpath('./div/span[@class="time"]/text()')[0]
            # How many people liked this review.
            liked = head.xpath('./div[@class="num"]/span/text()')[0]
            entry['people'] = int(re.search(r'\d+', liked).group())
            results.append(entry)
    return results
2,使用BeautifulSoup,不多说了,大家网上找资料看看
def bs4_paraser(html):
    """Parse the review page with BeautifulSoup and return a list of dicts.

    Each dict carries the review title, its link, a 0-5 score, the post
    time and the like count.

    Fixes over the original: ``find_all`` always returns a list, never
    ``None``, so the ``is not None`` test was dead code; and the shared
    ``value`` dict lived outside the item loop, so an item with no
    title-wrap would append a stale or empty dict.
    """
    all_value = []
    soup = BeautifulSoup(html, 'html.parser')
    # The single wrapper div holding all reviews.
    for row in soup.find_all('div', attrs={'class': 'yingping-list-wrap'}, limit=1):
        # One div.item per review.
        for r in row.find_all('div', attrs={'class': 'item'}):
            # Title block of the review.
            title = r.find_all('div', attrs={'class': 'g-clear title-wrap'}, limit=1)
            if not title:
                continue  # skip items without a title block
            value = {}
            value['title'] = title[0].a.string
            value['title_href'] = title[0].a['href']
            # Score is a percentage hidden in a style attribute.
            score_text = title[0].div.span.span['style']
            score_text = re.search(r'\d+', score_text).group()
            value['score'] = int(score_text) / 20
            # Post time.
            value['time'] = title[0].div.find_all('span', attrs={'class': 'time'})[0].string
            # How many people liked this review.
            value['people'] = int(
                re.search(r'\d+', title[0].find_all('div', attrs={'class': 'num'})[0].span.string).group())
            all_value.append(value)
    return all_value
3,使用SGMLParser,主要是通过start、end tag的方式进行解析,解析过程比较明朗,但是有点麻烦,而且该案例的场景不太适合该方法,(哈哈)
class CommentParaser(SGMLParser):
    """SGML-event state machine that extracts movie reviews from the page.

    Collected reviews (dicts with href/title/score/time/people keys,
    depending on what the markup provided) accumulate in ``self.data``.

    NOTE(review): SGMLParser comes from the Python-2-only sgmllib module,
    which was removed in Python 3 — despite the article's title, this
    class only runs on Python 2.
    """
    def __init__(self):
        SGMLParser.__init__(self)
        # Nesting flags: each is True while the matching <div> is open.
        self.__start_div_yingping = False
        self.__start_div_item = False
        self.__start_div_gclear = False
        self.__start_div_ratingwrap = False
        self.__start_div_num = False
        # a: True while inside the title <a> of the current item.
        self.__start_a = False
        # span state: 0 idle, 1 rating wrapper, 2 time, 3 score style
        # consumed, 4 like-count span.
        self.__span_state = 0
        # __value is the review being built; data holds finished reviews.
        self.__value = {}
        self.data = []
    def start_div(self, attrs):
        # Raise the flag matching the div's class attribute.
        for k, v in attrs:
            if k == 'class' and v == 'yingping-list-wrap':
                self.__start_div_yingping = True
            elif k == 'class' and v == 'item':
                self.__start_div_item = True
            elif k == 'class' and v == 'g-clear title-wrap':
                self.__start_div_gclear = True
            elif k == 'class' and v == 'rating-wrap g-clear':
                self.__start_div_ratingwrap = True
            elif k == 'class' and v == 'num':
                self.__start_div_num = True
    def end_div(self):
        # Lower the innermost open flag; when the item itself closes,
        # commit the accumulated __value to data and start a fresh dict.
        if self.__start_div_yingping:
            if self.__start_div_item:
                if self.__start_div_gclear:
                    if self.__start_div_num or self.__start_div_ratingwrap:
                        if self.__start_div_num:
                            self.__start_div_num = False
                        if self.__start_div_ratingwrap:
                            self.__start_div_ratingwrap = False
                    else:
                        self.__start_div_gclear = False
                else:
                    self.data.append(self.__value)
                    self.__value = {}
                    self.__start_div_item = False
            else:
                self.__start_div_yingping = False
    def start_a(self, attrs):
        # Only the <a> inside an open item's title wrap is of interest.
        if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
            self.__start_a = True
            for k, v in attrs:
                if k == 'href':
                    self.__value['href'] = v
    def end_a(self):
        if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear and self.__start_a:
            self.__start_a = False
    def start_span(self, attrs):
        if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
            if self.__start_div_ratingwrap:
                if self.__span_state != 1:
                    # Outer spans: classify as rating wrapper or time.
                    for k, v in attrs:
                        if k == 'class' and v == 'rating':
                            self.__span_state = 1
                        elif k == 'class' and v == 'time':
                            self.__span_state = 2
                else:
                    # Inside the rating span: the score is a percentage
                    # embedded in the style attribute; /20 maps it to 0-5.
                    for k, v in attrs:
                        if k == 'style':
                            score_text = re.search(r'\d+', v).group()
                            self.__value['score'] = int(score_text) / 20
                            self.__span_state = 3
            elif self.__start_div_num:
                self.__span_state = 4
    def end_span(self):
        self.__span_state = 0
    def handle_data(self, data):
        # Route character data by whichever element is currently open.
        if self.__start_a:
            self.__value['title'] = data
        elif self.__span_state == 2:
            self.__value['time'] = data
        elif self.__span_state == 4:
            score_text = re.search(r'\d+', data).group()
            self.__value['people'] = int(score_text)
        pass
def sgl_parser(html):
    """Feed *html* through CommentParaser and return the review list."""
    comment_parser = CommentParaser()
    comment_parser.feed(html)
    return comment_parser.data
4,HTMLParser,与3原理相似,就是调用的方法不太一样,基本上可以公用
class CommentHTMLParser(HTMLParser.HTMLParser):
    """HTMLParser-based port of the SGMLParser review extractor.

    Same state machine as CommentParaser, but tag dispatch goes through
    handle_starttag/handle_endtag instead of per-tag start_*/end_*
    methods. Finished review dicts accumulate in ``self.data``.

    NOTE(review): ``HTMLParser.HTMLParser`` is the Python-2 module path;
    on Python 3 the class lives at ``html.parser.HTMLParser``.
    """
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        # Nesting flags: each is True while the matching <div> is open.
        self.__start_div_yingping = False
        self.__start_div_item = False
        self.__start_div_gclear = False
        self.__start_div_ratingwrap = False
        self.__start_div_num = False
        # a: True while inside the title <a> of the current item.
        self.__start_a = False
        # span state: 0 idle, 1 rating wrapper, 2 time, 3 score style
        # consumed, 4 like-count span.
        self.__span_state = 0
        # __value is the review being built; data holds finished reviews.
        self.__value = {}
        self.data = []
    def handle_starttag(self, tag, attrs):
        if tag == 'div':
            # Raise the flag matching the div's class attribute.
            for k, v in attrs:
                if k == 'class' and v == 'yingping-list-wrap':
                    self.__start_div_yingping = True
                elif k == 'class' and v == 'item':
                    self.__start_div_item = True
                elif k == 'class' and v == 'g-clear title-wrap':
                    self.__start_div_gclear = True
                elif k == 'class' and v == 'rating-wrap g-clear':
                    self.__start_div_ratingwrap = True
                elif k == 'class' and v == 'num':
                    self.__start_div_num = True
        elif tag == 'a':
            # Only the <a> inside an open item's title wrap is of interest.
            if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
                self.__start_a = True
                for k, v in attrs:
                    if k == 'href':
                        self.__value['href'] = v
        elif tag == 'span':
            if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
                if self.__start_div_ratingwrap:
                    if self.__span_state != 1:
                        # Outer spans: classify as rating wrapper or time.
                        for k, v in attrs:
                            if k == 'class' and v == 'rating':
                                self.__span_state = 1
                            elif k == 'class' and v == 'time':
                                self.__span_state = 2
                    else:
                        # Inside the rating span: score is a percentage in
                        # the style attribute; /20 maps it to a 0-5 scale.
                        for k, v in attrs:
                            if k == 'style':
                                score_text = re.search(r'\d+', v).group()
                                self.__value['score'] = int(score_text) / 20
                                self.__span_state = 3
                elif self.__start_div_num:
                    self.__span_state = 4
    def handle_endtag(self, tag):
        if tag == 'div':
            # Lower the innermost open flag; when the item itself closes,
            # commit the accumulated __value to data and start fresh.
            if self.__start_div_yingping:
                if self.__start_div_item:
                    if self.__start_div_gclear:
                        if self.__start_div_num or self.__start_div_ratingwrap:
                            if self.__start_div_num:
                                self.__start_div_num = False
                            if self.__start_div_ratingwrap:
                                self.__start_div_ratingwrap = False
                        else:
                            self.__start_div_gclear = False
                    else:
                        self.data.append(self.__value)
                        self.__value = {}
                        self.__start_div_item = False
                else:
                    self.__start_div_yingping = False
        elif tag == 'a':
            if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear and self.__start_a:
                self.__start_a = False
        elif tag == 'span':
            self.__span_state = 0
    def handle_data(self, data):
        # Route character data by whichever element is currently open.
        if self.__start_a:
            self.__value['title'] = data
        elif self.__span_state == 2:
            self.__value['time'] = data
        elif self.__span_state == 4:
            score_text = re.search(r'\d+', data).group()
            self.__value['people'] = int(score_text)
        pass
def html_parser(html):
    """Feed *html* through CommentHTMLParser and return the review list."""
    comment_parser = CommentHTMLParser()
    comment_parser.feed(html)
    return comment_parser.data
3,4对于该案例来说确实是不太适合,趁现在有空记录下来,供学习使用!
来源:https://blog.csdn.net/yilovexing/article/details/79675672


猜你喜欢
- 1、备份数据库(单个表备份) bk_table.bat mysqldump -h127.0.0.1 -P3306 -uroot -proot
- 目录前言1. 使用Lambda来修改Pandas数据框中的值2. 使用f-string来连接字符串3. 用Zip()函数对多个列表进行迭代4
- 本文实例为大家分享了python实现大转盘抽奖的具体代码,供大家参考,具体内容如下选择转盘中的某一个方框,来进行抽奖import tkint
- 项目场景pytorch训练时我们一般把数据集放到数据加载器里,然后分批拿出来训练。训练前我们一般还要看一下训练数据长啥样,也就是训练数据集可
- 首先是下载图解1、首先卸载centos7中自带的mariadbrpm -qa|grep mariadb //查询出来已安装的mariadbr
- 1. 权限管理Casbin是用于Golang项目的功能强大且高效的开源访问控制库。1.1.1. 特征Casbin的作用:以经典{subjec
- 本文实例讲述了Mysql数据库高级用法之视图、事务、索引、自连接、用户管理。分享给大家供大家参考,具体如下:视图视图是对若干张基本表的引用,
- 1. 引言在Python中有很多好玩的花式打印,对厉害的高手来说可能是小菜一碟,对入门的小白来说往往让人望而退步,我们今天就来挑战下面三个常
- 在许多用SQL Server实现的新的企业系统设计中,系统设计师需要在给数据结构和管理应用程序逻辑的定位上做出具有关键性意义的决定。SQL
- 1 . 如何让自己的本地APACHE服务器支持.htaccess 如何让自己的本地APACHE服务器支持”.htaccess”呢?其实只要简
- 如何在ADO服务器端利用好缓存技术?请看下面示例,这是一个用来显示图书分类的例子程序:displayBooks.asp< %
- Todo清单需要实现的功能有添加任务、删除任务、编辑任务,操作要关联数据库。任务需要绑定用户,部门。用户需要绑定部门。{#自己编写一个基类模
- 在本篇文章中,我们将探讨如何使用YOLOv5车牌识别系统开发一个Web应用,以及如何创建一个车牌识别API供其他开发者使用。我们将介绍Fla
- 第一种:字符串拆分法window.location.href 或者 location.href 或者 window.location 获得地
- 一. 图片懒加载的目的大型网站如常用的淘宝,京东等页面,需要展示大量的商品图片信息,如果打开网页时让所有图片一次性加载完成,需要处理很多次网
- 数据准备: 需要准备两个stl文件、Python需要安装vtk库步骤一:数据读取 首先通过vtk.vtkSTLReade
- 使用了Python一段时间后,可以说Python的基本单位就是模块了,在使用模块的时候我们一般会使用通过import语句来将其导入,但是我们
- 昨天晚上才发现已经出了jQuery的1.3版本,于是下载下来,把原来一个兄弟翻译的1.2.6的文档移植到了1.3中,点击这里可
- 一: 删除LOG1:分离数据库 企业管理器->服务器->数据库->右键->分离数据库2:删除LOG文件3:附加数据库 企业管理器->服务器-
- 前言爬虫和反爬虫日益成为每家公司的标配系统。爬虫在情报获取、虚假流量、动态定价、恶意攻击、薅羊毛等方面都能起到很关键的作用,所以每家公司都或