// How to fetch a web page's source code
import urllib.request

def getHtml(url):
    page = urllib.request.urlopen(url)
    html = page.read()
    return html

# Pass any URL to getHtml()
html = getHtml("http://www.somy86.com")
# Decode the bytes returned by read() as UTF-8
html = html.decode('UTF-8')
print(html)
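One caveat the original code does not cover: urllib sends a default Python-urllib User-Agent, which some sites reject. A minimal sketch of the same fetch with a browser-like User-Agent header (the header value is only an illustrative assumption, not from the original post):

import urllib.request

def getHtml(url):
    # Some sites block the default urllib User-Agent, so send a browser-like one
    # (the exact header value below is only an example)
    req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    page = urllib.request.urlopen(req)
    return page.read()

html = getHtml("http://www.somy86.com").decode('UTF-8')
print(html)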
// Code to download an image
import requests

# Fetch the image and write the raw bytes to a local file
response = requests.get('http://www.ainote.cc/wp-content/themes/zblog/img/logo.png')
with open('/home/sam/python/cs/images/1.jpg', 'wb') as file:
    file.write(response.content)
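For larger files, a possible variant of the same download streams the body in chunks instead of reading it all into memory; the chunk size and timeout below are arbitrary choices, not values from the original post:

import requests

# Stream the response so large files are not held entirely in memory
response = requests.get('http://www.ainote.cc/wp-content/themes/zblog/img/logo.png',
                        stream=True, timeout=10)
response.raise_for_status()  # stop early on HTTP errors
with open('/home/sam/python/cs/images/1.jpg', 'wb') as file:
    for chunk in response.iter_content(chunk_size=8192):
        file.write(chunk)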
// Fetch all the images in a post
import re
import urllib.request

# Fetch a web page's source code
def getHtml(url):
    page = urllib.request.urlopen(url)
    html = page.read()
    return html

# Pass any URL to getHtml()
html = getHtml("https://tieba.baidu.com/p/5352556650")
# Decode the bytes returned by read() as UTF-8
html = html.decode('UTF-8')

# Collect every image address in the post
def getImg(html):
    # Use a regular expression to pick the .jpg addresses out of the page source
    reg = r'src="([^"]*?\.jpg)"'
    imgre = re.compile(reg)
    imglist = re.findall(imgre, html)
    return imglist

imgList = getImg(html)
imgName = 0
for imgPath in imgList:
    # Exception handling keeps one failed download from aborting the whole loop
    try:
        f = open('D:\\Temp\\' + str(imgName) + ".jpg", 'wb')
        f.write(urllib.request.urlopen(imgPath).read())
        print(imgPath)
        f.close()
    except Exception as e:
        print(imgPath + " error")
    imgName += 1
print("All Done!")
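The comment in the original mentions multithreading, but the loop above downloads images one at a time. A rough sketch of how the same downloads could run concurrently with a thread pool (the pool size of 4 and the download() helper are assumptions for illustration; imgList comes from the code above):

import urllib.request
from concurrent.futures import ThreadPoolExecutor

def download(item):
    # item is a (sequence number, image URL) pair produced by enumerate()
    imgName, imgPath = item
    try:
        with open('D:\\Temp\\' + str(imgName) + ".jpg", 'wb') as f:
            f.write(urllib.request.urlopen(imgPath).read())
        print(imgPath)
    except Exception:
        print(imgPath + " error")

# Run several downloads at the same time with a small thread pool
with ThreadPoolExecutor(max_workers=4) as pool:
    pool.map(download, enumerate(imgList))

print("All Done!")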
When reposting, please credit the source: 人工智能笔记 » Python实现网页爬虫常用代码总结