Scraping news with Python and building a word cloud

Preface

Python crawlers and word clouds can sound intimidating, so let's start simple and build things up step by step.

Python crawler

1. Basic framework

The code below only sketches the program's basic skeleton.

# -*- coding: utf-8 -*-#
# Basic skeleton

# 1. Imports
from bs4 import BeautifulSoup  # parse the page and extract data
import re  # regular expressions for text matching
import urllib.request, urllib.error  # build the URL request and fetch the page
# 2. Main function
def main():
    a = 1  # running counter of saved news items
    # crawl the page and collect the data
    baseurl = "https://news.163.com/"
    Datelist, a = getDate(baseurl, a)
    # save
    savepath = ".\\新闻2.xls"
    saveDate(savepath, Datelist, a)

# 3. Crawl the page
def getDate(baseurl, a):
    datelist = []          # collect the scraped rows in a list
    return datelist, a
# 4. Save
def saveDate(savepath, Datelist, a):
    print("...")

if __name__ == "__main__":
    main()

2. Fetching the page

To fetch a page we first need to request its URL, so we define a function called askURL(url).

Once the page is fetched, the next step is to pull out its content; for that we write a function called getDate(baseurl).

from bs4 import BeautifulSoup  # parse the page and extract data
import urllib.request, urllib.error  # build the URL request and fetch the page

def main():
    a = 1  # running counter of saved news items
    # crawl the page and collect the data
    baseurl = "https://news.163.com/"
    Datelist, a = getDate(baseurl, a)

    savepath = ".\\新闻2.xls"
    saveDate(savepath, Datelist, a)

# Fetch the page content for the given URL
def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763"}
    # mimic a browser's request headers when contacting the server
    request = urllib.request.Request(url, headers=head)
    html = ""   # accumulate the page source as a string

    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8", 'ignore')
        print(html)

    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
# Crawl the page
def getDate(baseurl, a):
    datelist = []          # collect the scraped rows in a list
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")
    return datelist, a

# Save
def saveDate(savepath, Datelist, a):
    print("...")


if __name__ == "__main__":
    main()

Running this code prints the raw page source, but that output is huge and messy, so we still need to clean the data.

3. Data cleaning

# -*- coding: utf-8 -*-#
from bs4 import BeautifulSoup  # parse the page and extract data
import re  # regular expressions for text matching
import urllib.request, urllib.error  # build the URL request and fetch the page

def main():
    a = 1  # running counter of saved news items
    # crawl the page and collect the data
    baseurl = "https://news.163.com/"
    Datelist, a = getDate(baseurl, a)

    savepath = ".\\新闻2.xls"
    saveDate(savepath, Datelist, a)

# Fetch the page content for the given URL
def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763"}
    # mimic a browser's request headers when contacting the server
    request = urllib.request.Request(url, headers=head)
    html = ""   # accumulate the page source as a string

    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8", 'ignore')
        # print(html)

    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
# Crawl the page
def getDate(baseurl, a):
    datelist = []          # collect the scraped rows in a list
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")
    for item in soup.select(".hidden"):  # the .hidden blocks hold the headline lists
        for c in item.select('a'):       # each <a> tag is one headline
            print(c)
    return datelist, a

# Save
def saveDate(savepath, Datelist, a):
    print("...")


if __name__ == "__main__":
    main()

4. Saving the data

With the data scraped, we now need to save it (here we write it to an Excel file using xlwt, installable via `pip install xlwt`).

# -*- coding: utf-8 -*-#
from bs4 import BeautifulSoup  # parse the page and extract data
import re  # regular expressions for text matching
import urllib.request, urllib.error  # build the URL request and fetch the page
import xlwt  # Excel output

def main():
    a = 1  # running counter of saved news items
    # crawl the page and collect the data
    baseurl = "https://news.163.com/"
    Datelist, a = getDate(baseurl, a)

    savepath = "新闻2.xls"
    saveDate(savepath, Datelist, a)

# Fetch the page content for the given URL
def askURL(url):
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763"}
    # mimic a browser's request headers when contacting the server
    request = urllib.request.Request(url, headers=head)
    html = ""   # accumulate the page source as a string

    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8", 'ignore')
        # print(html)

    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

# Regexes that pull the href and the link text out of a stringified <a> tag
findlink = re.compile(r'<a href="(.*?)">')
findjs = re.compile(r'">(.*)</a>')

# Crawl the page
def getDate(baseurl, a):
    datelist = []          # collect the scraped rows in a list
    html = askURL(baseurl)
    soup = BeautifulSoup(html, "html.parser")

    for item in soup.select(".hidden"):  # the .hidden blocks hold the headline lists
        for c in item.select('a'):       # each <a> tag is one headline
            # print(c)
            date = []                    # one row: [title, link, body]
            c = str(c)
            Js = findjs.findall(c)
            date.append(Js[0] if Js else '')   # title (xlwt cannot write a raw list)
            Link = findlink.findall(c)
            date.append(Link[0])               # link
            date.append('')                    # placeholder for the article body
            Html = askURL(Link[0])             # follow the link to the article page
            Soup = BeautifulSoup(Html, "html.parser")
            for item1 in Soup.select(".post_body"):
                date.insert(2, item1.get_text().strip())  # fill in the body text

            print("Saved news item %.3d" % a)
            a += 1
            datelist.append(date)
    return datelist, a

# Save
def saveDate(savepath, Datelist, a):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create the workbook
    sheet = book.add_sheet(savepath, cell_overwrite_ok=True)  # create the worksheet
    crl = ("新闻标题", "新闻链接", "新闻内容")  # column headers: title, link, content
    for i in range(0, len(crl)):
        sheet.write(0, i, crl[i])
    for i in range(1, a):          # a ended at (number of saved rows + 1)
        for j in range(0, len(crl)):
            sheet.write(i, j, Datelist[i - 1][j])
    print("Done saving")
    book.save(savepath)

if __name__ == "__main__":
    main()
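
One gap worth closing: the word-cloud step below reads a plain-text file, 头条新闻.txt, which the Excel pipeline above never writes. Here is a minimal bridging sketch, assuming each row of Datelist has the [title, link, body] shape assembled in getDate; saveTxt is a helper name introduced here, not part of the original code.

# Hedged sketch: dump the scraped titles and bodies to a plain-text file
# so the word-cloud step has input to read. Assumes each row in Datelist
# is [title, link, body] as built in getDate above.
def saveTxt(txtpath, Datelist):
    with open(txtpath, "w", encoding="utf-8") as f:
        for date in Datelist:
            f.write(str(date[0]) + "\n")  # news title
            f.write(str(date[2]) + "\n")  # news body

Calling saveTxt("头条新闻.txt", Datelist) at the end of main() would produce the file the next section expects.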

Word cloud

This step relies on the wordcloud and matplotlib packages (install them with `pip install wordcloud matplotlib` if needed); the code below generates and saves the word-cloud image.

from wordcloud import WordCloud
import matplotlib.pyplot as plt

# Load the text
with open('头条新闻.txt', encoding="utf-8") as f:
    text = f.read()

# Generate the cloud
# font path, canvas width/height, background color
wc = WordCloud(font_path=r'C:\Windows\Fonts\msyh.ttc', width=800, height=600,
               mode="RGBA", background_color='white').generate(text)

# Display
plt.imshow(wc)
plt.axis("off")  # hide the axes
plt.show()

# Save
wc.to_file("2.wordcloud2.png")

Going further

We now have a word cloud, but long whole sentences are not what we wanted in it; since Chinese text has no spaces between words, we need to bring in a word-segmentation module (jieba).

from wordcloud import WordCloud
import matplotlib.pyplot as plt
import jieba

# Load the text
with open('头条新闻.txt', encoding="utf-8") as f:
    text = f.read()
# Chinese word segmentation
text = ' '.join(jieba.cut(text))  # jieba yields tokens; rejoin with spaces so WordCloud can split on whitespace
# Generate the cloud
# font path, canvas width/height, background color
wc = WordCloud(font_path=r'C:\Windows\Fonts\msyh.ttc', width=800, height=600,
               mode="RGBA", background_color='white').generate(text)
# Display
plt.imshow(wc)
plt.axis("off")  # hide the axes
plt.show()
# Save
wc.to_file("2.wordcloud2.png")
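
One more refinement: after plain segmentation, filler words such as 的 and 了 tend to dominate the cloud. Below is a minimal sketch that swaps the segmentation line above for a stop-word-filtered version; the stop-word set here is a tiny illustrative sample, and a real project would load a full Chinese stop-word list from a file.

# Hedged sketch: filter common filler words during segmentation.
# The stop-word set below is only an illustrative sample.
stopwords = {"的", "了", "是", "在", "和", "也", "都"}
text = ' '.join(w for w in jieba.cut(text)
                if w.strip() and w not in stopwords)

Everything else (the WordCloud setup, display, and saving) stays the same.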

Final words

👍🏻 Like: your approval is the motivation for my writing!

⭐ Bookmark: your favor is the direction of my efforts!

✏️ Comment: your feedback is the treasure that helps me improve!
