2023 Data Collection and Fusion Technology: Practice Assignment 4

Published 2023-11-07 14:51:23 · Author: 李波102102157


Assignment 1:

  • Requirements:
    Master using Selenium to locate HTML elements, scrape Ajax-loaded page data, and wait for HTML elements.
    Use the Selenium framework + MySQL storage to scrape stock data from the three boards "沪深 A 股" (SSE & SZSE A-shares), "上证 A 股" (SSE A-shares), and "深证 A 股" (SZSE A-shares).
  • Candidate site: Eastmoney (东方财富网):
    http://quote.eastmoney.com/center/gridlist.html#hs_a_board
  • Output:
    MySQL database storage and output in the format below; column headers should use English names, e.g. 序号 → id, 股票代码 → bStockNo……, with the schema designed by each student.
    Gitee folder link
No. Code Name Latest Price Change % Change Amt Volume Turnover Amplitude High Low Open Prev Close
1 688093 N世华 28.47 10.92 26.13万 7.6亿 22.34 32.0 28.08 30.20
2 .....

1. Code, run results, and Gitee link

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import pymysql
import datetime
import time
class MySpider:
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre"}
    def start(self, url):
        # Create the browser and the database connection only once; later
        # calls just navigate to the next board's URL, so the table is not
        # dropped between boards and all three boards end up stored.
        if not hasattr(self, "driver"):
            chrome_options = Options()
            chrome_options.add_argument('--headless')
            chrome_options.add_argument('--disable-gpu')
            self.driver = webdriver.Chrome(options=chrome_options)
        if not getattr(self, "opened", False):
            try:
                self.con = pymysql.connect(host="127.0.0.1", port=3306, user="root", passwd="123456", db="mydb",
                                           charset="utf8")
                self.cursor = self.con.cursor(pymysql.cursors.DictCursor)
                self.cursor.execute("DROP TABLE IF EXISTS stock")
                # Create the stock table
                self.cursor.execute("CREATE TABLE IF NOT EXISTS stock(Sid INT PRIMARY KEY,"
                                    "Ssymbol VARCHAR(32),"
                                    "Sname VARCHAR(32),"
                                    "Soffer VARCHAR(32),"
                                    "SchangeRate VARCHAR(32),"
                                    "SchangePrice VARCHAR(32),"
                                    "Svolume VARCHAR(32),"
                                    "Sturnover VARCHAR(32),"
                                    "Samplitude VARCHAR(32),"
                                    "Shighest VARCHAR(32),"
                                    "Slowest VARCHAR(32),"
                                    "Stoday VARCHAR(32),"
                                    "Syesterday VARCHAR(32))")
                self.opened = True
                self.No = 0
            except Exception as err:
                print(err)
                self.opened = False
        # Remember the row counter at the start of this board; process() uses
        # it to cap the number of pages scraped per board.
        self.boardStart = getattr(self, "No", 0)
        self.driver.get(url)
    def close(self):
        try:
            self.con.commit()
            self.con.close()
            self.driver.quit()  # quit() ends the whole browser session
        except Exception as err:
            print(err)
    def insertDB(self, Ssymbol, Sname, Soffer, SchangeRate, SchangePrice, Svolume, Sturnover, Samplitude, Shighest,
                 Slowest, Stoday, Syesterday):
        try:
            print(self.No, Ssymbol, Sname, Soffer, SchangeRate, SchangePrice, Svolume, Sturnover, Samplitude, Shighest,
                  Slowest, Stoday, Syesterday)
            self.cursor.execute(
                "insert into stock(Sid,Ssymbol,Sname,Soffer,SchangeRate,SchangePrice,Svolume,Sturnover,Samplitude,Shighest,Slowest,Stoday,Syesterday) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                (self.No, Ssymbol, Sname, Soffer, SchangeRate, SchangePrice, Svolume, Sturnover, Samplitude, Shighest,
                 Slowest, Stoday, Syesterday))
        except Exception as err:
            print(err)
    def process(self):
        try:
            time.sleep(1)
            print(self.driver.current_url)
            datas = self.driver.find_elements(By.XPATH,"//table[@id='table_wrapper-table']/tbody/tr")
            for data in datas:
                try:
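                    # The row's visible text, split on spaces; the indices
                    # below follow the page's column order.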
                    datas1 = data.text.split(" ")
                    Ssymbol = datas1[1]
                    Sname = datas1[2]
                    Soffer = datas1[6]
                    SchangeRate = datas1[7]
                    SchangePrice = datas1[8]
                    Svolume = datas1[9]
                    Sturnover = datas1[10]
                    Samplitude = datas1[11]
                    Shighest = datas1[12]
                    Slowest = datas1[13]
                    Stoday = datas1[14]
                    Syesterday = datas1[15]
                except:
                    Ssymbol = " "
                    Sname = " "
                    Soffer = " "
                    SchangeRate = " "
                    SchangePrice = " "
                    Svolume = " "
                    Sturnover = " "
                    Samplitude = " "
                    Shighest = " "
                    Slowest = " "
                    Stoday = " "
                    Syesterday = " "
                self.No = self.No + 1
                self.insertDB(Ssymbol, Sname, Soffer, SchangeRate, SchangePrice, Svolume, Sturnover, Samplitude,
                              Shighest, Slowest, Stoday, Syesterday)
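            # A disabled 'next' button marks the last page; otherwise click
            # 'next' and recurse, capped at roughly 60 rows per board.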
            try:
                self.driver.find_element(By.XPATH,
                    "//div[@class='dataTables_paginate paging_input']//a[@class='next paginate_button disabled']")
            except:
                nextPage = self.driver.find_element(By.XPATH,
                    "//div[@class='dataTables_paginate paging_input']//a[@class='next paginate_button']")
                time.sleep(10)
                if (self.No - self.boardStart <= 60):
                    nextPage.click()
                    self.process()
        except Exception as err:
            print(err)
    def executeSpider(self, url):
        print("Spider processing......")
        self.process()
spider = MySpider()
fs = {
    "沪深A股": "/gridlist.html#hs_a_board",
    "上证A股": "/gridlist.html#sh_a_board",
    "深证A股": "/gridlist.html#sz_a_board"
}
starttime = datetime.datetime.now()
for board, path in fs.items():
    url = "http://quote.eastmoney.com/center" + path
    print("Spider starting......")
    spider.start(url)
    spider.executeSpider(url)
print("Spider closing......")
spider.close()
print("Spider completed......")
endtime = datetime.datetime.now()
elapsed = (endtime - starttime).seconds
print("Total ", elapsed, " seconds elapsed")

Run results:

Gitee link:
https://gitee.com/li-bo-102102157/libo_project/commit/f9fadfc74129bff047804a54ef50b1e05609a3d4

2. Reflections

The start method opens the database connection and the headless browser, and the close method commits and releases them. start also creates the stock table. Finally, insertDB writes each row through a parameterized SQL statement (placeholders plus a value tuple) rather than concatenating values into the query string, which avoids quoting and injection problems.
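The assignment also calls for explicitly waiting on HTML elements, while the code above relies on fixed time.sleep calls. Below is a minimal sketch of how the Ajax-rendered table rows could be awaited with Selenium's WebDriverWait instead; the XPath is the one already used in process, and the 10-second timeout is an assumed value, not something the assignment prescribes.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("http://quote.eastmoney.com/center/gridlist.html#hs_a_board")

# Block (up to 10 s) until the stock rows rendered by Ajax are present,
# instead of sleeping for a fixed interval and hoping the page is ready.
rows = WebDriverWait(driver, 10).until(
    EC.presence_of_all_elements_located(
        (By.XPATH, "//table[@id='table_wrapper-table']/tbody/tr")))
print(len(rows), "rows loaded")
driver.quit()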

Assignment 2:

  • Requirements: Master using Selenium to locate HTML elements, simulate user login, scrape Ajax-loaded page data, and wait for HTML elements.
    Use the Selenium framework + MySQL to scrape course information from China MOOC (course id, course name, school, lead teacher, team members, enrollment count, course schedule, course description).
  • Candidate site: China MOOC (中国大学MOOC):
    https://www.icourse163.org
  • Output: MySQL database storage and output format
    Gitee folder link
Id cCourse cCollege cTeacher cTeam cCount cProcess cBrief
1 Python数据分析与展示 北京理工大学 嵩天 嵩天 470 2020年11月17日 ~ 2020年12月29日 "We are entering a new era where data may be more important than software. — Tim O'Reilly" ……
2 .....

1. Code, run results, and Gitee link

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
import pymysql


class MySpiderRenamed:
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"}

    def __init__(self):
        print("Initializing spider...")
        chrome_options = Options()
        # chrome_options.add_argument('--headless')
        # chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(options=chrome_options)
        self.page = 0
        self.no = 0

        try:
            self.con = pymysql.connect(host="127.0.0.1", port=3306, user="root", passwd="123456", charset="utf8",
                                       db="mydb")
            self.cursor = self.con.cursor()
            try:
                self.cursor.execute("drop table if exists mooc_renamed")
            except:
                pass

            sql = """create table if not exists mooc_renamed(Id varchar(8),cCource varchar(64),cCollege varchar(64),cTeacher varchar(64),
                        cTeam varchar(64),cCount varchar(64),cBrief varchar(256))ENGINE=InnoDB DEFAULT CHARSET=utf8"""
            self.cursor.execute(sql)
        except Exception as err:
            print(err)

        print("Initialization completed")

    def choose_keyword_and_search(self, key, url):
        print("Searching for keyword {}...".format(key))
        self.driver.get(url)
        time.sleep(1)

        key_input = self.driver.find_element(By.XPATH, ".//div[@class='u-baseinputui']/input[@type='text']")
        key_input.send_keys(key)
        key_input.send_keys(Keys.ENTER)
        time.sleep(2)
        print("Search completed, ready to start scraping")

    def scrape_and_store_data(self):
        try:
            self.page += 1
            print("Scraping page {}".format(self.page))

            # Each result card on the course list page
            datalist = self.driver.find_elements(By.XPATH, "//div[@class='cnt f-pr']")
            time.sleep(2)

            for data in datalist:
                try:
                    cCourse = data.find_element(By.XPATH, "./div[@class='t1 f-f0 f-cb first-row']").text
                    cCollege = data.find_element(By.XPATH, ".//a[@class='t21 f-fc9']").text
                    cTeacher = data.find_element(By.XPATH, ".//a[contains(@class,'f-fc9')][1]").text
                    cTeam = data.find_element(By.XPATH, ".//a[contains(@class,'f-fc9')][2]").text
                    cCount = data.find_element(By.XPATH, ".//span[@class='hot']").text
                    cBrief = data.find_element(By.XPATH, ".//span[@class='p5 brief f-ib f-f0 f-cb']").text

                    self.no += 1
                    self.cursor.execute("insert into mooc_renamed values(%s,%s,%s,%s,%s,%s,%s)", (self.no,
                                                                                                 cCource, cCollege, cTeacher,
                                                                                                 cTeam, cCount, cBrief))
                    self.con.commit()
                    print(cCource)
                except:
                    pass

            # The 'next' button is styled 'th-bk-disable-gh' on the last page;
            # otherwise it is 'th-bk-main-gh' and we click through and recurse.
            try:
                self.driver.find_element(By.XPATH,
                                         "//li[@class='ux-pager_btn ux-pager_btn__next']/a[@class='th-bk-disable-gh']")
                print("Scraping completed")
            except:
                next_page = self.driver.find_element(By.XPATH,
                                                     "//li[@class='ux-pager_btn ux-pager_btn__next']/a[@class='th-bk-main-gh']")
                next_page.click()
                time.sleep(3)
                self.scrape_and_store_data()
        except Exception as err:
            print(err)

    def start_processing(self, url):
        # __init__ already set up the browser and the database connection;
        # calling it again here would open a second browser window.
        print("Please enter the keyword to scrape:")
        key = input()
        self.choose_keyword_and_search(key, url)
        self.scrape_and_store_data()


url = "https://www.icourse163.org"

spider = MySpiderRenamed()
spider.start_processing(url)
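When run, the script prompts for a search keyword on standard input, submits it through the site's search box, and then scrapes result pages one after another until the 'next' button becomes disabled.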

Run results:

Gitee link:
https://gitee.com/li-bo-102102157/libo_project/commit/499365d0144f7b17ea30481745d923b473345f7e

2. Reflections

Overall, this code simulates browser behavior to search for a user-supplied keyword, extracts each course's information from the result cards, and stores it in the MySQL database, paginating until the 'next' button is disabled.
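The requirements also mention simulating user login, which this version skips since the course list can be scraped without logging in. For reference, below is a minimal sketch of the generic Selenium login pattern; all four locators are hypothetical placeholders and must be replaced with values inspected from the live icourse163 page, whose login form structure I have not verified.

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Hypothetical locators -- inspect the real page and substitute.
LOGIN_IFRAME_XPATH = "//iframe[contains(@src, 'login')]"
USER_INPUT_XPATH = "//input[@name='email']"
PASS_INPUT_XPATH = "//input[@name='password']"
SUBMIT_XPATH = "//a[@id='submitBtn']"

def login(driver, username, password):
    wait = WebDriverWait(driver, 10)
    # Login forms on many sites live inside an iframe; switch into it first.
    wait.until(EC.frame_to_be_available_and_switch_to_it((By.XPATH, LOGIN_IFRAME_XPATH)))
    wait.until(EC.presence_of_element_located((By.XPATH, USER_INPUT_XPATH))).send_keys(username)
    driver.find_element(By.XPATH, PASS_INPUT_XPATH).send_keys(password)
    driver.find_element(By.XPATH, SUBMIT_XPATH).click()
    driver.switch_to.default_content()  # leave the iframe before continuing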

Assignment 3:

  • Requirements:
    Get familiar with big-data services and with using Xshell; complete the tasks in the handbook 华为云_大数据实时分析处理实验手册-Flume 日志采集实验(部分)v2.docx, i.e. the five tasks below (see the handbook for the detailed steps).
  • Environment setup:
    · Task 1: Provision the MapReduce (MRS) service
  • Real-time analytics hands-on:
    · Task 1: Generate test data with a Python script
    · Task 2: Configure Kafka
    · Task 3: Install the Flume client
    · Task 4: Configure Flume data collection
  • Output: screenshots of the key steps and results.
    · Task 1: Generate test data with a Python script (a minimal sketch of such a generator follows this list)
    · Task 2: Configure Kafka
    · Task 3: Install the Flume client
    · Task 4: Configure Flume data collection
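The handbook's own Task 1 generator script is not reproduced here, so below is a minimal sketch of what such a test-data generator might look like: it appends random comma-separated records to a log file for Flume's source to pick up. The file path, field layout, and one-event-per-second rate are my assumptions, not the handbook's values.

import random
import time

# Assumed output path and record schema -- the handbook's script defines its own.
LOG_PATH = "/tmp/flume_test.log"
CITIES = ["beijing", "shanghai", "guangzhou", "shenzhen"]

def gen_record():
    # One comma-separated event: timestamp, user id, city, amount
    return "{},{},{},{:.2f}".format(
        time.strftime("%Y-%m-%d %H:%M:%S"),
        random.randint(10000, 99999),
        random.choice(CITIES),
        random.uniform(1, 1000))

if __name__ == "__main__":
    with open(LOG_PATH, "a") as f:
        while True:
            f.write(gen_record() + "\n")
            f.flush()      # flush each line so Flume sees events promptly
            time.sleep(1)  # roughly one event per second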

Reflections

I attempted the Flume experiment (Assignment 3) no fewer than five times, stuck mainly on provisioning the MapReduce service at the beginning: following the original tutorial never once worked, to the point where I wondered whether my machine was the problem. Fortunately, when I asked the teaching assistant, they sent me the updated steps and the MRS service came up on the first try. Everything after that went smoothly. Many thanks to the TA.