
A simple Scrapy crawl of Autohome (汽车之家) car models

zhangfaliang

Abstract: a simple Scrapy spider for Autohome car models. It redefines the crawl's starting requests to build one listing page per brand initial (A-Z), parses each page for brands, their sub-brands and the sub-brands' pages, and every car under them (name, detail-page url, price range), then stores the results in MongoDB.

A simple crawl of Autohome car models
spider


# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from mininova.items import carItem
import sys
if sys.version_info[0] == 2:
    # Python 2 only: force utf-8 as the default encoding
    reload(sys)
    sys.setdefaultencoding("utf8")

class SplashSpider(scrapy.Spider):
    # spider name
    name = "car_home"
    allowed_domains = ["autohome.com.cn"]
    start_urls = []
    # per-spider settings: enable the MongoDB pipeline
    custom_settings = {
        "ITEM_PIPELINES": {
            "mininova.pipelines.CarPipeline": 300,
        }
    }

    def start_requests(self):  # redefine the crawl's entry points
        # all brand initials
        words = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
                 "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
        # build the listing page for each initial and request it; yielding
        # inside this loop keeps each request paired with its own initial
        # (yielding from a separate second loop would stamp every request
        # with the last initial, "Z")
        for word in words:
            url = "https://www.autohome.com.cn/grade/carhtml/" + word + ".html"
            self.start_urls.append(url)
            yield Request(url, meta={"word": word})

    # default parse callback
    def parse(self, response):
        print("url")
        print(response.url)
        word = response.meta["word"]
        car_cates = response.xpath("//dl").extract()
        total_cars = []
        for brand_index in range(len(car_cates)):
            # brand position (XPath indices are 1-based)
            brand_num = str(brand_index + 1)
            # brand name
            brand = response.xpath("//dl[" + brand_num + "]/dt/div[1]/a/text()").extract()[0]
            print("brand:" + brand)
            # brand logo
            brand_logo_url = response.xpath("//dl[" + brand_num + "]/dt//img[1]/@src").extract()[0]
            # sub-brands under this brand
            brand_items = response.xpath("//dl[" + brand_num + "]/dd//div[@class='h3-tit']/a/text()").extract()
            # pages of those sub-brands
            brand_item_urls = response.xpath("//dl[" + brand_num + "]/dd//div[@class='h3-tit']/a/@href").extract()
            for brand_item_index in range(len(brand_items)):
                # sub-brand position
                brand_item_num = str(brand_item_index + 1)
                # sub-brand name
                brand_item = brand_items[brand_item_index]
                # url of the sub-brand's page
                brand_item_url = brand_item_urls[brand_item_index]
                print("brand_item:" + brand_item)
                print("brand_item_url:" + brand_item_url)
                # every car in this sub-brand
                cars = response.xpath("//dl[" + brand_num + "]/dd//ul[@class='rank-list-ul'][" + brand_item_num + "]/li[@id]").extract()
                print("cars_count:" + str(len(cars)))
                for car_index in range(len(cars)):
                    car_num = str(car_index + 1)
                    # car model name
                    name = response.xpath("//dl[" + brand_num + "]/dd//ul[@class='rank-list-ul'][" + brand_item_num + "]/li[@id][" + car_num + "]/h4/a/text()").extract()[0]
                    # the model's detail page
                    url = response.xpath("//dl[" + brand_num + "]/dd//ul[@class='rank-list-ul'][" + brand_item_num + "]/li[@id][" + car_num + "]/h4/a/@href").extract()[0]
                    # quoted price range (lowest-highest)
                    price = response.xpath("//dl[" + brand_num + "]/dd//ul[@class='rank-list-ul'][" + brand_item_num + "]/li[@id][" + car_num + "]/div[1]/a/text()").extract()[0]
                    prices = price.split("-")
                    price_base = "万"  # unit: 10,000 RMB
                    if len(prices) != 2:
                        # no usable quote on the page
                        max_price = "暂无"
                        min_price = "暂无"
                    else:
                        max_price = str(prices[1].replace(price_base, ""))
                        min_price = str(prices[0])
                    print("car:" + name + " max_price:" + max_price + " min_price:" + min_price + " price_base:" + price_base)
                    car_item = carItem()
                    car_item["name"] = name
                    car_item["url"] = url
                    car_item["brand_item"] = brand_item
                    car_item["first_word"] = word
                    car_item["brand"] = brand
                    car_item["brand_logo_url"] = brand_logo_url
                    car_item["max_price"] = max_price
                    car_item["min_price"] = min_price
                    total_cars.append(car_item)
        return total_cars
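
With the project in place the spider runs as usual with scrapy crawl car_home. One stylistic note: parse rebuilds absolute XPath strings by splicing 1-based indices into them. The same traversal can walk selector objects with relative XPaths instead; a sketch, trimmed to the essential fields (the sub-brand pairing is omitted for brevity):

# Sketch: the dl/li traversal with relative XPaths instead of spliced
# absolute paths; extract_first() returns None instead of raising IndexError.
def parse(self, response):
    word = response.meta["word"]
    for dl in response.xpath("//dl"):
        brand = dl.xpath("./dt/div[1]/a/text()").extract_first()
        for li in dl.xpath('.//ul[@class="rank-list-ul"]/li[@id]'):
            name = li.xpath("./h4/a/text()").extract_first()
            url = li.xpath("./h4/a/@href").extract_first()
            price = li.xpath("./div[1]/a/text()").extract_first()
            print(word, brand, name, url, price)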

item


# -*- coding: utf-8 -*-
import scrapy

class carItem(scrapy.Item):
    # car model name
    name = scrapy.Field()
    # url of the model's detail page
    url = scrapy.Field()
    # highest quoted price, unit: 万 (10,000 RMB)
    max_price = scrapy.Field()
    # lowest quoted price, unit: 万 (10,000 RMB)
    min_price = scrapy.Field()
    # brand name
    brand = scrapy.Field()
    # brand logo url
    brand_logo_url = scrapy.Field()
    # sub-brand name
    brand_item = scrapy.Field()
    # first letter of the brand
    first_word = scrapy.Field()
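
scrapy.Item behaves like a dict restricted to its declared fields, which is what lets the spider assign car_item["name"] and the rest. A quick check with made-up values:

car = carItem()
car["name"] = "SomeModel"   # declared field: accepted
car["max_price"] = "25.80"
print(dict(car))            # {'name': 'SomeModel', 'max_price': '25.80'}
# car["color"] = "red"      # undeclared field: raises KeyError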

mongo_car


from mininova.mongodb import Mongo
from mininova.settings import mongo_setting

class MongoCar():
    db_name = "car"
    brand_set_name = "brand"
    brand_item_set_name = "brand_item"
    car_set_name = "car"

    def __init__(self):
        self.db = Mongo(mongo_setting["mongo_host"], mongo_setting["mongo_port"],
                        mongo_setting["mongo_user"], mongo_setting["mongo_password"])

    def insert(self, item):
        brand_where = {"name": item["brand"]}
        brand = self.brand_exist(self.db, brand_where)
        if brand == False:
            brand = {"name": item["brand"], "first_word": item["first_word"]}
            brand = self.insert_brand(self.db, brand)
            print("brand insert ok!")
        else:
            brand = {"name": item["brand"], "first_word": item["first_word"], "logo_url": item["brand_logo_url"]}
            brand = self.update_brand(self.db, brand_where, brand)
            print("brand_exist!")
        brand_item_where = {"name": item["brand_item"]}
        brand_item = self.brand_item_exist(self.db, brand_item_where)
        if brand_item == False:
            brand_item = {"name": item["brand_item"], "first_word": item["first_word"], "brand_id": brand["_id"]}
            brand_item = self.insert_brand_item(self.db, brand_item)
            print("brand_item insert ok!")
        else:
            print("brand_item_exist!")
        # match an existing car by model name within its sub-brand; the
        # original dict literal repeated the "name" key, which silently
        # discarded the first condition
        car_where = {"name": item["name"], "brand_item_id": brand_item["_id"]}
        car = self.car_exist(self.db, car_where)
        if car == False:
            car = {"name": item["name"], "url": item["url"], "max_price": item["max_price"],
                   "min_price": item["min_price"], "first_word": item["first_word"],
                   "brand_id": brand["_id"], "brand_item_id": brand_item["_id"]}
            car = self.insert_car(self.db, car)
            print("car insert ok!")
        else:
            print("car_exist!")
        if car != False:
            return True
        else:
            return False

    def update_brand(self, db, brand_where, brand):
        my_set = db.set(self.db_name, self.brand_set_name)
        my_set.update_one(brand_where, {"$set": brand})
        exist = my_set.find_one(brand_where)
        if exist is None:
            return False
        else:
            return exist

    def brand_exist(self, db, brand):
        my_set = db.set(self.db_name, self.brand_set_name)
        exist = my_set.find_one(brand)
        if exist is None:
            return False
        else:
            return exist

    def insert_brand(self, db, brand):
        my_set = db.set(self.db_name, self.brand_set_name)
        my_set.insert_one(brand)
        brand = my_set.find_one(brand)
        return brand

    def brand_item_exist(self, db, brand_item):
        my_set = db.set(self.db_name, self.brand_item_set_name)
        exist = my_set.find_one(brand_item)
        if exist is None:
            return False
        else:
            return exist

    def insert_brand_item(self, db, brand_item):
        my_set = db.set(self.db_name, self.brand_item_set_name)
        my_set.insert_one(brand_item)
        brand_item = my_set.find_one(brand_item)
        return brand_item

    def car_exist(self, db, car):
        my_set = db.set(self.db_name, self.car_set_name)
        exist = my_set.find_one(car)
        if exist is None:
            return False
        else:
            return exist

    def insert_car(self, db, car):
        my_set = db.set(self.db_name, self.car_set_name)
        my_set.insert_one(car)
        car = my_set.find_one(car)
        return car
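
The Mongo wrapper imported from mininova.mongodb is not shown in the post. Judging only from how it is called above (a host/port/user/password constructor and db.set(db_name, set_name) returning a collection), a minimal pymongo-based stand-in could look like the following; this is an assumption, not the author's actual module:

# Hypothetical mininova/mongodb.py, reconstructed from MongoCar's usage.
import pymongo

class Mongo(object):
    def __init__(self, host, port, user, password):
        # username/password keyword arguments need pymongo >= 3.5; the
        # authentication database defaults to "admin" here, which may
        # differ per deployment
        self.client = pymongo.MongoClient(host=host, port=port,
                                          username=user, password=password)

    def set(self, db_name, set_name):
        # "set" in this project's naming is a MongoDB collection
        return self.client[db_name][set_name]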

pipeline


from mininova.mongo_car import MongoCar

class CarPipeline(object):
    def process_item(self, item, spider):
        mongo_car = MongoCar()
        mongo_car.insert(item)
        print(item["name"])
        print("item insert ok!")
        # return the item so any later pipelines still receive it
        return item
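
As written, process_item builds a new MongoCar (and with it a new MongoDB connection) for every single item. A common refinement is to create it once per crawl via the pipeline's open_spider hook; a sketch:

from mininova.mongo_car import MongoCar

class CarPipeline(object):
    def open_spider(self, spider):
        # one MongoDB helper (one connection) for the whole crawl
        self.mongo_car = MongoCar()

    def process_item(self, item, spider):
        self.mongo_car.insert(item)
        return item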

setting


mongo_setting = {
    "mongo_host": "xxx.xxx.xxx.xxx",
    "mongo_port": 27017,
    "mongo_user": "username",
    "mongo_password": "password"
}
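
The spider turns its pipeline on through custom_settings; the equivalent project-wide switch lives in the same settings.py, next to mongo_setting:

# Project-wide pipeline registration in settings.py; the spider's
# custom_settings above has the same effect for this one spider only.
ITEM_PIPELINES = {
    "mininova.pipelines.CarPipeline": 300,
}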
