Learning Go: my first crawler project
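This is the first small crawler I wrote while learning Go. The program asks for a range of list pages on pengfu.com, crawls each list page in its own goroutine, follows every joke link it finds, pulls the title and body out of each joke page with regular expressions, and writes the jokes of each list page into a numbered .txt file. A channel is used so the main goroutine waits until every page has finished.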


    package main

    import (
        "fmt"
        "io"
        "net/http"
        "os"
        "regexp"
        "strconv"
        "strings"
    )

    // HttpGet sends a GET request to url and returns the response body as a string.
    func HttpGet(url string) (result string, err error) {
        resp, err1 := http.Get(url) // send the GET request
        if err1 != nil {
            err = err1
            return
        }
        defer resp.Body.Close()

        // read the page content in 4 KB chunks
        buf := make([]byte, 4*1024)
        for {
            n, readErr := resp.Body.Read(buf)
            if n > 0 {
                result += string(buf[:n])
            }
            if readErr != nil {
                if readErr != io.EOF {
                    fmt.Println("resp.Body.Read err=", readErr)
                }
                break
            }
        }
        return
    }

    // SpiderOneJoy fetches a single joke page and extracts its title and content.
    func SpiderOneJoy(url string) (title, content string, err error) {
        result, err1 := HttpGet(url)
        if err1 != nil {
            err = err1
            return
        }

        // extract the title: <h1>title</h1>
        re := regexp.MustCompile(`<h1>(?s:(.*?))</h1>`)
        tmpTitle := re.FindAllStringSubmatch(result, 1) // keep only the first match
        for _, data := range tmpTitle {
            title = data[1]
            title = strings.Replace(title, "\t", "", -1)
            break // a page can contain several <h1> tags; use only the first
        }

        // extract the content between the content div and the "prev" link
        re = regexp.MustCompile(`<div class="content-txt pt10">(?s:(.*?))<a id="prev" href="`)
        tmpContent := re.FindAllStringSubmatch(result, -1)
        for _, data := range tmpContent {
            content = data[1]
            // strip line breaks, whitespace and <br> tags
            content = strings.Replace(content, "\r\n", "", -1)
            content = strings.Replace(content, "\n", "", -1)
            content = strings.Replace(content, "\r", "", -1)
            content = strings.Replace(content, " ", "", -1)
            content = strings.Replace(content, "\t", "", -1)
            content = strings.Replace(content, "<br/>", "", -1)
            content = strings.Replace(content, "<br />", "", -1)
            break
        }
        return
    }

    // StoreJoyToFile writes the titles and contents collected from one list page to <i>.txt.
    func StoreJoyToFile(i int, fileTitle, fileContent []string) {
        // create the output file
        fileName := strconv.Itoa(i) + ".txt"
        f, err := os.Create(fileName)
        if err != nil {
            fmt.Println("os.Create err=", err)
            return
        }
        defer f.Close()

        // write one title/content pair per joke
        for j := 0; j < len(fileTitle); j++ {
            f.WriteString(fileTitle[j] + "\n")
            f.WriteString(fileContent[j] + "\n")
            f.WriteString("---------\n")
        }
    }

    // SpiderPage crawls one list page, follows every joke link on it, stores the
    // results, and reports its page number on the channel when it is done.
    func SpiderPage(i int, page chan int) {
        // always report back, even on failure, so DoWork never blocks forever
        defer func() { page <- i }()

        // the list page to crawl
        url := "https://www.pengfu.com/xiaohua_" + strconv.Itoa(i) + ".html"
        fmt.Printf("crawling page %d: %s\n", i, url)

        // fetch the list page
        result, err := HttpGet(url)
        if err != nil {
            fmt.Println("HttpGet err=", err)
            return
        }

        // extract each joke's URL from: <h1 class="dp-b"><a href="...">
        re := regexp.MustCompile(`<h1 class="dp-b"><a href="(?s:(.*?))"`)
        joyUrls := re.FindAllStringSubmatch(result, -1)

        fileTitle := make([]string, 0)
        fileContent := make([]string, 0)

        // data[0] is the full match, data[1] is the captured URL
        for _, data := range joyUrls {
            url := data[1]
            // crawl the joke's own page
            title, content, err := SpiderOneJoy(url)
            if err != nil {
                fmt.Println("SpiderOneJoy err=", err)
                continue
            }
            fileTitle = append(fileTitle, title)        // collect the title
            fileContent = append(fileContent, content)  // collect the content
        }

        // write this page's results to a file
        StoreJoyToFile(i, fileTitle, fileContent)
    }

    // DoWork starts one goroutine per page and waits for all of them to finish.
    func DoWork(start, end int) {
        fmt.Printf("preparing to crawl pages %d through %d\n", start, end)

        // the channel signals that a page has been fully crawled
        page := make(chan int)
        for i := start; i <= end; i++ {
            // crawl each list page in its own goroutine
            go SpiderPage(i, page)
        }

        // receive exactly one message per page; otherwise main would exit early
        for i := start; i <= end; i++ {
            fmt.Printf("page %d finished\n", <-page)
        }
    }

    func main() {
        var start, end int
        fmt.Printf("Enter the start page (>= 1): ")
        fmt.Scan(&start)
        fmt.Printf("Enter the end page (>= start page): ")
        fmt.Scan(&end)
        DoWork(start, end) // kick off the crawl
    }
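To try it, save the code as main.go and run it with go run main.go. Assuming pengfu.com is reachable and its markup still matches the regular expressions above (the patterns are tied to the site's current HTML, so adjust them if the pages change), the program writes one <page number>.txt file per list page into the current directory. A session might start like this (the page numbers are whatever you type):

    $ go run main.go
    Enter the start page (>= 1): 1
    Enter the end page (>= start page): 2

The concurrency lives in DoWork: one goroutine per page, plus a channel that the main goroutine drains exactly once per page, so the program cannot exit before every page reports back. Here is the same pattern in isolation, as a minimal sketch independent of the crawler:

    package main

    import "fmt"

    // worker stands in for SpiderPage: do the page's work, then report completion.
    func worker(i int, done chan int) {
        // ... crawl page i here ...
        done <- i // signal that page i is finished
    }

    func main() {
        done := make(chan int)
        for i := 1; i <= 3; i++ {
            go worker(i, done) // fan out: one goroutine per page
        }
        for i := 1; i <= 3; i++ {
            // receive exactly one completion message per page before exiting
            fmt.Printf("page %d finished\n", <-done)
        }
    }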

     
