文章目录
七、ElasticSearch实战(仿京东商城搜索)
7.1、项目整体概况
目录结构
7.2、项目初始化
1、导入依赖
<properties>
<java.version>1.8</java.version>
<elasticsearch.version>7.6.1</elasticsearch.version>
</properties>
<dependencies>
<!-- jsoup解析页面 -->
<!-- 解析网页 爬视频可 研究tiko -->
<dependency>
<groupId>org.jsoup</groupId>
<artifactId>jsoup</artifactId>
<version>1.10.2</version>
</dependency>
<!-- fastjson -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.70</version>
</dependency>
<!-- ElasticSearch -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-elasticsearch</artifactId>
</dependency>
<!-- thymeleaf -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-thymeleaf</artifactId>
</dependency>
<!-- web -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<!-- devtools热部署 -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-devtools</artifactId>
<scope>runtime</scope>
<optional>true</optional>
</dependency>
<!-- configuration-processor:为自定义配置类生成元数据,提供 IDE 配置提示 -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
<!-- lombok 需要安装插件 -->
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<!-- test -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
2、导入前端素材
3、编写
application.properties
配置文件
# 更改端口,防止冲突
server.port=9999
# 关闭thymeleaf缓存
spring.thymeleaf.cache=false
4、测试controller和view
/**
 * View controller: serves the search page template.
 */
@Controller
public class IndexController {

    // Maps both "/" and "/index" to the Thymeleaf template templates/index.html.
    @GetMapping({"/","/index"})
    public String index(){
        return "index";
    }
}
访问 localhost:9999
7.3、爬虫数据
那我们的数据怎么处理?通常可以选择数据库获取、消息队列中获取、都可以成为数据源,这里我们用爬虫
爬取数据: (获取请求返回的页面信息,筛选出我们想要的数据就可以了! )
路径:http://search.jd.com/Search?keyword=java
jsoup包。做爬虫用的
导入包:
<!-- jsoup解析页面 -->
<!-- 解析网页 爬视频可 研究tiko -->
<dependency>
<groupId>org.jsoup</groupId>
<artifactId>jsoup</artifactId>
<version>1.10.2</version>
</dependency>
工具类:
package com.wlw.util;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import java.io.IOException;
import java.net.URL;
/**
* 爬取页面工具类
*/
/**
 * Page-scraping utility: fetches the JD search-result page for the keyword
 * "java" and prints the image URL, name and price of every product <li>.
 */
public class HtmlParseUtil {
    public static void main(String[] args) throws IOException {
        /// Requires network access.
        // Request URL.
        String url = "http://search.jd.com/search?keyword=java";
        // 1. Parse the page (the Document Jsoup returns is analogous to the browser DOM Document).
        Document document = Jsoup.parse(new URL(url), 30000);
        // All the DOM operations JS offers on document are available on this object.
        // 2. Get the element by id; "J_goodsList" was found by inspecting the page source (F12).
        Element j_goodsList = document.getElementById("J_goodsList");
        // 3. Get every <li> tag inside the J_goodsList <ul>.
        Elements lis = j_goodsList.getElementsByTag("li");
        System.out.println(lis);
        // 4. Read the img / price / name of each <li>.
        for (Element li : lis) {
            // First image under the <li>.
            // NOTE(review): "src" comes back empty because JD lazy-loads images;
            // the real URL is in the "data-lazy-img" attribute — this is
            // intentional here and fixed in the rewritten version below.
            String img = li.getElementsByTag("img").eq(0).attr("src");
            String name = li.getElementsByClass("p-name").eq(0).text();
            String price = li.getElementsByClass("p-price").eq(0).text();
            System.out.println("=======================");
            System.out.println("img : " + img);
            System.out.println("name : " + name);
            System.out.println("price : " + price);
        }
    }
}
目标元素:img、price、name
运行结果发现没有图片的信息
原因是啥?
一般图片特别多的网站,所有的图片都是通过延迟加载的
// 打印标签内容
Elements lis = j_goodsList.getElementsByTag("li");
System.out.println(lis);
打印所有li标签,发现img标签中并没有属性src的设置,只是data-lazy-img设置图片加载的地址
HtmlParseUtil的改写
- 更改图片获取属性为 data-lazy-img
- 与实体类结合,实体类如下
package com.wlw.pojo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.io.Serializable;
/**
 * Scraped JD product entity; also the document stored in the "jd_goods" index.
 * Lombok generates getters/setters, equals/hashCode, toString and both
 * constructors.
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class Content implements Serializable {
    private static final long serialVersionUID = -8049497962627482693L;
    private String name;  // product title text
    private String price; // price text as displayed on the page (not parsed to a number)
    private String img;   // image URL taken from the lazy-load attribute
}
package com.wlw.util;
import com.wlw.pojo.Content;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.springframework.stereotype.Component;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
/**
* 爬取页面工具类
*/
/**
 * Page-scraping utility: fetches the JD search-result page for a keyword and
 * converts each product <li> into a {@link Content}.
 */
@Component
public class HtmlParseUtil {

    public static void main(String[] args) throws Exception {
        parseJD("java").forEach(System.out::println);
    }

    /**
     * Scrapes the JD search-result page for the given keyword.
     *
     * @param keywords keyword appended to the JD search URL
     * @return one Content (name/price/img) per product; empty list when the
     *         goods-list element is absent (e.g. JD served an anti-crawler page)
     * @throws Exception on network or parse failure (requires connectivity)
     */
    public static List<Content> parseJD(String keywords) throws Exception {
        /// Requires network access.
        String url = "http://search.jd.com/search?keyword=" + keywords;
        // The Document Jsoup returns is analogous to the browser DOM Document.
        Document document = Jsoup.parse(new URL(url), 30000);
        // "J_goodsList" was found by inspecting the page source (F12).
        Element j_goodsList = document.getElementById("J_goodsList");
        // Robustness fix: JD sometimes returns a page without the goods list
        // (anti-crawler / captcha page); previously this line onwards threw a
        // NullPointerException. Return an empty result instead.
        if (j_goodsList == null) {
            return new ArrayList<Content>();
        }
        // Every <li> tag inside the J_goodsList <ul>.
        Elements lis = j_goodsList.getElementsByTag("li");
        List<Content> contents = new ArrayList<Content>();
        for (Element li : lis) {
            // Images are lazy-loaded, so the real URL lives in the
            // "data-lazy-img" attribute rather than "src".
            String img = li.getElementsByTag("img").eq(0).attr("data-lazy-img");
            String name = li.getElementsByClass("p-name").eq(0).text();
            String price = li.getElementsByClass("p-price").eq(0).text();
            contents.add(new Content(name, price, img));
        }
        return contents;
    }
}
7.4、业务编写
1、编写ESConfig
package com.wlw.config;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * Spring configuration that exposes the Elasticsearch high-level REST client.
 */
@Configuration
public class ElasticSearchConfig {

    /**
     * Registers the high-level REST client as a bean, pointing at the local
     * single-node ES instance. For a cluster, pass one HttpHost per node to
     * {@code RestClient.builder(...)}.
     */
    @Bean
    public RestHighLevelClient restHighLevelClient() {
        return new RestHighLevelClient(
                RestClient.builder(new HttpHost("127.0.0.1", 9200, "http")));
    }
}
2、编写service
因为是爬取的数据,那么就不走Dao,以下编写都不会编写接口,开发中必须严格要求编写
ContentService
package com.wlw.service;
import com.alibaba.fastjson.JSON;
import com.wlw.pojo.Content;
import com.wlw.util.HtmlParseUtil;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * Business service: indexes scraped JD products into ES and queries them.
 * (Tutorial shortcut: no DAO layer and no service interface.)
 */
@Service
public class ContentService {

    @Autowired
    private RestHighLevelClient restHighLevelClient;

    /**
     * Scrapes JD for the keyword and bulk-indexes the results into the
     * "jd_goods" index (the index must exist beforehand).
     *
     * @return true when every document was indexed without failure
     */
    public Boolean parseContent(String keywords) throws Exception {
        // 1. Fetch the content.
        List<Content> contents = HtmlParseUtil.parseJD(keywords);
        // 2. Bulk-index into ES.
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.timeout("2m"); // timeout; tune to the real workload
        for (int i = 0; i < contents.size(); i++) {
            bulkRequest.add(new IndexRequest("jd_goods")
                    //.id("" + (i + 1)) // omit to let ES assign a random id
                    .source(JSON.toJSONString(contents.get(i)), XContentType.JSON)
            );
        }
        BulkResponse bulk = restHighLevelClient.bulk(bulkRequest, RequestOptions.DEFAULT);
        return !bulk.hasFailures();
    }

    /**
     * Paged exact-term search on the "name" field.
     *
     * @param keywords  term to match
     * @param pageIndex 1-based page number (values below 1 are clamped to 1)
     * @param pageSize  number of documents per page
     * @return the _source map of each hit
     */
    public List<Map<String, Object>> search(String keywords, Integer pageIndex, Integer pageSize) throws IOException {
        if (pageIndex < 1) {
            pageIndex = 1;
        }
        // 1. Build the search request.
        SearchRequest jd_goods = new SearchRequest("jd_goods");
        // 2. Build the query conditions.
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        // Exact term query against "name".
        TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("name", keywords);
        searchSourceBuilder.query(termQueryBuilder);
        searchSourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // 60s
        // Pagination fix: "from" is a document OFFSET, not a page number.
        // The original passed pageIndex directly, so page 1 skipped the first
        // hit and consecutive pages overlapped.
        searchSourceBuilder.from((pageIndex - 1) * pageSize);
        searchSourceBuilder.size(pageSize);
        // 3. Attach the conditions to the request.
        jd_goods.source(searchSourceBuilder);
        // 4. Execute.
        SearchResponse searchResponse = restHighLevelClient.search(jd_goods, RequestOptions.DEFAULT);
        // 5. Collect each hit's _source map.
        SearchHits hits = searchResponse.getHits();
        List<Map<String, Object>> results = new ArrayList<>();
        for (SearchHit hit : hits.getHits()) {
            results.add(hit.getSourceAsMap());
        }
        return results;
    }
}
3、编写controller
package com.wlw.controller;
import com.wlw.service.ContentService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * REST entry points for indexing scraped data and searching it.
 * Note: both handlers are Java overloads named "parse"; the HTTP routes are
 * distinct, so this compiles, but distinct method names would read better.
 */
@RestController
public class ContentController {

    @Autowired
    private ContentService contentService;

    // Scrape JD for the keyword and index the results into ES; returns true on success.
    @GetMapping("/parse/{keywords}")
    public Boolean parse(@PathVariable("keywords") String keywords) throws Exception {
        return contentService.parseContent(keywords);
    }

    // Paged search; returns the _source map of each hit.
    @GetMapping("/search/{keywords}/{pageIndex}/{pageSize}")
    public List<Map<String, Object>> parse(@PathVariable("keywords") String keywords,
                                           @PathVariable("pageIndex") Integer pageIndex,
                                           @PathVariable("pageSize") Integer pageSize) throws IOException {
        return contentService.search(keywords,pageIndex,pageSize);
    }
}
4、测试
http://localhost:9999/parse/java 测试插入,页面返回true,然后通过es可视化工具查询数据情况
http://localhost:9999/search/java/1/10 分页查询,页面直接返回数据
7.5、前后端分离(简单使用Vue)
1、下载并引入Vue.min.js和axios.min.js
如果安装了nodejs,可以按如下步骤,没有可以到后面素材处下载
npm install vue
npm install axios
2、修改静态页面index.html
引入js
<script th:src="@{/js/vue.min.js}"></script>
<script th:src="@{/js/axios.min.js}"></script>
修改后的index.html
<!DOCTYPE html>
<html xmlns:th="http://www.thymeleaf.org">
<head>
<meta charset="utf-8"/>
<title>WLW-ES仿京东实战</title>
<link rel="stylesheet" th:href="@{/css/style.css}"/>
</head>
<body class="pg">
<div class="page" id="app">
<div id="mallPage" class=" mallist tmall- page-not-market ">
<!-- 头部搜索 -->
<div id="header" class=" header-list-app">
<div class="headerLayout">
<div class="headerCon ">
<!-- Logo-->
<h1 id="mallLogo">
<img th:src="@{/images/jdlogo.png}" alt="">
</h1>
<div class="header-extra">
<!--搜索-->
<div id="mallSearch" class="mall-search">
<form name="searchTop" class="mallSearch-form clearfix">
<fieldset>
<legend>天猫搜索</legend>
<div class="mallSearch-input clearfix">
<div class="s-combobox" id="s-combobox-685">
<div class="s-combobox-input-wrap">
<input v-model="keyword" type="text" autocomplete="off" value="dd" id="mq"
class="s-combobox-input" aria-haspopup="true">
</div>
</div>
<button type="submit" @click.prevent="searchKey" id="searchbtn">搜索</button>
</div>
</fieldset>
</form>
<ul class="relKeyTop">
<li><a>Java</a></li>
<li><a>前端</a></li>
<li><a>Linux</a></li>
<li><a>大数据</a></li>
<li><a>GO</a></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<!-- 商品详情页面 -->
<div id="content">
<div class="main">
<!-- 品牌分类 -->
<form class="navAttrsForm">
<div class="attrs j_NavAttrs" style="display:block">
<div class="brandAttr j_nav_brand">
<div class="j_Brand attr">
<div class="attrKey">
品牌
</div>
<div class="attrValues">
<ul class="av-collapse row-2">
<li><a href="#"> WLW </a></li>
<li><a href="#"> Java </a></li>
</ul>
</div>
</div>
</div>
</div>
</form>
<!-- 排序规则 -->
<div class="filter clearfix">
<a class="fSort fSort-cur">综合<i class="f-ico-arrow-d"></i></a>
<a class="fSort">人气<i class="f-ico-arrow-d"></i></a>
<a class="fSort">新品<i class="f-ico-arrow-d"></i></a>
<a class="fSort">销量<i class="f-ico-arrow-d"></i></a>
<a class="fSort">价格<i class="f-ico-triangle-mt"></i><i class="f-ico-triangle-mb"></i></a>
</div>
<!-- 商品详情 -->
<div class="view grid-nosku">
<div class="product" v-for="result in results">
<div class="product-iWrap">
<!--商品封面-->
<div class="productImg-wrap">
<a class="productImg">
<img :src="result.img">
</a>
</div>
<!--价格-->
<p class="productPrice">
<em><b>¥</b> {{result.price}} </em>
</p>
<!--标题-->
<p class="productTitle">
<a> {{result.name}} </a>
</p>
<!-- 店铺名 -->
<div class="productShop">
<span>店铺: WLW </span>
</div>
<!-- 成交信息 -->
<p class="productStatus">
<span>月成交<em>999笔</em></span>
<span>评价 <a>3</a></span>
</p>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!--前端使用Vue,前后端分离-->
<script th:src="@{/js/vue.min.js}"></script>
<script th:src="@{/js/axios.min.js}"></script>
<script>
new Vue({
    // Element to mount on.
    el:'#app',
    // Reactive state.
    data:{
        keyword: '', // search keyword, bound to the input box via v-model
        results:[] // hits returned by the backend, rendered with v-for
    },
    // Event handlers.
    methods:{
        searchKey(){
            var keyword = this.keyword;
            console.log(keyword);
            // Call the backend search endpoint (fixed at page 1, 20 results).
            axios.get('search/'+keyword+'/1/20').then(response=>{
                console.log(response.data);
                // Bind the returned data to the view.
                this.results = response.data;
            })
        }
    }
});
</script>
</body>
</html>
7.6、搜索高亮
1、ContentService 增加方法
/**
 * Paged exact-term search on "name" with highlighting; the highlighted
 * fragment replaces the raw "name" value in each returned _source map.
 *
 * @param keywords  term to match
 * @param pageIndex 1-based page number (values below 1 are clamped to 1)
 * @param pageSize  number of documents per page
 */
public List<Map<String, Object>> searchHighlight(String keywords, Integer pageIndex, Integer pageSize) throws IOException {
    if (pageIndex < 1) {
        pageIndex = 1;
    }
    // 1. Build the search request.
    SearchRequest jd_goods = new SearchRequest("jd_goods");
    // 2. Build the query conditions.
    SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
    TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("name", keywords);
    searchSourceBuilder.query(termQueryBuilder);
    searchSourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // 60s
    // Pagination fix: "from" is a document OFFSET, not a page number. The
    // original clamped pageIndex to 0 and passed it directly, so consecutive
    // pages overlapped. Also made consistent with search()'s 1-based clamp.
    searchSourceBuilder.from((pageIndex - 1) * pageSize);
    searchSourceBuilder.size(pageSize);
    // Highlighting on "name".
    HighlightBuilder highlightBuilder = new HighlightBuilder();
    highlightBuilder.field("name");
    highlightBuilder.requireFieldMatch(false); // allow highlighting several fields
    highlightBuilder.preTags("<span style='color:red'>");
    highlightBuilder.postTags("</span>");
    searchSourceBuilder.highlighter(highlightBuilder);
    // 3. Attach the conditions to the request.
    jd_goods.source(searchSourceBuilder);
    // 4. Execute.
    SearchResponse searchResponse = restHighLevelClient.search(jd_goods, RequestOptions.DEFAULT);
    // 5. Parse the hits.
    SearchHits hits = searchResponse.getHits();
    List<Map<String, Object>> results = new ArrayList<>();
    for (SearchHit hit : hits.getHits()) {
        Map<String, Object> sourceAsMap = hit.getSourceAsMap();
        // Replace the plain "name" with the highlighted fragments, if any.
        HighlightField name = hit.getHighlightFields().get("name");
        if (name != null) {
            StringBuilder newName = new StringBuilder(); // was String += (quadratic)
            for (Text text : name.fragments()) {
                newName.append(text);
            }
            sourceAsMap.put("name", newName.toString());
        }
        results.add(sourceAsMap);
    }
    return results;
}
2、ContentController增加入口
// Paged highlighted search.
// NOTE(review): this REPLACES the earlier /search/{...} handler in
// ContentController — keeping both would be a duplicate method signature AND
// a duplicate request mapping; only this version should remain in the class.
@GetMapping("/search/{keywords}/{pageIndex}/{pageSize}")
public List<Map<String, Object>> parse(@PathVariable("keywords") String keywords,
                                       @PathVariable("pageIndex") Integer pageIndex,
                                       @PathVariable("pageSize") Integer pageSize) throws IOException {
    return contentService.searchHighlight(keywords,pageIndex,pageSize);
}
3、测试
7.7、问题:
1、使用term(精确查询)时,我发现三个问题,问题如下:
- 字段值必须是一个词(索引中存在的词),才能匹配
- 问题:中文字符串,term查询时无法查询到数据(比如,“编程”两字在文档中存在,但是搜索不到)
- 原因:索引未配置中文分词器(默认使用standard分词器,即所有中文字符串都会被切分为单个汉字作为单词),索引中没有超过1个汉字的词,也就无法匹配,进而查不到数据
- 解决:创建索引时为该字段配置中文分词器,如:
PUT /jd_goods
{
  "mappings": {
    "properties": {
      "name": {
        "type": "text",
        "analyzer": "ik_max_word"
      }
    }
  }
}
(说明:ik_max_word 即 IK 中文分词器;真实请求体是 JSON,不能像原文那样内嵌 // 注释)
- 查询的英文字符只能是小写,大写都无效
- 查询时英文单词必须是完整的