Some common usages of Spring Data JPA

1. How to bring in the JARs that Spring Data JPA needs; here we import them as Maven dependencies.

    The Maven dependencies are as follows:

        <!-- JPA persistence starter; provides the entity/repository support -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-jpa</artifactId>
        </dependency>

        <!-- Connection pool (Druid) and MySQL driver dependencies -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid</artifactId>
            <version>1.1.4</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
        </dependency>

2. Here we use Druid, an open-source connection pool from Alibaba. The Spring Boot YAML configuration is as follows:

server:
  port: 8083  # server port
spring:
  application:
    name: datummicroservice1
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    driver-class-name: com.mysql.jdbc.Driver
    url: jdbc:mysql://192.163.20.99:3306/bhpp2.0_datummngtservice?useSSL=false
    username: root
    password: root
    # The settings below configure the connection pool itself
    initialSize: 5 # initial pool size
    minIdle: 5 # minimum number of idle connections
    maxActive: 20 # maximum number of active connections
    maxWait: 60000 # maximum wait time for obtaining a connection, in milliseconds
    timeBetweenEvictionRunsMillis: 60000 # interval between checks for idle connections that should be closed, in milliseconds
    minEvictableIdleTimeMillis: 300000 # minimum time a connection stays in the pool, in milliseconds
    validationQuery: SELECT 1 FROM DUAL
    testWhileIdle: true
    testOnBorrow: false
    testOnReturn: false
    poolPreparedStatements: true # enable PSCache and set its size per connection
    maxPoolPreparedStatementPerConnectionSize: 20
    filters: stat,wall,log4j # monitoring/statistics filters; without 'stat' the monitoring page cannot aggregate SQL, 'wall' is the SQL firewall
    # connectionProperties enables mergeSql and slow-SQL logging
    connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
    # merge monitoring data from multiple DruidDataSource instances
    #spring.datasource.useGlobalDataSourceStat=true
  jpa:
    hibernate:
      ddl-auto: update
    show-sql: true
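
One caveat: with the plain druid artifact (as opposed to druid-spring-boot-starter), Spring Boot does not automatically bind the Druid-specific keys above (initialSize, maxActive, and so on) onto the pool. A common fix, sketched here as an assumption about your setup, is to expose the pool as a bean bound to the spring.datasource prefix:

import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class DruidConfig {

    // Bind every spring.datasource.* key (including Druid-specific ones
    // such as initialSize and maxActive) onto the DruidDataSource setters.
    @Bean
    @ConfigurationProperties(prefix = "spring.datasource")
    public DataSource dataSource() {
        return new DruidDataSource();
    }
}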

3. Some basic CRUD operations with Spring Data JPA

     Create

     save(entity) — inherited from JpaRepository; inserts a new row (or updates an existing one if the ID is already present).
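
A minimal sketch of an insert, assuming a DatumFile entity and repository like the ones used below (the setter names and the datumFileDao repository are assumptions):

DatumFile file = new DatumFile();
file.setFileNum("F001");            // assumed setters for the fields queried below
file.setFileName("report.pdf");
file.setUploadTime(new Date());
datumFileDao.save(file);            // datumFileDao extends JpaRepository<DatumFile, Integer>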

    Update: you can reuse the same save method used for inserts, or write an update statement with the @Query annotation.

@Modifying
@Transactional
@Query(value = "update DatumFile df set df.uploadTime = :uploadTime, df.fileSize = :fileSize, df.uploadPerNum = :uploadPerNum, df.fileName = :fileName, df.realName = :realName where df.fileNum = :fileNum")
public int updateDatumFileByFileNum(@Param("uploadTime") Date uploadTime, @Param("fileSize") String fileSize,
        @Param("uploadPerNum") String uploadPerNum, @Param("fileName") String fileName,
        @Param("realName") String realName, @Param("fileNum") String fileNum);
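
A hypothetical call (all argument values are made up); the return value is the number of rows updated:

int rows = datumFileDao.updateDatumFileByFileNum(
        new Date(), "2048", "P1001", "report.pdf", "Annual Report.pdf", "F001");
// rows == 0 means no DatumFile row matched the given fileNum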

Delete: deletion is usually done with a statement written via @Query.

@Modifying
@Transactional
@Query(value = "delete from DatumFile df where df.fileNum = :fileNum")
public int deleteDatumFileData(@Param("fileNum") String fileNum);
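
Alternatively, Spring Data can derive the delete from the method name alone, without any @Query; it still needs to run inside a transaction:

@Transactional
public long deleteByFileNum(String fileNum); // returns the number of rows deleted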

Query by a single property

public DatumFile findByFileNum(String fileNum);

public List<Thing> findByThingNumIn(List<String> thingNumList);
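
A hypothetical call of the In variant (thingDao is an assumed repository for the Thing entity):

List<Thing> things = thingDao.findByThingNumIn(Arrays.asList("T001", "T002"));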

Query by two properties combined with OR:

 public List<Thing> findByChargePerNumOrManagerNum(String chargePerNum, String managerNum);

Query by two properties combined with AND:

 public List<Thing> findByChargePerNumAndManagerNum(String chargePerNum, String managerNum);

Query by one property and order the result by another: the Desc suffix sorts descending, Asc ascending.

public List<DatumFile> findByDatumNumOrderByUploadTimeDesc(String datumNum);
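
The ascending counterpart just changes the suffix:

public List<DatumFile> findByDatumNumOrderByUploadTimeAsc(String datumNum);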

Querying with native SQL

@Query(value = "select * from book b where b.name = ?1", nativeQuery = true)
List<Book> findByName(String name);

// Equivalent form with a named parameter instead of a positional one.
// Use one or the other: both methods have the same signature, so they
// cannot both be declared in the same repository interface.
@Query(value = "select * from book b where b.name = :name", nativeQuery = true)
List<Book> findByName(@Param("name") String name);

Querying with a JPQL statement via the @Query annotation

// Select the entity itself (b), not individual columns, so the result maps to Book
@Query(value = "select b from Book b where b.name = :name and b.author = :author and b.price = :price")
List<Book> findByNamedParam(@Param("name") String name, @Param("author") String author, @Param("price") long price);
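
If you do want to project individual columns, JPA returns each row as an Object[] instead of an entity; a sketch with a made-up method name:

@Query("select b.name, b.author, b.price from Book b where b.price = :price")
List<Object[]> findNameAuthorPriceByPrice(@Param("price") long price);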

Paged queries in detail.

DAO layer. Extending JpaSpecificationExecutor alongside JpaRepository is what provides the findAll(Specification, Pageable) overload used in the service below.

@Repository
public interface DatumDao extends JpaRepository<DatumEntity, Integer>, JpaSpecificationExecutor<DatumEntity> {}

Service layer:

@Transactional
public Map<String, Object> findDatumByPage(Map<String, Object> paramMap) {
    int currentPage = (int) paramMap.get("currentPage");
    int pageSize = (int) paramMap.get("pageSize");
    String keyWord = (String) paramMap.get("keyWord"); // keyword type: 1 = datum number, 2 = datum name
    String queryValue = (String) paramMap.get("queryValue"); // the entered search value
    List<String> queryDate = (List<String>) paramMap.get("queryDate"); // upload-time range [from, to]
    // These two were undefined in the original snippet; presumably they also come from paramMap
    String workPerNum = (String) paramMap.get("workPerNum");
    List<String> thingNumList = (List<String>) paramMap.get("thingNumList");
    // JPA paged query (in newer Spring Data, use Sort.by(...) and PageRequest.of(...))
    Sort sort = new Sort(new Order(Direction.DESC, "fileUploadTime")); // sort by upload time, descending
    PageRequest pageRequest = new PageRequest(currentPage - 1, pageSize, sort); // JPA pages are zero-based
    // query conditions
    Specification<DatumEntity> spec = new Specification<DatumEntity>() {

        @Override
        public Predicate toPredicate(Root<DatumEntity> root, CriteriaQuery<?> query, CriteriaBuilder cb) {
            List<Predicate> predicates = new ArrayList<Predicate>();
            // keyword searches the datum number
            if (queryValue != null && !"".equals(queryValue) && "1".equals(keyWord)) {
                predicates.add(cb.like(root.get("datumNum"), "%" + queryValue + "%"));
            }
            // keyword searches the datum name
            if (queryValue != null && !"".equals(queryValue) && "2".equals(keyWord)) {
                predicates.add(cb.like(root.get("datumName"), "%" + queryValue + "%"));
            }
            // upload-time range: between needs both endpoints
            if (queryDate != null && queryDate.size() == 2) {
                predicates.add(cb.between(root.get("fileUploadTime").as(String.class), queryDate.get(0),
                        queryDate.get(1)));
            }

            predicates.add(cb.equal(root.get("workPerNum"), workPerNum));

            predicates.add(root.get("thingNum").in(thingNumList));
            Predicate[] arr = new Predicate[predicates.size()];
            return cb.and(predicates.toArray(arr));
        }
    };
    Page<DatumEntity> page = datumDao.findAll(spec, pageRequest);
    // attach the name of the person who entered each record
    List<DatumEntity> datumEntityList = page.getContent();
    for (DatumEntity datumEntity : datumEntityList) {
        Map<String, Object> workItemDatumMap = null;
        try {
            workItemDatumMap = requestHrService.findWorkItemDatumByDatum(datumEntity.getDatumNum());
        } catch (Exception e1) {
            e1.printStackTrace();
        }
        if (workItemDatumMap != null) {
            datumEntity.setDatumType((int) workItemDatumMap.get("datumType"));
        } else {
            datumEntity.setDatumType(3);
        }
        String personJsonStr;
        try {
            personJsonStr = requestHrService.getPersonByNumber(datumEntity.getWritePerNum());
        } catch (Exception e) {
            personJsonStr = null;
            e.printStackTrace();
        }
        if (personJsonStr == null) {
            datumEntity.setWritePerName("--");
        } else {
            Map<String, Object> personMap = JSON.parseObject(personJsonStr, Map.class);
            datumEntity.setWritePerName((String) personMap.get("name"));
        }
    }
    Map<String, Object> map = new HashMap<String, Object>();
    map.put("total", page.getTotalElements()); // total number of records
    map.put("contentList", datumEntityList); // content of the current page
    map.put("pageNumber", page.getNumber() + 1); // current page number
    map.put("totalPage", page.getTotalPages()); // total number of pages
    return map;
}
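
For completeness, a hypothetical controller endpoint driving this service (DatumService, the class name, and the mapping path are made up):

import java.util.Map;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class DatumController {

    @Autowired
    private DatumService datumService; // the service holding findDatumByPage above

    // Accepts the same map keys the service reads: currentPage, pageSize,
    // keyWord, queryValue, queryDate, workPerNum, thingNumList.
    @PostMapping("/datum/page")
    public Map<String, Object> findDatumByPage(@RequestBody Map<String, Object> paramMap) {
        return datumService.findDatumByPage(paramMap);
    }
}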

Reposted from blog.csdn.net/huxiaochao_6053/article/details/83614872