Using Dangdang's sharding-jdbc for database and table sharding with Spring Boot + MyBatis and with the SSH framework

The Dangdang jars used:

<dependency>
    <groupId>com.dangdang</groupId>
    <artifactId>sharding-jdbc-core</artifactId>
    <version>1.5.4.1</version>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <version>1.1.9</version>
</dependency>
<dependency>
    <groupId>org.projectlombok</groupId>
    <artifactId>lombok</artifactId>
    <optional>true</optional>
</dependency>

Using sharding-jdbc for database and table sharding with Spring Boot + MyBatis:

1. First, configure the data source in the properties file under config; here ycbDataSource is the data source name you define yourself:

spring.datasource.ycbDataSource.url=jdbc:mysql://localhost:3306/ycb?characterEncoding=utf8
spring.datasource.ycbDataSource.username=root
spring.datasource.ycbDataSource.password=root
spring.datasource.ycbDataSource.driver-class-name=com.mysql.jdbc.Driver


#spring.datasource.ycbDataSource2.url=jdbc:mysql://localhost:3306/ycb?characterEncoding=utf8
#spring.datasource.ycbDataSource2.username=root
#spring.datasource.ycbDataSource2.password=root
#spring.datasource.ycbDataSource2.driver-class-name=com.mysql.jdbc.Driver

2. Spring Boot auto-configures the data source connection on its own, so if you want the data source to be managed by sharding-jdbc you have to configure the connection yourself, as follows:

package com.***.***.***.config;

import com.dangdang.ddframe.rdb.sharding.api.ShardingDataSourceFactory;
import com.dangdang.ddframe.rdb.sharding.api.rule.DataSourceRule;
import com.dangdang.ddframe.rdb.sharding.api.rule.ShardingRule;
import com.dangdang.ddframe.rdb.sharding.api.rule.TableRule;
import com.dangdang.ddframe.rdb.sharding.api.strategy.table.TableShardingStrategy;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.jdbc.DataSourceBuilder;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

@Configuration
@MapperScan(basePackages = "com.***.***.***.mapper", sqlSessionTemplateRef  = "shardingSqlSessionTemplate")
public class DataSourceConfig {

    /**
     * Configure the data source defined in your config properties. If there are several,
     * copy this method once per data source, keyed by name. Give the data sources names
     * that follow a consistent pattern; it makes the database-sharding rules easier to write.
     *
     * @return
     */
    @Bean(name = "ycbDataSource")
    @ConfigurationProperties(prefix = "spring.datasource.ycbDataSource")
    public DataSource dataSource() {
        return   DataSourceBuilder.create().build();
    }

    /**
     * Configure a second data source. Again, the data source names should follow a
     * consistent pattern so the database-sharding rules are easy to compute.
     *
     * @return
     */
    /*@Bean(name = "ycbDataSource2")
    @ConfigurationProperties(prefix = "spring.datasource.ycbDataSource2")
    public DataSource dataSource2() {
        return DataSourceBuilder.create().build();
    }*/

    /**
     * Configure the data source rule, i.e. hand the data sources over to sharding-jdbc and
     * set the default data source. Tables without a database-sharding rule fall back to the
     * default data source.
     *
     * @param ycbDataSource
     * @return
     */
    @Bean
    public DataSourceRule dataSourceRule(@Qualifier("ycbDataSource") DataSource ycbDataSource) {
        Map<String, DataSource> dataSourceMap = new HashMap<>(); // database-sharding mapping
        dataSourceMap.put("ycbDataSource", ycbDataSource);
        // dataSourceMap.put("ycbDataSource2", ycbDataSource2);
        return new DataSourceRule(dataSourceMap, "ycbDataSource"); // the default database; with two or more databases a default is mandatory, and its name must be one of the keys in dataSourceMap

    }


    /**
     * Create sharding-jdbc's DataSource; MybatisAutoConfiguration will use this data source.
     *
     * @param shardingRule
     * @return
     * @throws SQLException
     */
    @Bean(name = "shardingDataSource")
    public DataSource shardingDataSource(ShardingRule shardingRule) throws SQLException {
        return   ShardingDataSourceFactory.createDataSource(shardingRule);
    }

    /**
     * The transaction manager has to be configured manually.
     *
     * @param shardingDataSource
     * @return
     */
    @Bean
    public DataSourceTransactionManager transactionManager(@Qualifier("shardingDataSource") DataSource shardingDataSource) {
        return new DataSourceTransactionManager(shardingDataSource);
    }

    @Bean(name = "shardingSqlSessionFactory")
    @Primary
    public SqlSessionFactory shardingSqlSessionFactory(@Qualifier("shardingDataSource") DataSource shardingDataSource) throws Exception {
        SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
        bean.setDataSource( shardingDataSource);
        // bean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:com/ycb/wxxcx/provider/mapper/*")); // not needed: mapper scanning is already configured by the @MapperScan annotation at the top of the class
        return bean.getObject();
    }

    @Bean(name = "shardingSqlSessionTemplate")
    @Primary
    public SqlSessionTemplate shardingSqlSessionTemplate(@Qualifier("shardingSqlSessionFactory") SqlSessionFactory shardingSqlSessionFactory) throws Exception {
        return new SqlSessionTemplate(shardingSqlSessionFactory);
    }

    /**
     * Configure the database- and table-sharding strategies; the concrete strategy is your own implementation.
     * ycb_mcs_order is the logic table name you define; it maps to the two physical tables
     * ycb_mcs_tradelog_0 and ycb_mcs_tradelog_1 that actually exist in the database.
     * customer holds the user id; its value modulo 2 decides which physical table a read or write goes to.
     * @param dataSourceRule
     * @return
     */
    @Bean
    public ShardingRule shardingRule(DataSourceRule dataSourceRule) {
        // concrete database/table sharding strategy
        TableRule orderTableRule = TableRule.builder("ycb_mcs_order")
                .actualTables(Arrays.asList("ycb_mcs_tradelog_0", "ycb_mcs_tradelog_1"))
                .tableShardingStrategy(new TableShardingStrategy("customer", new ModuloTableShardingAlgorithm()))
                .dataSourceRule(dataSourceRule)
                .build();

        // Binding-table strategy: queries on bound tables are routed with the primary table's strategy,
        // so bound tables must share the same sharding rule; this can improve efficiency somewhat.
        // For example, bind the order table and the order_detail table together, sharded by orderid.
        // List<BindingTableRule> bindingTableRules = new ArrayList<BindingTableRule>();
        // bindingTableRules.add(new BindingTableRule(Arrays.asList(orderTableRule)));
        return ShardingRule.builder()
                .dataSourceRule(dataSourceRule)
                .tableRules(Arrays.asList(orderTableRule))
                //.bindingTableRules(bindingTableRules)  // this example shards a single table in a single database; add binding tables when you need them
                //.tableShardingStrategy(new TableShardingStrategy("orderid", new ModuloTableShardingAlgorithm()))
                .build();
    }


}
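
At this point shardingDataSource is the data source behind the primary SqlSessionFactory, so every mapper statement goes through sharding-jdbc and can use the logic table name. As a quick sanity check you can also hit it with plain JDBC. The class below is only a sketch; the class name is illustrative and the column list is taken from the mapper in step 4 plus the sharding column, none of which comes verbatim from the original post:

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class ShardingSmokeTest {

    private final DataSource shardingDataSource; // the bean created by shardingDataSource()

    public ShardingSmokeTest(DataSource shardingDataSource) {
        this.shardingDataSource = shardingDataSource;
    }

    public void printOrderStatuses(long customer) throws SQLException {
        // The SQL uses the logic table ycb_mcs_order; because the WHERE clause contains the
        // sharding column "customer", sharding-jdbc can rewrite it to a single physical table.
        String sql = "SELECT id, orderid, status FROM ycb_mcs_order WHERE customer = ?";
        try (Connection conn = shardingDataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setLong(1, customer);
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    System.out.println(rs.getLong("id") + " -> " + rs.getString("status"));
                }
            }
        }
    }
}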

3. Next, the table-sharding strategy:

package com.***.***.***.config;

import com.dangdang.ddframe.rdb.sharding.api.ShardingValue;
import com.dangdang.ddframe.rdb.sharding.api.strategy.table.SingleKeyTableShardingAlgorithm;
import com.google.common.collect.Range;

import java.util.Collection;
import java.util.LinkedHashSet;

public class ModuloTableShardingAlgorithm implements SingleKeyTableShardingAlgorithm<Long> {

    @Override
    public String doEqualSharding(Collection<String> tableNames, ShardingValue<Long> shardingValue) {
        for (String each : tableNames) {
            if (each.endsWith(shardingValue.getValue() % 2 + "")) {  // the sharding value modulo 2 decides which physical table to use
                return each;
            }
        }
        throw new IllegalArgumentException();
    }

    @Override
    public Collection<String> doInSharding(Collection<String> tableNames, ShardingValue<Long> shardingValue) {
        Collection<String> result = new LinkedHashSet<>(tableNames.size());
        for (Long value : shardingValue.getValues()) {
            for (String tableName : tableNames) {
                if (tableName.endsWith(value % 2 + "")) {
                    result.add(tableName);
                }
            }
        }
        return result;
    }

    @Override
    public Collection<String> doBetweenSharding(Collection<String> tableNames, ShardingValue<Long> shardingValue) {
        Collection<String> result = new LinkedHashSet<>(tableNames.size());
        Range<Long> range = (Range<Long>) shardingValue.getValueRange();
        for (Long i = range.lowerEndpoint(); i <= range.upperEndpoint(); i++) {
            for (String each : tableNames) {
                if (each.endsWith(i % 2 + "")) {
                    result.add(each);
                }
            }
        }
        return result;
    }
}
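
To make the modulo routing concrete, here is a small standalone sketch of the doEqualSharding logic above. It does not call the sharding-jdbc API; it only applies the same suffix matching to the actual table names configured in step 2:

import java.util.Arrays;
import java.util.List;

public class ModuloRoutingDemo {

    public static void main(String[] args) {
        List<String> actualTables = Arrays.asList("ycb_mcs_tradelog_0", "ycb_mcs_tradelog_1");
        long[] customerIds = {1001L, 1002L, 1003L};
        for (long customer : customerIds) {
            String suffix = String.valueOf(customer % 2); // same rule as doEqualSharding
            String target = actualTables.stream()
                    .filter(t -> t.endsWith(suffix))
                    .findFirst()
                    .orElseThrow(IllegalArgumentException::new);
            // 1001 -> ycb_mcs_tradelog_1, 1002 -> ycb_mcs_tradelog_0, 1003 -> ycb_mcs_tradelog_1
            System.out.println("customer=" + customer + " -> " + target);
        }
    }
}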

4. How the SQL statements are written:

package com.***.***.***.mapper;

import com.***.***.***.vo.*;
import org.apache.ibatis.annotations.*;

import java.math.BigDecimal;
import java.util.Date;
import java.util.List;
import java.util.Map;

@Mapper
public interface OrderMapper {

 @Select("SELECT t.id,t.orderid ,t.status FROM ycb_mcs_order t where orderid = #{orderid}")
    Order getOrderStatus(@Param("orderid") String orderid);
}
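
One thing worth noting: getOrderStatus above filters only on orderid, which is not the sharding column, so sharding-jdbc executes the statement against both physical tables and merges the results. If the caller also knows the customer value, including it in the WHERE clause allows single-table routing. The mapper below is a hypothetical illustration of that, not part of the original post (it assumes the customer column exists on the physical tables, as the sharding configuration implies):

package com.***.***.***.mapper;

import com.***.***.***.vo.*;
import org.apache.ibatis.annotations.*;

@Mapper
public interface OrderRoutingMapper {

    // Including the sharding column "customer" in the WHERE clause lets sharding-jdbc route
    // this statement to exactly one of ycb_mcs_tradelog_0 / ycb_mcs_tradelog_1 instead of
    // querying both tables and merging the results.
    @Select("SELECT t.id, t.orderid, t.status FROM ycb_mcs_order t WHERE t.customer = #{customer} AND t.orderid = #{orderid}")
    Order getOrderStatusByCustomer(@Param("customer") Long customer, @Param("orderid") String orderid);
}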

The physical tables that actually exist in the database are ycb_mcs_tradelog_0 and ycb_mcs_tradelog_1.

Using Dangdang's sharding-jdbc for database and table sharding with the SSH framework:

1. Properties configuration:

jpa_database_platform=org.hibernate.dialect.MySQL5Dialect
jdbc_username=root
jdbc_password=root

2. XML configuration:

<!-- JDBC connection pool  https://github.com/alibaba/druid/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98 -->
		<bean id="db-node-0" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
			<!-- Create the MySQL UTF-8 database with: CREATE DATABASE s2jh DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; -->
			<!-- values are read from the properties file -->
			<property name="url" value="${jdbc_url}" />
			<property name="username" value="${jdbc_username}" />
			<property name="password" value="${jdbc_password}" />

			<!-- commits are handled by the external transaction manager -->
			<property name="defaultAutoCommit" value="false" />

			<!-- initial, minimum and maximum pool size -->
			<property name="initialSize" value="10" />
			<property name="minIdle" value="50" />
			<property name="maxActive" value="300" />

			<!-- maximum wait time to obtain a connection, in milliseconds -->
			<property name="maxWait" value="60000" />

			<!-- how often to run the check that closes idle connections, in milliseconds -->
			<property name="timeBetweenEvictionRunsMillis" value="60000" />

			<!-- minimum time a connection stays idle in the pool, in milliseconds -->
			<property name="minEvictableIdleTimeMillis" value="300000" />

			<property name="validationQuery" value="SELECT 'x'" />
			<property name="testWhileIdle" value="true" />
			<property name="testOnBorrow" value="false" />
			<property name="testOnReturn" value="false" />

			<!-- enable PSCache and set its size per connection -->
			<property name="poolPreparedStatements" value="false" />
			<property name="maxPoolPreparedStatementPerConnectionSize" value="200" />

			<!-- monitoring filters; without them the monitoring page cannot collect SQL statistics -->
			<property name="proxyFilters">
				<list>
					<ref bean="stat-filter" />
				</list>
			</property>
		</bean>
		<!-- sharding configuration -->
		<rdb:strategy id="userTableStrategy" sharding-columns="user_id" algorithm-class="merchant.sharding.UserSingleKeyTableShardingAlgorithm"/>
		<rdb:data-source id="dataSource">
			<rdb:sharding-rule data-sources="db-node-0" default-data-source="db-node-0">
				<rdb:table-rules>
					<rdb:table-rule logic-table="t_user" actual-tables="t_user_00,t_user_01,t_user_02" table-strategy="userTableStrategy"/>
				</rdb:table-rules>
			</rdb:sharding-rule>
		</rdb:data-source>

3. The sharding strategy:

package **.sharding;

import com.dangdang.ddframe.rdb.sharding.api.ShardingValue;
import com.dangdang.ddframe.rdb.sharding.api.strategy.table.SingleKeyTableShardingAlgorithm;
import com.google.common.collect.Range;

import java.util.Collection;
import java.util.LinkedHashSet;


public class UserSingleKeyTableShardingAlgorithm implements SingleKeyTableShardingAlgorithm<Integer> {

    @Override
    public String doEqualSharding(Collection<String> tableNames, ShardingValue<Integer> shardingValue) {
        int targetCount = tableNames.size();
        Integer sdValue =  shardingValue.getValue();
        for (String tableName : tableNames ) {
            if(tableName.endsWith("0"+(sdValue % targetCount))) {
                return tableName;
            }
        }
        throw new IllegalArgumentException("no table-sharding value; cannot locate the target table");
    }

    @Override
    public Collection<String> doInSharding(Collection<String> tableNames, ShardingValue<Integer> shardingValue) {
        int targetCount = tableNames.size();
        Collection<String> result = new LinkedHashSet<>(targetCount);
        Collection<Integer> values = shardingValue.getValues();
        for (Integer value : values) {
            for (String tableName : tableNames) {
                if (tableName.endsWith("0" + (value % targetCount))) {
                    result.add(tableName);
                }
            }
        }
        return result;
    }

    @Override
    public Collection<String> doBetweenSharding(Collection<String> tableNames, ShardingValue<Integer> shardingValue) {
        int targetCount = tableNames.size();
        Collection<String> result = new LinkedHashSet<>(tableNames.size());
        Range<Integer> range =  shardingValue.getValueRange();
        for (Integer i = range.lowerEndpoint(); i <= range.upperEndpoint(); i++) {
            for (String each : tableNames) {
                if (each.endsWith("0" + (i % targetCount))) {
                    result.add(each);
                }
            }
        }
        return result;
    }
}
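
The "0" + (value % targetCount) suffix only lines up with _00, _01, _02 because there are fewer than ten tables. The standalone sketch below (again not using the sharding-jdbc API; table names come from the XML above) shows the resulting mapping:

import java.util.Arrays;
import java.util.List;

public class UserRoutingDemo {

    public static void main(String[] args) {
        List<String> actualTables = Arrays.asList("t_user_00", "t_user_01", "t_user_02");
        int targetCount = actualTables.size();
        int[] userIds = {7, 8, 9};
        for (int userId : userIds) {
            String suffix = "0" + (userId % targetCount); // same rule as doEqualSharding
            String target = actualTables.stream()
                    .filter(t -> t.endsWith(suffix))
                    .findFirst()
                    .orElseThrow(IllegalArgumentException::new);
            // 7 -> t_user_01, 8 -> t_user_02, 9 -> t_user_00
            System.out.println("user_id=" + userId + " -> " + target);
        }
    }
}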

4. The sharding SQL:

package ***.sharding.dao;

import lab.s2jh.core.dao.jpa.BaseDao;
import ***.sharding.entity.Tuser;
import ***.sharding.entity.TuserVO;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.data.jpa.repository.Query;
import org.springframework.stereotype.Repository;

import java.util.List;

@Repository
public interface UserShardingDao extends BaseDao<Tuser,Long> {

    @Query(value = "SELECT id,user_id,name,age from t_user", nativeQuery = true)
    List<TuserVO> selectUsers();

    @Modifying
    @Query(value = "insert into t_user SET user_id=?1, name=?2, age=?3", nativeQuery = true)
    int add(Long userId, String name, Long age);

    @Modifying
    @Query(value = "UPDATE t_user SET age=?1 where user_id=?2", nativeQuery = true)
    int modify(Long age, Long userId);

}
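
A possible way to call this DAO is sketched below; the service and method names are illustrative, not from the original post. The @Modifying queries must run inside a transaction, and because user_id appears in the insert and in the update's WHERE clause, each statement can be routed to a single t_user_xx table:

import ***.sharding.dao.UserShardingDao;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class UserShardingService {

    @Autowired
    private UserShardingDao userShardingDao;

    @Transactional
    public void createUser(Long userId, String name, Long age) {
        // user_id is the sharding column, so this insert targets a single t_user_xx table
        userShardingDao.add(userId, name, age);
    }

    @Transactional
    public void changeAge(Long userId, Long age) {
        userShardingDao.modify(age, userId);
    }
}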

OK


Reprinted from blog.csdn.net/Hedy17/article/details/88557356