Spring Boot + MyBatis read/write splitting
Reference: scheme 4 in https://blog.csdn.net/wuyongde_0922/article/details/70655185
Overall approach: AbstractRoutingDataSource + MyBatis @Intercepts + DataSourceTransactionManager
The core routing logic all comes from the post above; this article only describes how to wire it up in a Spring Boot environment.
Versions used:
- Spring Boot: 2.0.2.RELEASE
- mybatis-spring-boot-starter: 1.3.1
- Druid: 1.1.9
- MySQL: 5.7
1. Configure two data sources, using Druid here
# application.yml (MyBatis configuration)
spring:
  profiles:
    # use the dev profile
    active: dev
mybatis:
  configuration:
    # map under_score columns to camelCase properties
    map-underscore-to-camel-case: true
    # default fetch size hint handed to the JDBC driver
    default-fetch-size: 100
    # statement timeout, in seconds
    default-statement-timeout: 3000
    # log SQL to the console
    log-impl: org.apache.ibatis.logging.stdout.StdOutImpl
# application-dev.yml (data source configuration)
write:
  datasource:
    username: root
    password: 123123
    url: jdbc:mysql://localhost:3306/db1?useSSL=false&useUnicode=true&characterEncoding=utf8
    # maximum number of active connections
    maxActive: 1000
    # initial pool size
    initialSize: 100
    # maximum wait time for a connection, in milliseconds
    maxWait: 60000
    # minimum number of idle connections to keep
    minIdle: 500
    # interval between eviction runs that close idle connections, in milliseconds
    timeBetweenEvictionRunsMillis: 60000
    # minimum idle time before a connection may be evicted, in milliseconds
    minEvictableIdleTimeMillis: 300000
    validationQuery: select 1
    testWhileIdle: true
    testOnBorrow: false
    testOnReturn: false
    # enable PSCache and set its size per connection
    poolPreparedStatements: true
    maxOpenPreparedStatements: 20
    # filters for monitoring/statistics; without 'stat' the monitoring UI cannot aggregate SQL; 'wall' is the SQL firewall
    filters: stat, wall, slf4j
    # merge monitoring data from multiple DruidDataSources
    useGlobalDataSourceStat: true
read:
  datasource:
    username: root
    password: 123123
    url: jdbc:mysql://localhost:3306/db2?useSSL=false&useUnicode=true&characterEncoding=utf8
    # maximum number of active connections
    maxActive: 1000
    # initial pool size
    initialSize: 100
    # maximum wait time for a connection, in milliseconds
    maxWait: 60000
    # minimum number of idle connections to keep
    minIdle: 500
    # interval between eviction runs that close idle connections, in milliseconds
    timeBetweenEvictionRunsMillis: 60000
    # minimum idle time before a connection may be evicted, in milliseconds
    minEvictableIdleTimeMillis: 300000
    validationQuery: select 1
    testWhileIdle: true
    testOnBorrow: false
    testOnReturn: false
    # enable PSCache and set its size per connection
    poolPreparedStatements: true
    maxOpenPreparedStatements: 20
    # filters for monitoring/statistics; without 'stat' the monitoring UI cannot aggregate SQL; 'wall' is the SQL firewall
    filters: stat, wall, slf4j
    # merge monitoring data from multiple DruidDataSources
    useGlobalDataSourceStat: true
Since there is only one MySQL instance on this machine, two databases in the same instance are used to simulate the write and read sources for testing.
# DynamicDataSourceConfig
@Bean(name = "writeDataSource")
@ConfigurationProperties(prefix = "write.datasource")
public DataSource writeDataSource() {
    return DataSourceBuilder.create().type(DruidDataSource.class).build();
}

@Bean(name = "readDataSource")
@ConfigurationProperties(prefix = "read.datasource")
public DataSource readDataSource() {
    return DataSourceBuilder.create().type(DruidDataSource.class).build();
}
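For completeness: these bean methods are assumed to live in an ordinary @Configuration class, which the post does not show. A minimal skeleton (package name inferred from the error trace in step 5, imports are standard Spring Boot 2.0 API):

package com.keliii.readwritesplitting.config;

import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class DynamicDataSourceConfig {
    // writeDataSource() and readDataSource() from above go here,
    // together with the beans defined in the following steps
}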
2. Configure an AbstractRoutingDataSource as the data source the application actually uses
# DynamicDataSourceConfig
@Bean(name = "dataSource")
public DynamicDataSource getDynamicDataSource() {
    DynamicDataSource dynamicDataSource = new DynamicDataSource();
    Map<Object, Object> dataSourceMap = new HashMap<>();
    dataSourceMap.put(DynamicDataSourceGlobal.READ.name(), readDataSource());
    dataSourceMap.put(DynamicDataSourceGlobal.WRITE.name(), writeDataSource());
    // pass in the data source map; AbstractRoutingDataSource picks a data source by key
    dynamicDataSource.setTargetDataSources(dataSourceMap);
    return dynamicDataSource;
}
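Since determineCurrentLookupKey() below never returns null, every lookup resolves against this map. If you nevertheless want a defensive fallback, AbstractRoutingDataSource also provides setDefaultTargetDataSource, which could be added inside the same bean method (my optional addition, not part of the original scheme):

// optional: used whenever the lookup key is not found in targetDataSources
dynamicDataSource.setDefaultTargetDataSource(writeDataSource());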
# DynamicDataSource (extends AbstractRoutingDataSource and implements the routing logic)
public class DynamicDataSource extends AbstractRoutingDataSource {

    // return the lookup key of the data source to route to
    @Override
    protected Object determineCurrentLookupKey() {
        DynamicDataSourceGlobal dynamicDataSourceGlobal = DynamicDataSourceHolder.getDataSource();
        if (dynamicDataSourceGlobal == null
                || dynamicDataSourceGlobal == DynamicDataSourceGlobal.WRITE) {
            return DynamicDataSourceGlobal.WRITE.name();
        }
        return DynamicDataSourceGlobal.READ.name();
    }
}
# DynamicDataSourceGlobal
public enum DynamicDataSourceGlobal {
    READ, WRITE;
}
# DynamicDataSourceHolder
public final class DynamicDataSourceHolder {

    private static final ThreadLocal<DynamicDataSourceGlobal> holder = new ThreadLocal<DynamicDataSourceGlobal>();

    private DynamicDataSourceHolder() {}

    public static void putDataSource(DynamicDataSourceGlobal dataSource) {
        holder.set(dataSource);
    }

    public static DynamicDataSourceGlobal getDataSource() {
        return holder.get();
    }

    public static void clearDataSource() {
        holder.remove();
    }
}
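The ThreadLocal is what keeps routing decisions isolated between concurrent requests: each thread only ever sees the key it set itself. A tiny standalone demonstration (my snippet, not from the original post):

public class HolderDemo {
    public static void main(String[] args) throws InterruptedException {
        DynamicDataSourceHolder.putDataSource(DynamicDataSourceGlobal.WRITE);
        Thread worker = new Thread(() ->
                // this thread never called putDataSource, so it sees null
                System.out.println("worker sees: " + DynamicDataSourceHolder.getDataSource()));
        worker.start();
        worker.join();
        // the main thread still sees WRITE
        System.out.println("main sees: " + DynamicDataSourceHolder.getDataSource());
        DynamicDataSourceHolder.clearDataSource();
    }
}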
3. Configure a MyBatis interceptor that intercepts reads and writes, distinguishes them by SqlCommandType, and assigns the data source accordingly
# DynamicPlugin (the interceptor)
@Intercepts({
        @Signature(type = Executor.class, method = "update", args = {MappedStatement.class, Object.class}),
        @Signature(type = Executor.class, method = "query", args = {MappedStatement.class, Object.class, RowBounds.class, ResultHandler.class, CacheKey.class, BoundSql.class}),
        @Signature(type = Executor.class, method = "query", args = {MappedStatement.class, Object.class, RowBounds.class, ResultHandler.class})
})
public class DynamicPlugin implements Interceptor {

    protected static final Logger logger = LoggerFactory.getLogger(DynamicPlugin.class);

    private static final String REGEX = ".*insert\\u0020.*|.*delete\\u0020.*|.*update\\u0020.*";

    private static final Map<String, DynamicDataSourceGlobal> cacheMap = new ConcurrentHashMap<>();

    @Override
    public Object intercept(Invocation invocation) throws Throwable {
        boolean synchronizationActive = TransactionSynchronizationManager.isSynchronizationActive();
        // inside a transaction context, do nothing here; DynamicDataSourceTransactionManager decides
        if (!synchronizationActive) {
            Object[] objects = invocation.getArgs();
            MappedStatement ms = (MappedStatement) objects[0];
            DynamicDataSourceGlobal dynamicDataSourceGlobal = null;
            if ((dynamicDataSourceGlobal = cacheMap.get(ms.getId())) == null) {
                // read statement
                if (ms.getSqlCommandType().equals(SqlCommandType.SELECT)) {
                    // the !selectKey suffix marks the generated-key lookup (SELECT LAST_INSERT_ID()), which must hit the master
                    if (ms.getId().contains(SelectKeyGenerator.SELECT_KEY_SUFFIX)) {
                        dynamicDataSourceGlobal = DynamicDataSourceGlobal.WRITE;
                    } else {
                        BoundSql boundSql = ms.getSqlSource().getBoundSql(objects[1]);
                        String sql = boundSql.getSql().toLowerCase(Locale.CHINA).replaceAll("[\\t\\n\\r]", " ");
                        if (sql.matches(REGEX)) {
                            dynamicDataSourceGlobal = DynamicDataSourceGlobal.WRITE;
                        } else {
                            dynamicDataSourceGlobal = DynamicDataSourceGlobal.READ;
                        }
                    }
                } else {
                    dynamicDataSourceGlobal = DynamicDataSourceGlobal.WRITE;
                }
                logger.warn("method [{}] uses [{}] strategy, SqlCommandType [{}]..", ms.getId(), dynamicDataSourceGlobal.name(), ms.getSqlCommandType().name());
                cacheMap.put(ms.getId(), dynamicDataSourceGlobal);
            }
            DynamicDataSourceHolder.putDataSource(dynamicDataSourceGlobal);
        }
        return invocation.proceed();
    }

    @Override
    public Object plugin(Object target) {
        if (target instanceof Executor) {
            return Plugin.wrap(target, this);
        } else {
            return target;
        }
    }

    @Override
    public void setProperties(Properties properties) {}
}
Note how the query method is intercepted: Executor declares two query overloads:
<E> List<E> query(MappedStatement ms, Object parameter, RowBounds rowBounds, ResultHandler resultHandler, CacheKey cacheKey, BoundSql boundSql) throws SQLException;
<E> List<E> query(MappedStatement ms, Object parameter, RowBounds rowBounds, ResultHandler resultHandler) throws SQLException;
Since the project uses both annotation-based and XML-based statements, both overloads are registered in the @Signature list.
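One caveat: REGEX only tests whether the lower-cased SQL contains "insert ", "delete " or "update " anywhere, so a SELECT whose identifiers happen to contain such a substring is routed to the write source. A quick standalone check (my own snippet) shows the behavior:

String regex = ".*insert\\u0020.*|.*delete\\u0020.*|.*update\\u0020.*";
System.out.println("select * from test where id = 1".matches(regex)); // false -> READ
System.out.println("update test set value = 'x'".matches(regex));     // true  -> WRITE
// false positive: "last_update " contains "update ", so this read goes to WRITE
System.out.println("select last_update from test".matches(regex));    // true  -> WRITE

The false positive only costs an occasional read on the master, so it is harmless, just worth knowing.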
# DynamicDataSourceConfig (register the interceptor with the SqlSessionFactory)
@Bean
public SqlSessionFactory getSqlSessionFactory(@Qualifier("dataSource") DynamicDataSource dataSource) {
    SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
    bean.setDataSource(dataSource);
    bean.setPlugins(new DynamicPlugin[]{new DynamicPlugin()});
    try {
        return bean.getObject();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

@Bean
public SqlSessionTemplate getSqlSessionTemplate(SqlSessionFactory sqlSessionFactory) {
    return new SqlSessionTemplate(sqlSessionFactory);
}
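The mapper interfaces still need to be discovered; the post does not show this part. Assuming the package layout from the error trace in step 5, a @MapperScan on the configuration class would look like:

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.context.annotation.Configuration;

@Configuration
@MapperScan("com.keliii.readwritesplitting.dao")
public class DynamicDataSourceConfig {
    // bean definitions from the previous steps
}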
4. Configure data source assignment inside transactions
# DynamicDataSourceTransactionManager (a custom DataSourceTransactionManager that implements the routing)
public class DynamicDataSourceTransactionManager extends DataSourceTransactionManager {

    // read-only transactions go to the read source, read-write transactions to the write source
    @Override
    protected void doBegin(Object transaction, TransactionDefinition definition) {
        // choose the data source before the connection is bound
        boolean readOnly = definition.isReadOnly();
        if (readOnly) {
            DynamicDataSourceHolder.putDataSource(DynamicDataSourceGlobal.READ);
        } else {
            DynamicDataSourceHolder.putDataSource(DynamicDataSourceGlobal.WRITE);
        }
        super.doBegin(transaction, definition);
    }

    // clear the routing key bound to the current thread
    @Override
    protected void doCleanupAfterCompletion(Object transaction) {
        super.doCleanupAfterCompletion(transaction);
        DynamicDataSourceHolder.clearDataSource();
    }
}
# DynamicDataSourceConfig (register the transactionManager)
@Bean
public DynamicDataSourceTransactionManager getDynamicDataSourceTransactionManager(
        @Qualifier("dataSource") DynamicDataSource dataSource) {
    DynamicDataSourceTransactionManager transactionManager = new DynamicDataSourceTransactionManager();
    transactionManager.setDataSource(dataSource);
    return transactionManager;
}
That completes the read/write splitting setup. It barely intrudes on business logic and is simple and clear; thanks again to wuyongde_0922 for sharing.
5. Write a demo to test it
# TestMapper
@Insert("insert into test(value) values(#{value})")
@Options(useGeneratedKeys = true, keyColumn = "id")
int insert(Test test);

@Select("select * from test where id = #{id}")
Test getById(@Param("id") Long id);
# TestService (transactions enabled via @EnableTransactionManagement)
@Service
public class TestService {

    @Autowired
    @Lazy
    TestMapper testMapper;

    @Transactional
    public void insert(Test test) {
        testMapper.insert(test);
    }
}
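To exercise the read-only branch of step 4 as well, a read-only transactional method can be added to the service; with readOnly = true, doBegin() binds the READ key before the connection is obtained. This method is my addition, not part of the original demo:

// hypothetical addition to TestService
@Transactional(readOnly = true)
public Test getById(Long id) {
    // routed to the read source because doBegin() saw readOnly == true
    return testMapper.getById(id);
}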
There is one problem here: autowiring the mapper into the service creates a circular dependency:
testService (field com.keliii.readwritesplitting.dao.TestMapper com.keliii.readwritesplitting.service.TestService.testMapper)
↓
testMapper defined in file [D:\workspace\IdeaProjects\SpringBootDemo\read-write-splitting-mybatis\target\classes\com\keliii\readwritesplitting\dao\TestMapper.class]
↓
getSqlSessionFactory defined in class path resource [com/keliii/readwritesplitting/config/DynamicDataSourceConfig.class]
┌─────┐
| dataSource defined in class path resource [com/keliii/readwritesplitting/config/DynamicDataSourceConfig.class]
↑ ↓
| readDataSource defined in class path resource [com/keliii/readwritesplitting/config/DynamicDataSourceConfig.class]
↑ ↓
| org.springframework.boot.autoconfigure.jdbc.DataSourceInitializerInvoker
└─────┘
So I added @Lazy at the injection point to defer the injection (this never feels convenient, since every injection needs @Lazy; if you know a better way, please let me know).
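One untested alternative to @Lazy: the last hop in the cycle is DataSourceInitializerInvoker, which comes from Spring Boot's DataSource auto-configuration. Since all data sources here are built manually anyway, excluding that auto-configuration should remove the invoker and break the cycle (application class name inferred from the test class below):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.transaction.annotation.EnableTransactionManagement;

@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
@EnableTransactionManagement
public class ReadWriteSplittingMybatisApplication {
    public static void main(String[] args) {
        SpringApplication.run(ReadWriteSplittingMybatisApplication.class, args);
    }
}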
# TestMapperTest
public class TestMapperTest extends ReadWriteSplittingMybatisApplicationTests {

    @Autowired
    TestMapper testMapper;
    @Autowired
    TestService testService;

    @org.junit.Test
    public void insert() {
        System.out.println(testMapper.getById(1L));
        Test test = new Test();
        test.setValue(UUID.randomUUID().toString());
        testService.insert(test);
    }

    @org.junit.Test
    public void queryById() {
        Test test = testMapper.getById(1L);
        System.out.println(test);
    }
}
Manually insert a row (1, "123") into the read database beforehand.
# run test
# read
Test{id=1, value='123'}
# write
Test{id=1, value='e37ef3b8-7d23-42a8-b445-b99e88409a7b'}
Read/write splitting is now configured and working.
This setup can be extended further, for example to multiple read replicas or sharding, as sketched below.
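For instance, multiple read replicas only need more READ entries in the target map plus a small change to the lookup logic. A minimal round-robin sketch, assuming two replicas registered under the hypothetical keys "READ0" and "READ1":

import java.util.concurrent.atomic.AtomicLong;

import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;

public class RoundRobinDynamicDataSource extends AbstractRoutingDataSource {

    private static final int READ_REPLICA_COUNT = 2; // assumed number of replicas
    private final AtomicLong counter = new AtomicLong();

    @Override
    protected Object determineCurrentLookupKey() {
        DynamicDataSourceGlobal key = DynamicDataSourceHolder.getDataSource();
        if (key == null || key == DynamicDataSourceGlobal.WRITE) {
            return DynamicDataSourceGlobal.WRITE.name();
        }
        // rotate evenly over "READ0", "READ1", ...
        long n = counter.getAndIncrement() % READ_REPLICA_COUNT;
        return DynamicDataSourceGlobal.READ.name() + n;
    }
}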