相信大家对session跨域也比较了解了。以前单台服务器session本地缓存就可以了,现在分布式后,session集中管理,那么用redis来管理是一个非常不错的选择。
在结合redis做session缓存的时候,也遇到了很多坑,不过还算是解决了。
和上篇讲述的一样,实现自定义缓存,需要实现两个接口:Cache 和 CacheManager。
RedisCache.java
package com.share1024.cache;

import org.apache.shiro.cache.Cache;
import org.apache.shiro.cache.CacheException;
import org.apache.shiro.util.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.StandardCharsets;
import java.util.*;
/**
* @author : yesheng
* @Description :
* @Date : 2017/10/22
*/
public class RedisCache<K,V> implements Cache<K,V> {

    private static final Logger logger = LoggerFactory.getLogger(RedisCache.class);

    /** Namespace prefix shared by every shiro session entry stored in redis. */
    private static final String SHIRO_SESSION = "shiro_session:";

    /**
     * Builds the namespaced redis key for the given cache key.
     *
     * @param key the shiro cache key (typically a session id)
     * @return {@code "shiro_session:" + key}
     */
    public String getKey(K key) {
        return SHIRO_SESSION + key;
    }

    /**
     * Fetches and deserializes the value stored under {@code key}.
     *
     * @return the cached value, or {@code null} when the key is absent
     */
    @Override
    public V get(K key) throws CacheException {
        logger.info("get--从redis中获取:{}", key);
        // Explicit charset: the original relied on the platform default encoding.
        byte[] raw = RedisUtil.getInstance().get(getKey(key).getBytes(StandardCharsets.UTF_8));
        Object o = SerializeUtils.deserialize(raw);
        if (o == null) {
            return null;
        }
        return (V) o;
    }

    /**
     * Serializes {@code value} and stores it under {@code key}.
     *
     * <p>Returns the previous value associated with the key (or {@code null}),
     * per the shiro {@link Cache#put} contract. The original body contained a
     * dead serialize/deserialize round-trip and re-read the key from redis
     * after writing it; both have been removed.
     */
    @Override
    public V put(K key, V value) throws CacheException {
        logger.info("put--保存到redis,key:{},value:{}", key, value);
        V previous = get(key);
        RedisUtil.getInstance().set(getKey(key).getBytes(StandardCharsets.UTF_8),
                SerializeUtils.serialize(value));
        return previous;
    }

    /**
     * Removes the entry for {@code key}.
     *
     * @return the value that was stored, or {@code null} if none
     */
    @Override
    public V remove(K key) throws CacheException {
        logger.info("remove--删除key:{}", key);
        V value = get(key);
        RedisUtil.getInstance().del(getKey(key).getBytes(StandardCharsets.UTF_8));
        return value;
    }

    /** Clears every shiro session entry in the namespace. */
    @Override
    public void clear() throws CacheException {
        logger.info("clear--清空缓存");
        // NOTE(review): redis DEL does not expand "*" patterns, so this call most
        // likely deletes only the literal key "shiro_session:*" -- unless RedisUtil
        // expands the pattern itself. Confirm against RedisUtil; otherwise iterate
        // the keys and delete each entry individually.
        RedisUtil.getInstance().del((SHIRO_SESSION + "*").getBytes(StandardCharsets.UTF_8));
    }

    /** @return the number of cached entries, derived from {@link #keys()} */
    @Override
    public int size() {
        logger.info("size--获取缓存大小");
        return keys().size();
    }

    /**
     * Lists the keys of all shiro session entries.
     */
    @Override
    public Set<K> keys() {
        logger.info("keys--获取缓存keys");
        // NOTE(review): the raw redis key names returned here presumably still carry
        // the "shiro_session:" prefix, yet get()/remove() prepend the prefix again
        // via getKey(), so values() would look up "shiro_session:shiro_session:..."
        // and miss. The correct fix (stripping the prefix) needs RedisUtil's actual
        // return type -- confirm before changing.
        return (Set<K>) RedisUtil.getInstance().keys(SHIRO_SESSION + "*");
    }

    /**
     * Collects the values of all cached entries; never returns {@code null}.
     *
     * @return an unmodifiable collection of the non-null cached values
     */
    @Override
    public Collection<V> values() {
        logger.info("values--获取缓存值values");
        Set<K> keys = keys();
        if (CollectionUtils.isEmpty(keys)) {
            return Collections.emptyList();
        }
        List<V> values = new ArrayList<V>(keys.size());
        for (K key : keys) {
            V value = get(key);
            if (value != null) {
                values.add(value);
            }
        }
        return Collections.unmodifiableList(values);
    }
}
RedisCacheManager.java
package com.share1024.cache;
import org.apache.shiro.cache.Cache;
import org.apache.shiro.cache.CacheException;
import org.apache.shiro.cache.CacheManager;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author : yesheng
* @Description :
* @Date : 2017/10/22
*/
public class RedisCacheManager implements CacheManager{
private final ConcurrentHashMap<String,Cache> caches = new ConcurrentHashMap<String, Cache>();
public <K, V> Cache<K, V> getCache(String name) throws CacheException {
Cache cache = caches.get(name);
if(cache == null){
cache = new RedisCache<K,V>();
caches.put(name,cache);
}
return cache;
}
}
RedisUtil.java太长 大家参考https://github.com/smallleaf/cacheWeb
SerializeUtils.java
package com.share1024.cache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
/**
* @author : yesheng
* @Description :
* @Date : 2017/10/22
*/
public class SerializeUtils {

    private static final Logger logger = LoggerFactory.getLogger(SerializeUtils.class);

    /**
     * Deserializes {@code bytes} with standard java serialization.
     *
     * <p>Best-effort, like the original: failures are logged and {@code null} is
     * returned rather than thrown, so callers must treat {@code null} as
     * "missing or unreadable".
     *
     * <p>SECURITY NOTE(review): java-native deserialization of untrusted bytes is
     * dangerous (arbitrary gadget-chain execution). This is tolerable only
     * because the redis content is written by this application itself -- confirm
     * nothing else can write to these keys.
     *
     * @param bytes serialized form; may be {@code null} or empty
     * @return the deserialized object, or {@code null} on empty input or failure
     */
    public static Object deserialize(byte[] bytes) {
        if (isEmpty(bytes)) {
            return null;
        }
        // try-with-resources replaces the original's three nested try blocks and
        // its raw "throw new Exception(...)" wrapping; the net behavior
        // (log the failure, return null) is preserved.
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
            return in.readObject();
        } catch (Exception e) {
            logger.error("Failed to deserialize", e);
            return null;
        }
    }

    /**
     * @param data byte array to test
     * @return {@code true} when {@code data} is {@code null} or zero-length
     */
    public static boolean isEmpty(byte[] data) {
        return data == null || data.length == 0;
    }

    /**
     * Serializes {@code object} with standard java serialization.
     *
     * @param object value to serialize; must implement {@link Serializable}
     * @return the serialized bytes; an empty array for {@code null} input;
     *         {@code null} when serialization fails (the failure is logged,
     *         not thrown -- same contract as the original)
     */
    public static byte[] serialize(Object object) {
        if (object == null) {
            return new byte[0];
        }
        try {
            if (!(object instanceof Serializable)) {
                // Same message as the original; surfaced through the log below.
                throw new IllegalArgumentException(SerializeUtils.class.getSimpleName()
                        + " requires a Serializable payload but received an object of type ["
                        + object.getClass().getName() + "]");
            }
            ByteArrayOutputStream byteStream = new ByteArrayOutputStream(128);
            try (ObjectOutputStream out = new ObjectOutputStream(byteStream)) {
                out.writeObject(object);
                out.flush();
            }
            return byteStream.toByteArray();
        } catch (Exception e) {
            logger.error("Failed to serialize", e);
            return null;
        }
    }
}
修改spring-shiro.xml
<bean id="redisCacheManager" class="com.share1024.cache.RedisCacheManager"></bean>
<!-- 安全管理器 -->
<bean id="securityManager" class="org.apache.shiro.web.mgt.DefaultWebSecurityManager">
<property name="realm" ref="userRealm"/>
<property name="sessionManager" ref="sessionManager"/>
<!--<property name="cacheManager" ref="cacheManagerShiro"/>-->
<property name="cacheManager" ref="redisCacheManager"/>
</bean>
即可。
如何测试跨域,大家用nginx负载均衡一下就可以了。一台服务器登录,其他服务器就不用再登录,session从redis中获取。
在网上看到很多基本上是还要重写sessionDao。从前面几篇分析来看,如果只是要换个缓存地方是完全没有必要的。实现Cache,CacheManager接口即可。
当然如果有其它的业务要求就看情况实现sessionDao。
巨坑来了。
1、Redis存储对象
序列化方式,来存储
2、SimpleSession:shiro存入缓存的都是SimpleSession。来看看这个类:
public class SimpleSession implements ValidatingSession, Serializable {
// Serialization reminder:
// You _MUST_ change this number if you introduce a change to this class
// that is NOT serialization backwards compatible. Serialization-compatible
// changes do not require a change to this number. If you need to generate
// a new number in this case, use the JDK's 'serialver' program to generate it.
private static final long serialVersionUID = -7125642695178165650L;
//TODO - complete JavaDoc
private transient static final Logger log = LoggerFactory.getLogger(SimpleSession.class);
protected static final long MILLIS_PER_SECOND = 1000;
protected static final long MILLIS_PER_MINUTE = 60 * MILLIS_PER_SECOND;
protected static final long MILLIS_PER_HOUR = 60 * MILLIS_PER_MINUTE;
//serialization bitmask fields. DO NOT CHANGE THE ORDER THEY ARE DECLARED!
static int bitIndexCounter = 0;
private static final int ID_BIT_MASK = 1 << bitIndexCounter++;
private static final int START_TIMESTAMP_BIT_MASK = 1 << bitIndexCounter++;
private static final int STOP_TIMESTAMP_BIT_MASK = 1 << bitIndexCounter++;
private static final int LAST_ACCESS_TIME_BIT_MASK = 1 << bitIndexCounter++;
private static final int TIMEOUT_BIT_MASK = 1 << bitIndexCounter++;
private static final int EXPIRED_BIT_MASK = 1 << bitIndexCounter++;
private static final int HOST_BIT_MASK = 1 << bitIndexCounter++;
private static final int ATTRIBUTES_BIT_MASK = 1 << bitIndexCounter++;
// ==============================================================
// NOTICE:
//
// The following fields are marked as transient to avoid double-serialization.
// They are in fact serialized (even though 'transient' usually indicates otherwise),
// but they are serialized explicitly via the writeObject and readObject implementations
// in this class.
//
// If we didn't declare them as transient, the out.defaultWriteObject(); call in writeObject would
// serialize all non-transient fields as well, effectively doubly serializing the fields (also
// doubling the serialization size).
//
// This finding, with discussion, was covered here:
//
// http://mail-archives.apache.org/mod_mbox/shiro-user/201109.mbox/%[email protected]%3E
//
// ==============================================================
private transient Serializable id;
private transient Date startTimestamp;
private transient Date stopTimestamp;
private transient Date lastAccessTime;
private transient long timeout;
private transient boolean expired;
private transient String host;
private transient Map<Object, Object> attributes;
大家发现没有,SimpleSession 的属性都已经被 transient 修饰,按理说序列化的时候这些属性不应该被序列化进去。
我当时纠结了很久。最后发现SimpleSession,写了这两个方法writeObject(ObjectOutputStream)和readObject(ObjectInputStream)
@SuppressWarnings({"unchecked"})
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject();
short bitMask = in.readShort();
if (isFieldPresent(bitMask, ID_BIT_MASK)) {
this.id = (Serializable) in.readObject();
}
if (isFieldPresent(bitMask, START_TIMESTAMP_BIT_MASK)) {
this.startTimestamp = (Date) in.readObject();
}
if (isFieldPresent(bitMask, STOP_TIMESTAMP_BIT_MASK)) {
this.stopTimestamp = (Date) in.readObject();
}
if (isFieldPresent(bitMask, LAST_ACCESS_TIME_BIT_MASK)) {
this.lastAccessTime = (Date) in.readObject();
}
if (isFieldPresent(bitMask, TIMEOUT_BIT_MASK)) {
this.timeout = in.readLong();
}
if (isFieldPresent(bitMask, EXPIRED_BIT_MASK)) {
this.expired = in.readBoolean();
}
if (isFieldPresent(bitMask, HOST_BIT_MASK)) {
this.host = in.readUTF();
}
if (isFieldPresent(bitMask, ATTRIBUTES_BIT_MASK)) {
this.attributes = (Map<Object, Object>) in.readObject();
}
}
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
short alteredFieldsBitMask = getAlteredFieldsBitMask();
out.writeShort(alteredFieldsBitMask);
if (id != null) {
out.writeObject(id);
}
if (startTimestamp != null) {
out.writeObject(startTimestamp);
}
if (stopTimestamp != null) {
out.writeObject(stopTimestamp);
}
if (lastAccessTime != null) {
out.writeObject(lastAccessTime);
}
if (timeout != 0l) {
out.writeLong(timeout);
}
if (expired) {
out.writeBoolean(expired);
}
if (host != null) {
out.writeUTF(host);
}
if (!CollectionUtils.isEmpty(attributes)) {
out.writeObject(attributes);
}
}
虽然属性已经被 transient 修饰,但自定义的序列化方法又让它们重新被序列化了。自定义序列化的好处,是可以在读写过程中做一些额外操作,比如校验信息、控制序列化格式等。从上面的代码可以看到,字段为 null 时就不写入,这样可以节省存储空间。
3、序列化报错
我在进行测试时,发现登录成功后,登录的信息一直保存不到redis中。导致每次都要登录,找了很久都没有发现原因。最终一步步debug终于发现。在做登录验证的User类没有实现Serializable。序列化一直报错只是异常没有抛出。所以在做序列化有关工作时,要看看相关类是否能够序列化。
这5篇从简单登录开始,一直到现在能够自定义缓存,用redis来做session缓存等等。
shiro 篇到此告一段落。
谈到session跨域。
等有时间我想再写三篇,一篇是tomcat实现session跨域处理。一篇是分析spring-session实现原理。一篇是spring-session如何与shiro结合。
菜鸟不易,望有问题指出,共同进步。