Volley is Google's lightweight asynchronous networking and image-loading framework for Android, announced at the Google I/O 2013 conference. It is best suited to scenarios with small payloads and frequent network operations.
(2) It follows the HTTP specification to a reasonable extent: it handles response codes (2xx, 3xx, 4xx, 5xx) and request headers, supports a caching mechanism, and also supports retries and request priorities.
(3) By default it uses HttpURLConnection on Android 2.3 and above, and HttpClient below 2.3.
(4) It provides a simple image-loading tool.
Creating a RequestQueue is simple: call the static Volley.newRequestQueue method, passing a Context:
private RequestQueue mQueue = null;
// Create the request queue; 'this' is the current Context.
mQueue = Volley.newRequestQueue(this);
// Basic GET via StringRequest: the response body is delivered as a String.
String url = "http://192.168.56.1:8080";
StringRequest request = new StringRequest(url, new Response.Listener<String>() {
    // success callback
    @Override
    public void onResponse(String response) {
        // handle it
    }
}, new Response.ErrorListener() {
    // error callback
    @Override
    public void onErrorResponse(VolleyError error) {
        error.printStackTrace();
    }
});
// add request to queue...
mQueue.add(request);
Map<String, String> params = new HashMap<String, String>();
params.put("name", "zhangsan");
params.put("age", "17");
JSONObject jsonRequest = new JSONObject(params);
Log.i(TAG, jsonRequest.toString());
// If jsonRequest is null, the request is sent as a GET (or an empty POST).
// If jsonRequest is non-null, Volley sends the JSONObject to the server as a
// JSON string as-is; it is NOT converted into key/value pairs, because Volley
// has no way of knowing how to convert it.
String url = "http://192.168.56.1:8080/volley_test/servlet/JsonServlet";
JsonObjectRequest request = new JsonObjectRequest(url, jsonRequest, new Response.Listener<JSONObject>() {
    @Override
    public void onResponse(JSONObject response) {
        // handle it
    }
}, new Response.ErrorListener() {
    @Override
    public void onErrorResponse(VolleyError error) {
        error.printStackTrace();
    }
});
mQueue.add(request);
// Fetching a Bitmap with ImageRequest.
ImageRequest request = new ImageRequest("http://192.168.56.1:8080/volley_test/image.jpg",
        new Response.Listener<Bitmap>() {
            @Override
            public void onResponse(Bitmap response) {
                mImageView.setImageBitmap(response);
            }
        },
        0, 0, Bitmap.Config.ARGB_8888,
        new Response.ErrorListener() {
            // maxWidth/maxHeight of 0 means the image is not down-scaled
            @Override
            public void onErrorResponse(VolleyError error) {
                show(error.getMessage()); // could display a default image here instead
            }
        });
mQueue.add(request);
// Attaching custom headers by overriding getHeaders().
String url = "http://192.168.56.1:8080/volley_test/servlet/JsonServlet";
JsonObjectRequest request = new JsonObjectRequest(url, null, resplistener, errlistener) {
    // add custom request headers
    @Override
    public Map<String, String> getHeaders() throws AuthFailureError {
        Map<String, String> map = new HashMap<String, String>();
        map.put("header1", "header1_val");
        map.put("header2", "header2_val");
        return map;
    }
};
// POST with form parameters supplied via getParams().
String url = "http://192.168.56.1:8080/volley_test/servlet/PostServlet";
StringRequest request = new StringRequest(Method.POST, url, listener, errorListener) {
    // a POST request overrides getParams to supply the request body
    @Override
    protected Map<String, String> getParams() throws AuthFailureError {
        Map<String, String> map = new HashMap<String, String>();
        map.put("KEY1", "value1");
        map.put("KEY2", "value2");
        return map;
    }
};
// Tag the request so related requests can be cancelled together.
// FIX: the original declared the variable as 'req' but then used 'request'.
Request request = ...;
request.setTag("MAIN_ACTIVITY");

// Cancel everything carrying this tag when the activity is destroyed.
onDestroy() {
    ...
    mQueue.cancelAll("MAIN_ACTIVITY");
}
RequestQueue mQueue = ...;
ImageCache mImageCache = ...;
ImageLoader loader = new ImageLoader(mQueue, mImageCache);
ImageListener listener = ImageLoader.getImageListener(
        mImageView,             /* the ImageView to bind */
        R.drawable.ic_launcher, /* placeholder shown while loading */
        R.drawable.task_icon    /* image shown when loading fails */);
loader.get("http://192.168.56.1:8080/volley_test/image.jpg", listener, 0, 0);
/**
 * Cache contract used by ImageLoader: maps a request URL to a cached Bitmap.
 * NOTE(review): ImageLoader appears to invoke these from its delivery thread —
 * confirm the threading model before adding synchronization.
 */
public interface ImageCache {
    /** Returns the cached bitmap for the url, or null on a miss. */
    public Bitmap getBitmap(String url);

    /** Stores a freshly-downloaded bitmap under the url. */
    public void putBitmap(String url, Bitmap bitmap);
}
/**
 * @author Rowandjj
 * The image cache should be a singleton so it is shared globally.
 */
private static class LruImageCache implements ImageCache {
    private LruImageCache() {}

    // Eagerly-initialized singleton instance.
    private static LruImageCache instance = new LruImageCache();

    public static final LruImageCache getInstance() {
        return instance;
    }

    private static final String TAG = "LruImageCache";

    // Budget: one eighth of the maximum heap, in bytes.
    private final int maxSize = (int) (Runtime.getRuntime().maxMemory() / 8);

    private LruCache<String, Bitmap> mCacheMap = new LruCache<String, Bitmap>(maxSize) {
        protected int sizeOf(String key, Bitmap value) {
            // Size each entry by its byte count so maxSize is a byte budget,
            // not an entry count.
            return value.getRowBytes() * value.getHeight();
        }
    };

    @Override
    public Bitmap getBitmap(String url) {
        Bitmap bitmap = mCacheMap.get(url);
        Log.i(TAG, "url = " + url + ",cache:" + bitmap);
        return bitmap;
    }

    @Override
    public void putBitmap(String url, Bitmap bitmap) {
        Log.i(TAG, "put url = " + url);
        mCacheMap.put(url, bitmap);
    }
}
<com.android.volley.toolbox.NetworkImageView
    android:id="@+id/niv"
    android:layout_width="0dp"
    android:layout_height="match_parent"
    android:layout_weight="1" />

ImageLoader loader = ...;
mNetImageView = findViewById(R.id.niv);
mNetImageView.setDefaultImageResId(R.drawable.ic_launcher);
mNetImageView.setErrorImageResId(R.drawable.task_icon);
mNetImageView.setImageUrl("http://192.168.56.1:8080/volley_test/image.jpg", loader);
public class XMLRequest extends Request<XmlPullParser> { private Listener<XmlPullParser> mListener; public XMLRequest(int method, String url, Listener<XmlPullParser> listener, ErrorListener errorListener) { super(method, url, errorListener); mListener = listener; } public XMLRequest(String url, Listener<XmlPullParser> listener, ErrorListener errorListener) { this(Method.GET, url, listener, errorListener); } @Override protected Response<XmlPullParser> parseNetworkResponse(NetworkResponse response) { try { String xmlString = new String(response.data,HttpHeaderParser.parseCharset(response.headers)); XmlPullParser parser = Xml.newPullParser(); parser.setInput(new StringReader(xmlString));//将返回数据设置给解析器 return Response.success(parser,HttpHeaderParser.parseCacheHeaders(response)); } catch (UnsupportedEncodingException e) { return Response.error(new VolleyError(e)); } catch (XmlPullParserException e) { return Response.error(new VolleyError(e)); } } @Override protected void deliverResponse(XmlPullParser response) { mListener.onResponse(response); } }
Use:
/**
 * Example usage of XMLRequest.
 * FIXES over the original snippet:
 *  - the loop variable 'type' was never updated, so the while loop could not
 *    terminate; we now assign the result of response.next() back to it;
 *  - getEventType()/next() throw checked exceptions (XmlPullParserException,
 *    IOException) that were not handled, so the original did not compile;
 *  - the request was created but never added to the queue.
 */
void test() {
    RequestQueue queue = Volley.newRequestQueue(context);
    String url = "";
    XMLRequest request = new XMLRequest(url, new Response.Listener<XmlPullParser>() {
        @Override
        public void onResponse(XmlPullParser response) {
            try {
                int type = response.getEventType();
                while (type != XmlPullParser.END_DOCUMENT) {
                    switch (type) {
                        case XmlPullParser.START_TAG:
                            break;
                        case XmlPullParser.END_TAG:
                            break;
                        default:
                            break;
                    }
                    type = response.next(); // advance AND update the loop variable
                }
            } catch (XmlPullParserException | IOException e) {
                e.printStackTrace();
            }
        }
    }, new Response.ErrorListener() {
        @Override
        public void onErrorResponse(VolleyError error) {
        }
    });
    queue.add(request); // the original never enqueued the request
}
In fact, besides custom Request subclasses, many other pieces can be customized — the RequestQueue itself, for example. See the RequestQueue constructor:
public RequestQueue(Cache cache, Network network, int threadPoolSize, ResponseDelivery delivery)
//Volley.java public static RequestQueue newRequestQueue(Context context) { return newRequestQueue(context, null); }
Calls another factory method:
//volley.java public static RequestQueue newRequestQueue(Context context, HttpStack stack) { File cacheDir = new File(context.getCacheDir(), DEFAULT_CACHE_DIR); ... ... if (stack == null) { if (Build.VERSION.SDK_INT >= 9) { stack = new HurlStack(); } else { // Prior to Gingerbread, HttpUrlConnection was unreliable. // See: http://android-developers.blogspot.com/2011/09/androids-http-clients.html stack = new HttpClientStack(AndroidHttpClient.newInstance(userAgent)); } } Network network = new BasicNetwork(stack); RequestQueue queue = new RequestQueue(new DiskBasedCache(cacheDir), network); queue.start(); return queue; }
//RequestQueue.java public RequestQueue(Cache cache, Network network) { this(cache, network, DEFAULT_NETWORK_THREAD_POOL_SIZE); }
The default thread pool size is 4.
//RequestQueue.java public RequestQueue(Cache cache, Network network, int threadPoolSize) { this(cache, network, threadPoolSize, new ExecutorDelivery(new Handler(Looper.getMainLooper()))); } public RequestQueue(Cache cache, Network network, int threadPoolSize, ResponseDelivery delivery) { mCache = cache; mNetwork = network; mDispatchers = new NetworkDispatcher[threadPoolSize]; mDelivery = delivery; }
// Starts one CacheDispatcher plus threadPoolSize NetworkDispatchers.
public void start() {
    stop();  // Make sure any currently running dispatchers are stopped.
    // Create the cache dispatcher and start it.
    mCacheDispatcher = new CacheDispatcher(mCacheQueue, mNetworkQueue, mCache, mDelivery);
    mCacheDispatcher.start();
    // Create network dispatchers (and corresponding threads) up to the pool size.
    for (int i = 0; i < mDispatchers.length; i++) {
        NetworkDispatcher networkDispatcher = new NetworkDispatcher(mNetworkQueue, mNetwork, mCache, mDelivery);
        mDispatchers[i] = networkDispatcher;
        networkDispatcher.start();
    }
}
The logic is simple: create one CacheDispatcher and four NetworkDispatcher objects and start each of them. Both CacheDispatcher and NetworkDispatcher are subclasses of Thread; the CacheDispatcher handles cache requests while the four NetworkDispatchers handle network requests. The CacheDispatcher is injected, via its constructor, with the cache request queue (mCacheQueue), the network request queue (mNetworkQueue), the disk cache object (DiskBasedCache), and the result delivery (mDelivery). The network request queue is injected as well because some cached entries may have expired, in which case the data must be re-fetched from the network. A NetworkDispatcher receives the same dependencies as the CacheDispatcher except the cache request queue. At this point the RequestQueue's job is done; once added, a request is handed over to the dispatcher threads.
/**
 * Enqueues a request. Uncacheable requests go straight to the network queue;
 * cacheable ones either enter the cache queue or, if an identical request is
 * already in flight, are parked in mWaitingRequests until that one completes.
 */
public Request add(Request request) {
    // Tag the request as belonging to this queue and add it to the set of current requests.
    request.setRequestQueue(this);
    synchronized (mCurrentRequests) {
        mCurrentRequests.add(request);
    }

    // Process requests in the order they are added.
    request.setSequence(getSequenceNumber());
    request.addMarker("add-to-queue");

    // If the request is uncacheable, skip the cache queue and go straight to the network.
    if (!request.shouldCache()) {
        mNetworkQueue.add(request);
        return request;
    }

    // Insert request into stage if there's already a request with the same cache key in flight.
    synchronized (mWaitingRequests) {
        String cacheKey = request.getCacheKey();
        if (mWaitingRequests.containsKey(cacheKey)) {
            // There is already a request in flight. Queue up.
            Queue<Request> stagedRequests = mWaitingRequests.get(cacheKey);
            if (stagedRequests == null) {
                stagedRequests = new LinkedList<Request>();
            }
            stagedRequests.add(request);
            mWaitingRequests.put(cacheKey, stagedRequests);
            if (VolleyLog.DEBUG) {
                VolleyLog.v("Request for cacheKey=%s is in flight, putting on hold.", cacheKey);
            }
        } else {
            // Insert 'null' queue for this cacheKey, indicating there is now a request in
            // flight.
            mWaitingRequests.put(cacheKey, null);
            mCacheQueue.add(request);
        }
        return request;
    }
}
Through this method, requests are distributed to the two queues, to be processed by the CacheDispatcher and the NetworkDispatchers respectively.
// CacheDispatcher.java#run
// Loop forever: take a request from the cache queue and either serve it from
// the disk cache, forward it to the network queue, or do both (soft expiry).
@Override
public void run() {
    if (DEBUG) VolleyLog.v("start new dispatcher");
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);

    // Make a blocking call to initialize the cache.
    mCache.initialize();

    while (true) {
        try {
            // Get a request from the cache triage queue, blocking until
            // at least one is available.
            final Request request = mCacheQueue.take();
            request.addMarker("cache-queue-take");

            // If the request has been canceled, don't bother dispatching it.
            if (request.isCanceled()) {
                request.finish("cache-discard-canceled");
                continue;
            }

            // Attempt to retrieve this item from cache.
            Cache.Entry entry = mCache.get(request.getCacheKey());
            if (entry == null) {
                request.addMarker("cache-miss");
                // Cache miss; send off to the network dispatcher.
                mNetworkQueue.put(request);
                continue;
            }

            // If it is completely expired, just send it to the network.
            if (entry.isExpired()) {
                request.addMarker("cache-hit-expired");
                request.setCacheEntry(entry);
                mNetworkQueue.put(request);
                continue;
            }

            // We have a cache hit; parse its data for delivery back to the request.
            request.addMarker("cache-hit");
            Response<?> response = request.parseNetworkResponse(
                    new NetworkResponse(entry.data, entry.responseHeaders));
            request.addMarker("cache-hit-parsed");

            if (!entry.refreshNeeded()) {
                // Completely unexpired cache hit. Just deliver the response.
                mDelivery.postResponse(request, response);
            } else {
                // Soft-expired cache hit. We can deliver the cached response,
                // but we need to also send the request to the network for
                // refreshing.
                request.addMarker("cache-hit-refresh-needed");
                request.setCacheEntry(entry);

                // Mark the response as intermediate.
                response.intermediate = true;

                // Post the intermediate response back to the user and have
                // the delivery then forward the request along to the network.
                mDelivery.postResponse(request, response, new Runnable() {
                    @Override
                    public void run() {
                        try {
                            mNetworkQueue.put(request);
                        } catch (InterruptedException e) {
                            // Not much we can do about this.
                        }
                    }
                });
            }
        } catch (InterruptedException e) {
            // We may have been interrupted because it was time to quit.
            if (mQuit) {
                return;
            }
            continue;
        }
    }
}
The general flow: a request is taken from the queue and first checked for cancellation — if cancelled it is finished, otherwise processing continues. The entry (Cache.Entry) is then looked up in the disk cache by the cache key; if absent, the request is put on the network queue. Otherwise the entry is checked for expiration (the requested resource must carry Cache-Control or Last-Modified/Expires headers, with Cache-Control taking priority over Expires; otherwise the entry is considered expired). If expired, the request goes to the network queue. If not expired, the cached data is wrapped into a Response object via request.parseNetworkResponse (parseNetworkResponse is abstract in Request and overridden by subclasses). Finally, a freshness check is made: if no refresh is needed, postResponse is called to hand the result to the ResponseDelivery; otherwise the cached result is delivered first and the request is then re-queued on the network queue to refresh. [Reading code like this is a pleasure — well done, Google engineers!] The specifics of ResponseDelivery are left for the next section.
2. Taking requests from the network
// NetworkDispatcher.java#run
// Loop forever: take a request from the network queue, perform it via the
// Network, parse and optionally cache the result, then deliver it.
@Override
public void run() {
    Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
    Request request;
    while (true) {
        try {
            // Take a request from the queue.
            request = mQueue.take();
        } catch (InterruptedException e) {
            // We may have been interrupted because it was time to quit.
            if (mQuit) {
                return;
            }
            continue;
        }

        try {
            request.addMarker("network-queue-take");

            // If the request was cancelled already, do not perform the
            // network request.
            if (request.isCanceled()) {
                request.finish("network-discard-cancelled");
                continue;
            }

            // Tag the request (if API >= 14)
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
                TrafficStats.setThreadStatsTag(request.getTrafficStatsTag());
            }

            // Perform the network request.
            NetworkResponse networkResponse = mNetwork.performRequest(request);
            request.addMarker("network-http-complete");

            // If the server returned 304 AND we delivered a response already,
            // we're done -- don't deliver a second identical response.
            if (networkResponse.notModified && request.hasHadResponseDelivered()) {
                request.finish("not-modified");
                continue;
            }

            // Parse the response here on the worker thread.
            Response<?> response = request.parseNetworkResponse(networkResponse);
            request.addMarker("network-parse-complete");

            // Write to cache if applicable.
            // TODO: Only update cache metadata instead of entire record for 304s.
            if (request.shouldCache() && response.cacheEntry != null) {
                mCache.put(request.getCacheKey(), response.cacheEntry);
                request.addMarker("network-cache-written");
            }

            // Post the response back.
            request.markDelivered();
            mDelivery.postResponse(request, response);
        } catch (VolleyError volleyError) {
            parseAndDeliverNetworkError(request, volleyError);
        } catch (Exception e) {
            VolleyLog.e(e, "Unhandled exception %s", e.toString());
            mDelivery.postError(request, new VolleyError(e));
        }
    }
}
这里的逻辑跟CacheDispatcher类似,也是构造Response对象,然后交由ResponseDelivery处理,但是这里的Response对象是通过NetworkResponse转化的,而这个NetworkResponse是从网络获取的,这里最核心的一行代码就是
NetworkResponse networkResponse = mNetwork.performRequest(request);
这个mNetwork是BasicNetwork对象,我们看其performRequest的实现:
// BasicNetwork.java#performRequest
// Loops so that attemptRetryOnException can transparently retry after
// timeouts/auth failures; exits by returning a NetworkResponse or throwing
// once the retry policy gives up.
public NetworkResponse performRequest(Request<?> request) throws VolleyError {
    long requestStart = SystemClock.elapsedRealtime();
    while (true) {
        HttpResponse httpResponse = null;
        byte[] responseContents = null;
        Map<String, String> responseHeaders = new HashMap<String, String>();
        try {
            // Gather headers.
            Map<String, String> headers = new HashMap<String, String>();
            addCacheHeaders(headers, request.getCacheEntry());
            httpResponse = mHttpStack.performRequest(request, headers);
            StatusLine statusLine = httpResponse.getStatusLine();
            int statusCode = statusLine.getStatusCode();

            responseHeaders = convertHeaders(httpResponse.getAllHeaders());
            // Handle cache validation.
            if (statusCode == HttpStatus.SC_NOT_MODIFIED) {
                // 304: serve the bytes we already have cached.
                return new NetworkResponse(HttpStatus.SC_NOT_MODIFIED,
                        request.getCacheEntry().data, responseHeaders, true);
            }

            // Some responses such as 204s do not have content. We must check.
            if (httpResponse.getEntity() != null) {
                responseContents = entityToBytes(httpResponse.getEntity());
            } else {
                // Add 0 byte response as a way of honestly representing a
                // no-content request.
                responseContents = new byte[0];
            }

            // if the request is slow, log it.
            long requestLifetime = SystemClock.elapsedRealtime() - requestStart;
            logSlowRequests(requestLifetime, request, responseContents, statusLine);

            // Non-2xx becomes an IOException, handled by the catch below.
            if (statusCode < 200 || statusCode > 299) {
                throw new IOException();
            }
            return new NetworkResponse(statusCode, responseContents, responseHeaders, false);
        } catch (SocketTimeoutException e) {
            attemptRetryOnException("socket", request, new TimeoutError());
        } catch (ConnectTimeoutException e) {
            attemptRetryOnException("connection", request, new TimeoutError());
        } catch (MalformedURLException e) {
            throw new RuntimeException("Bad URL " + request.getUrl(), e);
        } catch (IOException e) {
            int statusCode = 0;
            NetworkResponse networkResponse = null;
            if (httpResponse != null) {
                statusCode = httpResponse.getStatusLine().getStatusCode();
            } else {
                // No response at all: connection-level failure.
                throw new NoConnectionError(e);
            }
            VolleyLog.e("Unexpected response code %d for %s", statusCode, request.getUrl());
            if (responseContents != null) {
                networkResponse = new NetworkResponse(statusCode, responseContents,
                        responseHeaders, false);
                if (statusCode == HttpStatus.SC_UNAUTHORIZED ||
                        statusCode == HttpStatus.SC_FORBIDDEN) {
                    // 401/403: let the retry policy attempt re-authentication.
                    attemptRetryOnException("auth", request, new AuthFailureError(networkResponse));
                } else {
                    // TODO: Only throw ServerError for 5xx status codes.
                    throw new ServerError(networkResponse);
                }
            } else {
                throw new NetworkError(networkResponse);
            }
        }
    }
}
这里最核心的是这一句:
httpResponse = mHttpStack.performRequest(request, headers);
它调用了HttpStack的performRequest,这个方法内部肯定会调用HttpURLConnection或者是HttpClient去请求网络。这里我们就不必继续向下跟源码了。
// Delegates to the full constructor with an ExecutorDelivery bound to the
// main-looper Handler, so results are delivered on the UI thread.
public RequestQueue(Cache cache, Network network, int threadPoolSize) {
    this(cache, network, threadPoolSize, new ExecutorDelivery(new Handler(Looper.getMainLooper())));
}
ExecutorDelivery内部有个自定义Executor,它仅仅是封装了Handler,所有待分发的结果最终会通过handler.post方法交给UI线程。
/**
 * Wraps the given Handler in an Executor so every delivery Runnable is posted
 * to that Handler's thread (normally the main thread).
 */
public ExecutorDelivery(final Handler handler) {
    // Make an Executor that just wraps the handler.
    mResponsePoster = new Executor() {
        @Override
        public void execute(Runnable command) {
            handler.post(command);
        }
    };
}
下面看我们最关心的postResponse方法:
// Marks the request delivered, then hands the actual delivery work to
// mResponsePoster (i.e. the UI-thread Handler).
@Override
public void postResponse(Request<?> request, Response<?> response, Runnable runnable) {
    request.markDelivered();
    request.addMarker("post-response");
    mResponsePoster.execute(new ResponseDeliveryRunnable(request, response, runnable));
}

// Overload with no post-delivery runnable.
@Override
public void postResponse(Request<?> request, Response<?> response) {
    postResponse(request, response, null);
}
最终执行的是ResponseDeliveryRunnable这个Runnable:
/** Runnable that performs the actual delivery on the posting Executor's thread. */
private class ResponseDeliveryRunnable implements Runnable {
    private final Request mRequest;
    private final Response mResponse;
    private final Runnable mRunnable;

    public ResponseDeliveryRunnable(Request request, Response response, Runnable runnable) {
        mRequest = request;
        mResponse = response;
        mRunnable = runnable;
    }

    @SuppressWarnings("unchecked")
    @Override
    public void run() {
        // If this request has canceled, finish it and don't deliver.
        if (mRequest.isCanceled()) {
            mRequest.finish("canceled-at-delivery");
            return;
        }

        // Deliver a normal response or error, depending.
        if (mResponse.isSuccess()) {
            mRequest.deliverResponse(mResponse.result);
        } else {
            mRequest.deliverError(mResponse.error);
        }

        // If this is an intermediate response, add a marker, otherwise we're done
        // and the request can be finished.
        if (mResponse.intermediate) {
            mRequest.addMarker("intermediate-response");
        } else {
            mRequest.finish("done");
        }

        // If we have been provided a post-delivery runnable, run it.
        if (mRunnable != null) {
            mRunnable.run();
        }
    }
}
这里我们看到了request.deliverResponse被调用了,这个方法通常会回调Listener.onResponse。哈哈,到这里,整个volley框架的主线就看完了!读到这里,我真是由衷觉得google工程师牛逼啊!
if (request.isCanceled()) { request.finish("network-discard-cancelled"); continue; }
如果请求取消就调用Request#finish,finish方法内部将调用与之绑定的请求队列的finish方法,该方法内部会将请求对象在队列中移除。
// Quits the cache dispatcher and every network dispatcher thread.
public void stop() {
    if (mCacheDispatcher != null) {
        mCacheDispatcher.quit();
    }
    for (int i = 0; i < mDispatchers.length; i++) {
        if (mDispatchers[i] != null) {
            mDispatchers[i].quit();
        }
    }
}
XXXDispatcher的quit方法会修改mQuit变量并调用interrupt使线程抛Interrupt异常,而Dispatcher捕获到异常后会判断mQuit变量最终while循环结束,线程退出。
catch (InterruptedException e) { // We may have been interrupted because it was time to quit. if (mQuit) { return; } continue; }
/**
 * ImageLoader#get: serve from the in-memory cache when possible; otherwise
 * fire an ImageRequest and register it as in-flight so identical requests
 * can be batched.
 */
public ImageContainer get(String requestUrl, ImageListener imageListener, int maxWidth, int maxHeight) {
    ...
    final String cacheKey = getCacheKey(requestUrl, maxWidth, maxHeight);

    // Try to look up the request in the cache of remote images.
    Bitmap cachedBitmap = mCache.getBitmap(cacheKey);
    if (cachedBitmap != null) {
        // Return the cached bitmap.
        ImageContainer container = new ImageContainer(cachedBitmap, requestUrl, null, null);
        imageListener.onResponse(container, true);
        return container;
    }
    ...
    Request<?> newRequest = new ImageRequest(requestUrl, new Listener<Bitmap>() {
        @Override
        public void onResponse(Bitmap response) {
            onGetImageSuccess(cacheKey, response);
        }
    }, maxWidth, maxHeight, Config.RGB_565, new ErrorListener() {
        @Override
        public void onErrorResponse(VolleyError error) {
            onGetImageError(cacheKey, error);
        }
    });

    mRequestQueue.add(newRequest);
    mInFlightRequests.put(cacheKey, new BatchedImageRequest(newRequest, imageContainer));
    return imageContainer;
}
// Servlet#doPost/doGet()
/* Configure response caching headers for the test server. */
resp.setDateHeader("Last-Modified", System.currentTimeMillis());
resp.setDateHeader("Expires", System.currentTimeMillis() + 10 * 1000 * 60);
resp.setHeader("Cache-Control", "max-age=10000");
// NOTE(review): "Pragma: Pragma" looks like a typo — a real value such as
// "no-cache" was probably intended; verify against the original servlet.
resp.setHeader("Pragma", "Pragma");
Cache-Control字段的优先级高于Expires。这个可以从HttpHeaderParser#parseCacheHeaders方法中看到。
/**
 * HttpHeaderParser#parseCacheHeaders: converts HTTP caching headers into a
 * Cache.Entry. Returns null (meaning "do not cache") for no-cache/no-store.
 */
public static Cache.Entry parseCacheHeaders(NetworkResponse response) {
    long now = System.currentTimeMillis();

    Map<String, String> headers = response.headers;

    long serverDate = 0;
    long serverExpires = 0;
    long softExpire = 0;
    long maxAge = 0;
    boolean hasCacheControl = false;

    String serverEtag = null;
    String headerValue;

    headerValue = headers.get("Date");
    if (headerValue != null) {
        serverDate = parseDateAsEpoch(headerValue);
    }

    headerValue = headers.get("Cache-Control");
    if (headerValue != null) {
        hasCacheControl = true;
        String[] tokens = headerValue.split(",");
        for (int i = 0; i < tokens.length; i++) {
            String token = tokens[i].trim();
            if (token.equals("no-cache") || token.equals("no-store")) {
                // Explicitly uncacheable.
                return null;
            } else if (token.startsWith("max-age=")) {
                try {
                    maxAge = Long.parseLong(token.substring(8));
                } catch (Exception e) {
                    // Malformed max-age: deliberately ignored, maxAge stays 0.
                }
            } else if (token.equals("must-revalidate") || token.equals("proxy-revalidate")) {
                maxAge = 0;
            }
        }
    }

    headerValue = headers.get("Expires");
    if (headerValue != null) {
        serverExpires = parseDateAsEpoch(headerValue);
    }

    serverEtag = headers.get("ETag");

    // Cache-Control takes precedence over an Expires header, even if both exist and Expires
    // is more restrictive.
    if (hasCacheControl) {
        softExpire = now + maxAge * 1000;
    } else if (serverDate > 0 && serverExpires >= serverDate) {
        // Default semantic for Expire header in HTTP specification is softExpire.
        softExpire = now + (serverExpires - serverDate);
    }

    Cache.Entry entry = new Cache.Entry();
    entry.data = response.data;
    entry.etag = serverEtag;
    entry.softTtl = softExpire;
    entry.ttl = entry.softTtl;
    entry.serverDate = serverDate;
    entry.responseHeaders = headers;

    return entry;
}
这个方法是由Request子类的parseNetworkResponse方法调用的:
Response.success(parsed, HttpHeaderParser.parseCacheHeaders(response))
下面这幅图也很好:
转载于:https://www.cnblogs.com/wangzehuaw/p/5583919.html