📄 TerracottaSessionManager.java
// ========================================================================
// Copyright 2004-2008 Mort Bay Consulting Pty. Ltd.
// ------------------------------------------------------------------------
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ========================================================================

package org.mortbay.terracotta.servlet;

import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;

import com.tc.object.bytecode.Manageable;
import com.tc.object.bytecode.Manager;
import com.tc.object.bytecode.ManagerUtil;

import org.mortbay.jetty.Request;
import org.mortbay.jetty.handler.ContextHandler;
import org.mortbay.jetty.servlet.AbstractSessionManager;
import org.mortbay.log.Log;

/**
 * A specialized SessionManager to be used with <a href="http://www.terracotta.org">Terracotta</a>.
 * <br />
 * <h3>IMPLEMENTATION NOTES</h3>
 * <h4>Requirements</h4>
 * This implementation of session management requires J2SE 5 or later.
 * <h4>Use of Hashtable</h4>
 * In Terracotta, collections classes are
 * <a href="http://www.terracotta.org/web/display/docs/Concept+and+Architecture+Guide">logically managed</a>
 * and we need two levels of locking: local locking to handle concurrent requests on the same node,
 * and distributed locking to handle concurrent requests on different nodes.
 * Natively synchronized classes such as Hashtable fit better than synchronized wrappers obtained via, for
 * example, {@link Collections#synchronizedMap(Map)}. This is because Terracotta may replay the method call
 * on the inner unsynchronized collection without invoking the external wrapper, so the synchronization will
 * be lost. Natively synchronized collections do not have this problem.
 * <h4>Use of Hashtable as a Set</h4>
 * There is no natively synchronized Set implementation, so we use Hashtable instead, see
 * {@link TerracottaSessionIdManager}.
 * However, we don't map the session id to itself, because Strings are treated specially by Terracotta,
 * causing more traffic to the Terracotta server. Instead we use the same pattern used in the implementation
 * of <code>java.util.HashSet</code>: a single shared object indicates the presence of a key.
 * This is necessary since Hashtable does not allow null values.
 * <h4>Sessions expiration map</h4>
 * In order to scavenge expired sessions, we need a way to know if they are expired. This information
 * is normally held in the session itself via the <code>lastAccessedTime</code> property.
 * However, we would need to iterate over all sessions to check if each one is expired, and this migrates
 * all sessions to the node, causing a lot of unneeded traffic between nodes and the Terracotta server.
 * To avoid this, we keep a separate map from session id to expiration time, so we only need to migrate
 * the expiration times to see whether a session is expired or not.
 * <h4>Update of lastAccessedTime</h4>
 * As a performance improvement, the lastAccessedTime is updated only periodically, and not every time
 * a request enters a node. This optimization allows applications that have frequent requests but less
 * frequent accesses to the session to perform better, because the traffic between the node and the
 * Terracotta server is reduced. The update period is the scavenger period, see {@link Session#access(long)}.
 * <h4>Terracotta lock id</h4>
 * The Terracotta lock id is based on the session id, but this alone is not sufficient, as there may be
 * two sessions with the same id for two different contexts. So we need session id and context path.
 * However, this also is not enough, as we may have the rare case of the same webapp mapped to two different
 * virtual hosts, and each virtual host must have a different session object.
 * Therefore the lock id we need to use is a combination of session id, context path and virtual host, see
 * {@link #newLockId(String)}.
 *
 * @see TerracottaSessionIdManager
 */
public class TerracottaSessionManager extends AbstractSessionManager implements Runnable
{
    /**
     * The local cache of session objects.
     */
    private Map<String, Session> _sessions;

    /**
     * The distributed shared SessionData map.
     * Putting objects into the map results in the objects being sent to Terracotta, and any change
     * to the objects is also replicated, recursively.
     * Getting objects from the map results in the objects being fetched from Terracotta.
     */
    private Hashtable<String, SessionData> _sessionDatas;

    /**
     * The distributed shared session expirations map, needed for scavenging.
     * In particular it supports removal of sessions that have been orphaned by nodeA
     * (for example because it crashed) by virtue of scavenging performed by nodeB.
     */
    private Hashtable<String, MutableLong> _sessionExpirations;

    private String _contextPath;
    private String _virtualHost;
    private long _scavengePeriodMs = 30000;
    private ScheduledExecutorService _scheduler;
    private ScheduledFuture<?> _scavenger;

    public void doStart() throws Exception
    {
        super.doStart();
        _contextPath = canonicalize(_context.getContextPath());
        _virtualHost = virtualHostFrom(_context);
        _sessions = Collections.synchronizedMap(new HashMap<String, Session>());
        _sessionDatas = newSharedMap("sessionData:" + _contextPath + ":" + _virtualHost);
        _sessionExpirations = newSharedMap("sessionExpirations:" + _contextPath + ":" + _virtualHost);
        _scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduleScavenging();
    }

    private Hashtable newSharedMap(String name)
    {
        // We want to partition the session data among contexts, so we need to have different roots for
        // different contexts, and each root must have a different name, since roots with the same name are shared.
        Lock.lock(name);
        try
        {
            // We need a synchronized data structure to have node-local synchronization.
            // We use Hashtable because it is a natively synchronized collection that behaves
            // better in Terracotta than synchronized wrappers obtained with Collections.synchronized*().
            Hashtable result = (Hashtable)ManagerUtil.lookupOrCreateRootNoDepth(name, new Hashtable());
            ((Manageable)result).__tc_managed().disableAutoLocking();
            return result;
        }
        finally
        {
            Lock.unlock(name);
        }
    }

    private void scheduleScavenging()
    {
        if (_scavenger != null)
        {
            _scavenger.cancel(false);
            _scavenger = null;
        }
        long scavengePeriod = getScavengePeriodMs();
        if (scavengePeriod > 0 && _scheduler != null)
            _scavenger = _scheduler.scheduleWithFixedDelay(this, scavengePeriod, scavengePeriod, TimeUnit.MILLISECONDS);
    }

    public void doStop() throws Exception
    {
        if (_scavenger != null)
            _scavenger.cancel(true);
        if (_scheduler != null)
            _scheduler.shutdownNow();
        super.doStop();
    }

    public void run()
    {
        scavenge();
    }

    public void enter(Request request)
    {
        /**
         * SESSION LOCKING
         * This is an entry point for session locking.
         * We arrive here at the beginning of every request.
         */
        String requestedSessionId = request.getRequestedSessionId();
        HttpSession session = request.getSession(false);
        Log.debug("Entering, requested session id {}, session id {}", requestedSessionId, session == null ? null : getClusterId(session));
        if (requestedSessionId == null)
        {
            // The request does not have a session id, do not lock.
            // If the session is created by the user later in the request,
            // it will be locked when it is created.
        }
        else
        {
            // We lock anyway with the requested session id.
            // The requested session id may not be a valid one,
            // for example because the session expired.
            // If the user creates a new session, it will have
            // a different session id and that also will be locked.
            enter(getIdManager().getClusterId(requestedSessionId));
        }
    }

    protected void enter(String clusterId)
    {
        Lock.lock(newLockId(clusterId));
        Log.debug("Entered, session id {}", clusterId);
    }

    protected boolean tryEnter(String clusterId)
    {
        return Lock.tryLock(newLockId(clusterId));
    }

    public void exit(Request request)
    {
        /**
         * SESSION LOCKING
         * This is an exit point for session locking.
         * We arrive here at the end of every request.
         */
        String requestedSessionId = request.getRequestedSessionId();
        HttpSession session = request.getSession(false);
        Log.debug("Exiting, requested session id {}, session id {}", requestedSessionId, session == null ? null : getClusterId(session));
        if (requestedSessionId == null)
        {
            if (session == null)
            {
                // No session has been created in the request, just return.
            }
            else
            {
                // A new session has been created by the user, unlock it.
                exit(getClusterId(session));
            }
        }
        else
        {
            // There was a requested session id, and we locked it, so release it here.
            String requestedClusterId = getIdManager().getClusterId(requestedSessionId);
            exit(requestedClusterId);
            if (session != null)
            {
                if (!requestedClusterId.equals(getClusterId(session)))
                {
                    // The requested session id was invalid, and a
                    // new session has been created by the user with
                    // a different session id, unlock it.
                    exit(getClusterId(session));
                }
            }
        }
    }

    protected void exit(String clusterId)
    {
        Lock.unlock(newLockId(clusterId));
        Log.debug("Exited, session id {}", clusterId);
    }

    protected void addSession(AbstractSessionManager.Session session)
    {
        /**
         * SESSION LOCKING
         * When this method is called, we already hold the session lock.
         * See {@link #newSession(HttpServletRequest)}
         */
        String clusterId = getClusterId(session);
        Session tcSession = (Session)session;
        SessionData sessionData = tcSession.getSessionData();
        _sessionExpirations.put(clusterId, sessionData._expiration);
        _sessionDatas.put(clusterId, sessionData);
        _sessions.put(clusterId, tcSession);
        Log.debug("Added session {} with id {}", tcSession, clusterId);
    }

    @Override
    public Cookie access(HttpSession session, boolean secure)
    {
        Cookie cookie = super.access(session, secure);
        Log.debug("Accessed session {} with id {}", session, session.getId());
        return cookie;
    }

    @Override
    public void complete(HttpSession session)
    {
        super.complete(session);
        Log.debug("Completed session {} with id {}", session, session.getId());
    }

    protected void removeSession(String clusterId)
    {
        /**
         * SESSION LOCKING
         * When this method is called, we already hold the session lock.
         * Either the scavenger acquired it, or the user invalidated
         * the existing session and thus {@link #enter(String)} was called.
         */
        // Remove locally cached session
        Session session = _sessions.remove(clusterId);
        Log.debug("Removed session {} with id {}", session, clusterId);
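
The class javadoc's "Use of Hashtable as a Set" note says that TerracottaSessionIdManager maps each session id to a single shared sentinel object, the same trick java.util.HashSet uses internally, rather than mapping the id to itself. TerracottaSessionIdManager is not part of this listing, so the snippet below is only a minimal standalone sketch of that pattern; the class and member names are illustrative, not the actual implementation.

import java.util.Hashtable;

// Sketch of the "Hashtable as a Set" pattern; not the actual TerracottaSessionIdManager code.
public class SessionIdSet
{
    // A single shared value object marks key presence, as in java.util.HashSet.
    // Hashtable rejects null values, and mapping a String id to itself would
    // cause extra traffic to the Terracotta server.
    private static final Object PRESENT = new Object();

    // A natively synchronized collection gives node-local thread safety and,
    // per the javadoc above, plays better with Terracotta than synchronized wrappers.
    private final Hashtable<String, Object> _ids = new Hashtable<String, Object>();

    public void add(String clusterId)
    {
        _ids.put(clusterId, PRESENT);
    }

    public boolean contains(String clusterId)
    {
        return _ids.containsKey(clusterId);
    }

    public void remove(String clusterId)
    {
        _ids.remove(clusterId);
    }
}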
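
enter(String) and exit(String) lock and unlock on newLockId(clusterId), but the body of newLockId(String) falls outside this listing. The javadoc's "Terracotta lock id" note says it combines session id, context path and virtual host. A plausible standalone sketch, reusing the same ":" separator as the shared-root names built in doStart(), could look like the following; the helper class and the separator are assumptions, not the actual implementation.

// Hypothetical sketch of the lock id composition described in the javadoc;
// the real newLockId(String) is not shown in this listing.
public final class LockIds
{
    private LockIds()
    {
    }

    // Two sessions with the same id in different contexts, or in the same context
    // on different virtual hosts, must not share a Terracotta lock, so all three
    // parts are included in the lock id.
    public static String newLockId(String clusterId, String contextPath, String virtualHost)
    {
        return clusterId + ":" + contextPath + ":" + virtualHost;
    }
}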
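
run() delegates to scavenge(), which is also outside this listing. The javadoc's "Sessions expiration map" note explains the intent: compare the shared id-to-expiration map against the current time, so that only the expiration values, and not whole sessions, migrate to the node running the scavenger. The following is a self-contained sketch of that comparison under those assumptions; MutableLong and the class name are stand-ins, not the manager's actual types or code.

import java.util.ArrayList;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;

// Illustrative sketch of expiration-map driven scavenging; not the actual scavenge() code.
public class ExpirationScavenger
{
    // Stand-in for the MutableLong value type used by _sessionExpirations.
    public static class MutableLong
    {
        public volatile long value;

        public MutableLong(long value)
        {
            this.value = value;
        }
    }

    // Shared map from session id to expiration time, in milliseconds since the epoch.
    private final Hashtable<String, MutableLong> _sessionExpirations = new Hashtable<String, MutableLong>();

    // Collects the ids of sessions expired at 'now'. Only the expiration values are
    // read, so full session objects never migrate to the scavenging node.
    public List<String> findExpired(long now)
    {
        List<String> expired = new ArrayList<String>();
        synchronized (_sessionExpirations)
        {
            for (Map.Entry<String, MutableLong> entry : _sessionExpirations.entrySet())
            {
                if (entry.getValue().value <= now)
                    expired.add(entry.getKey());
            }
        }
        return expired;
    }
}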