/* ssl_scache_shmcb.c -- mod_ssl shared-memory cyclic-buffer (SHMCB)
 * session cache (excerpt). */
/* NOTE(review): this excerpt begins mid-way through shmcb_store_session();
 * the fragment below is the tail of an ap_log_error() debug call whose
 * opening tokens precede this chunk. */
                 "session_id[0]=%u, masked index=%u",
                 session_id[0], masked_index);
    /* Map the masked first byte of the session id onto its division
     * (a queue + cache pair inside the shared segment). */
    if (!shmcb_get_division(header, &queue, &cache, (unsigned int)masked_index)) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "shmcb_store_session internal error");
        return FALSE;
    }
    /* Serialise the session, work out how much we're dealing
     * with. NB: This check could be removed if we're not paranoid
     * or we find some assurance that it will never be necessary.
     * The first i2d_SSL_SESSION() call (NULL output) only measures the
     * DER length; the second actually encodes into 'encoded'. */
    len_encoded = i2d_SSL_SESSION(pSession, NULL);
    if (len_encoded > SSL_SESSION_MAX_DER) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "session is too big (%u bytes)", len_encoded);
        return FALSE;
    }
    ptr_encoded = encoded;
    len_encoded = i2d_SSL_SESSION(pSession, &ptr_encoded);
    expiry_time = timeout;
    if (!shmcb_insert_encoded_session(s, &queue, &cache, encoded,
                                      len_encoded, session_id,
                                      expiry_time)) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "can't store a session!");
        return FALSE;
    }
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "leaving shmcb_store successfully");
    header->num_stores++;
    return TRUE;
}

/* Look up a session by id in the shared-memory cache and return the
 * deserialised SSL_SESSION, or NULL on a miss.  Hit/miss counters in
 * the header are updated either way.
 * NOTE(review): the error paths say "return FALSE" although the return
 * type is SSL_SESSION *; FALSE is 0 so this is NULL in practice, but
 * "return NULL" would be type-correct. */
static SSL_SESSION *shmcb_retrieve_session(
    server_rec *s, void *shm_segment,
    UCHAR *id, int idlen)
{
    SHMCBHeader *header;
    SHMCBQueue queue;
    SHMCBCache cache;
    unsigned char masked_index;
    SSL_SESSION *pSession;

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "inside shmcb_retrieve_session");
    /* id[0] is used for division selection below, and the cache needs
     * at least some id material to match on. */
    if (idlen < 2) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "unusably short session_id provided "
                     "(%u bytes)", idlen);
        return FALSE;
    }
    /* Get the header structure, which division this session lookup
     * will come from etc. */
    shmcb_get_header(shm_segment, &header);
    masked_index = id[0] & header->division_mask;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "id[0]=%u, masked index=%u", id[0], masked_index);
    if (!shmcb_get_division(header, &queue, &cache,
                            (unsigned int) masked_index)) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "shmcb_retrieve_session internal error");
        header->num_retrieves_miss++;
        return FALSE;
    }
    /* Get the session corresponding to the session_id or NULL if it
     * doesn't exist (or is flagged as "removed"). */
    pSession = shmcb_lookup_session_id(s, &queue, &cache, id, idlen);
    if (pSession)
        header->num_retrieves_hit++;
    else
        header->num_retrieves_miss++;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "leaving shmcb_retrieve_session");
    return pSession;
}

/* Flag a session id as removed in the shared-memory cache.  Returns
 * TRUE if the id was found and removed, FALSE otherwise; updates the
 * header's remove hit/miss counters. */
static BOOL shmcb_remove_session(
    server_rec *s, void *shm_segment,
    UCHAR *id, int idlen)
{
    SHMCBHeader *header;
    SHMCBQueue queue;
    SHMCBCache cache;
    unsigned char masked_index;
    BOOL res;

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "inside shmcb_remove_session");
    if (id == NULL) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "remove called with NULL session_id!");
        return FALSE;
    }
    /* Get the header structure, which division this session remove
     * will happen in etc. */
    shmcb_get_header(shm_segment, &header);
    masked_index = id[0] & header->division_mask;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "id[0]=%u, masked index=%u", id[0], masked_index);
    if (!shmcb_get_division(header, &queue, &cache,
                            (unsigned int)masked_index)) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "shmcb_remove_session, internal error");
        header->num_removes_miss++;
        return FALSE;
    }
    res = shmcb_remove_session_id(s, &queue, &cache, id, idlen);
    if (res)
        header->num_removes_hit++;
    else
        header->num_removes_miss++;
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
                 "leaving shmcb_remove_session");
    return res;
}

/*
** Weirdo cyclic buffer functions
*/

/* This gets used in the cyclic "index array" (in the 'Queue's) and
 * in the cyclic 'Cache's too ...
you provide the "width" of the * cyclic store, the starting position and how far to move (with * wrapping if necessary). Basically it's addition modulo buf_size. */static unsigned int shmcb_cyclic_increment( unsigned int buf_size, unsigned int start_pos, unsigned int to_add){ start_pos += to_add; while (start_pos >= buf_size) start_pos -= buf_size; return start_pos;}/* Given two positions in a cyclic buffer, calculate the "distance". * This is to cover the case ("non-trivial") where the 'next' offset * is to the left of the 'start' offset. NB: This calculates the * space inclusive of one end-point but not the other. There is an * ambiguous case (which is why we use the <start_pos,offset> * coordinate system rather than <start_pos,end_pos> one) when 'start' * is the same as 'next'. It could indicate the buffer is full or it * can indicate the buffer is empty ... I choose the latter as it's * easier and usually necessary to check if the buffer is full anyway * before doing incremental logic (which is this useful for), but we * definitely need the empty case handled - in fact it's our starting * state!! */static unsigned int shmcb_cyclic_space( unsigned int buf_size, unsigned int start_offset, unsigned int next_offset){ /* Is it the trivial case? */ if (start_offset <= next_offset) return (next_offset - start_offset); /* yes */ else return ((buf_size - start_offset) + next_offset); /* no */}/* A "normal-to-cyclic" memcpy ... this takes a linear block of * memory and copies it onto a cyclic buffer. The purpose and * function of this is pretty obvious, you need to cover the case * that the destination (cyclic) buffer has to wrap round. */static void shmcb_cyclic_ntoc_memcpy( unsigned int buf_size, unsigned char *data, unsigned int dest_offset, unsigned char *src, unsigned int src_len){ /* Cover the case that src_len > buf_size */ if (src_len > buf_size) src_len = buf_size; /* Can it be copied all in one go? 
*/ if (dest_offset + src_len < buf_size) /* yes */ memcpy(data + dest_offset, src, src_len); else { /* no */ memcpy(data + dest_offset, src, buf_size - dest_offset); memcpy(data, src + buf_size - dest_offset, src_len + dest_offset - buf_size); } return;}/* A "cyclic-to-normal" memcpy ... given the last function, this * one's purpose is clear, it copies out of a cyclic buffer handling * wrapping. */static void shmcb_cyclic_cton_memcpy( unsigned int buf_size, unsigned char *dest, unsigned char *data, unsigned int src_offset, unsigned int src_len){ /* Cover the case that src_len > buf_size */ if (src_len > buf_size) src_len = buf_size; /* Can it be copied all in one go? */ if (src_offset + src_len < buf_size) /* yes */ memcpy(dest, data + src_offset, src_len); else { /* no */ memcpy(dest, data + src_offset, buf_size - src_offset); memcpy(dest + buf_size - src_offset, data, src_len + src_offset - buf_size); } return;}/* Here's the cool hack that makes it all work ... by simply * making the first collection of bytes *be* our header structure * (casting it into the C structure), we have the perfect way to * maintain state in a shared-memory session cache from one call * (and process) to the next, use the shared memory itself! The * original mod_ssl shared-memory session cache uses variables * inside the context, but we simply use that for storing the * pointer to the shared memory itself. And don't forget, after * Apache's initialisation, this "header" is constant/read-only * so we can read it outside any locking. * <grin> - sometimes I just *love* coding y'know?! */static void shmcb_get_header(void *shm_mem, SHMCBHeader **header){ *header = (SHMCBHeader *)shm_mem; return;}/* This is what populates our "interesting" structures. 
Given a * pointer to the header, and an index into the appropriate * division (this must have already been masked using the * division_mask by the caller!), we can populate the provided * SHMCBQueue and SHMCBCache structures with values and * pointers to the underlying shared memory. Upon returning * (if not FALSE), the caller can meddle with the pointer * values and they will map into the shared-memory directly, * as such there's no need to "free" or "set" the Queue or * Cache values, they were themselves references to the *real* * data. */static BOOL shmcb_get_division( SHMCBHeader *header, SHMCBQueue *queue, SHMCBCache *cache, unsigned int idx){ unsigned char *pQueue; unsigned char *pCache; /* bounds check */ if (idx > (unsigned int) header->division_mask) return FALSE; /* Locate the blocks of memory storing the corresponding data */ pQueue = ((unsigned char *) header) + header->division_offset + (idx * header->division_size); pCache = pQueue + header->queue_size; /* Populate the structures with appropriate pointers */ queue->first_pos = (unsigned int *) pQueue; /* Our structures stay packed, no matter what the system's * data-alignment regime is. */ queue->pos_count = (unsigned int *) (pQueue + sizeof(unsigned int)); queue->indexes = (SHMCBIndex *) (pQueue + (2 * sizeof(unsigned int))); cache->first_pos = (unsigned int *) pCache; cache->pos_count = (unsigned int *) (pCache + sizeof(unsigned int)); cache->data = (unsigned char *) (pCache + (2 * sizeof(unsigned int))); queue->header = cache->header = header; return TRUE;}/* This returns a pointer to the piece of shared memory containing * a specified 'Index'. SHMCBIndex, like SHMCBHeader, is a fixed * width non-referencing structure of primitive types that can be * cast onto the corresponding block of shared memory. 
Thus, by * returning a cast pointer to that section of shared memory, the * caller can read and write values to and from the "structure" and * they are actually reading and writing the underlying shared * memory. */static SHMCBIndex *shmcb_get_index( const SHMCBQueue *queue, unsigned int idx){ /* bounds check */ if (idx > queue->header->index_num) return NULL; /* Return a pointer to the index. NB: I am being horribly pendantic * here so as to avoid any potential data-alignment assumptions being * placed on the pointer arithmetic by the compiler (sigh). */ return (SHMCBIndex *)(((unsigned char *) queue->indexes) + (idx * sizeof(SHMCBIndex)));}/* This functions rolls expired cache (and index) entries off the front * of the cyclic buffers in a division. The function returns the number * of expired sessions. */static unsigned int shmcb_expire_division( server_rec *s, SHMCBQueue *queue, SHMCBCache *cache){ SHMCBIndex *idx; time_t now; unsigned int loop, index_num, pos_count, new_pos; SHMCBHeader *header; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "entering shmcb_expire_division"); /* We must calculate num and space ourselves based on expiry times. */ now = time(NULL); loop = 0; new_pos = shmcb_get_safe_uint(queue->first_pos); /* Cache useful values */ header = queue->header; index_num = header->index_num; pos_count = shmcb_get_safe_uint(queue->pos_count); while (loop < pos_count) { idx = shmcb_get_index(queue, new_pos); if (shmcb_get_safe_time(&(idx->expires)) > now) /* it hasn't expired yet, we're done iterating */ break; /* This one should be expired too. Shift to the next entry. */ loop++; new_pos = shmcb_cyclic_increment(index_num, new_pos, 1); } /* Find the new_offset and make the expiries happen. */ if (loop > 0) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "will be expiring %u sessions", loop); /* We calculate the new_offset by "peeking" (or in the * case it's the last entry, "sneaking" ;-). */ if (loop == pos_count) { /* We are expiring everything! 
This is easy to do... */ shmcb_set_safe_uint(queue->pos_count, 0); shmcb_set_safe_uint(cache->pos_count, 0);
/* (code-viewer UI chrome removed; the excerpt is truncated here, inside
 * shmcb_expire_division.) */