
📄 LayoutManager.h

📁 nandflash file system source code
//---------------------------------------------------------- -*- Mode: C++ -*-
// $Id: LayoutManager.h 214 2008-11-05 22:15:43Z sriramsrao $
//
// Created 2006/06/06
// Author: Sriram Rao
//
// Copyright 2008 Quantcast Corp.
// Copyright 2006-2008 Kosmix Corp.
//
// This file is part of Kosmos File System (KFS).
//
// Licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// \file LayoutManager.h
// \brief Layout manager is responsible for laying out chunks on chunk
// servers.  The model is that, when a chunkserver connects to the meta
// server, the layout manager gets notified; the layout manager then
// uses the chunk server for data placement.
//
//----------------------------------------------------------------------------

#ifndef META_LAYOUTMANAGER_H
#define META_LAYOUTMANAGER_H

#include <algorithm>
#include <ctime>
#include <map>
#include <tr1/unordered_map>
#include <vector>
#include <set>
#include <sstream>

#include "kfstypes.h"
#include "meta.h"
#include "queue.h"
#include "ChunkServer.h"
#include "LeaseCleaner.h"
#include "ChunkReplicator.h"
#include "libkfsIO/Counter.h"

namespace KFS
{
    ///
    /// For disk space utilization balancing, we say that a server
    /// is "under utilized" if it is below 30% full; we say that a server
    /// is "over utilized" if it is above 90% full.  For rebalancing, we
    /// move data from servers that are over-utilized to servers that are
    /// under-utilized.  These numbers are intentionally set conservatively;
    /// we don't want the system to constantly move stuff between nodes
    /// when there isn't much to be gained by it.
    ///
    const float MIN_SERVER_SPACE_UTIL_THRESHOLD = 0.3;
    const float MAX_SERVER_SPACE_UTIL_THRESHOLD = 0.9;

    /// Model for leases: the metaserver assigns write leases to
    /// chunkservers; clients/chunkservers can grab a read lease on a
    /// chunk at any time.  The server will typically renew unexpired
    /// leases whenever asked.  As long as the lease is valid, the server
    /// promises not to change the lease's version # (also, the chunk
    /// won't disappear as long as the lease is valid).
    struct LeaseInfo {
        LeaseInfo(LeaseType t, int64_t i) : leaseType(t), leaseId(i)
        {
            time(&expires);
            // default lease time of 1 min
            expires += LEASE_INTERVAL_SECS;
        }
        LeaseInfo(LeaseType t, int64_t i, ChunkServerPtr &c) :
            leaseType(t), leaseId(i), chunkServer(c)
        {
            time(&expires);
            // default lease time of 1 min
            expires += LEASE_INTERVAL_SECS;
        }
        static bool IsValidLease(const LeaseInfo &l)
        {
            time_t now = time(0);
            return now <= l.expires;
        }
        static bool IsValidWriteLease(const LeaseInfo &l)
        {
            return (l.leaseType == WRITE_LEASE) &&
                IsValidLease(l);
        }
        LeaseType leaseType;
        int64_t leaseId;
        // set for a write lease
        ChunkServerPtr chunkServer;
        time_t expires;
    };
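    // Illustrative sketch (not part of the original KFS source): how a
    // caller might scan the leases recorded for a chunk and reuse a
    // still-valid write lease.  "leases" is a hypothetical
    // std::vector<LeaseInfo>; IsValidWriteLease is the static predicate
    // defined above.
    //
    //   std::vector<LeaseInfo>::const_iterator it =
    //       std::find_if(leases.begin(), leases.end(),
    //                    LeaseInfo::IsValidWriteLease);
    //   if (it != leases.end()) {
    //       // a write lease is still valid: return it; no version # bump
    //   } else {
    //       // no valid write lease: issue a new one and bump the version #
    //   }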
    // Given a chunk-id: where is it stored and who has the lease(s)?
    struct ChunkPlacementInfo {
        ChunkPlacementInfo() :
            fid(-1), ongoingReplications(0) { }
        // For cross-validation, we store the fid here.  This
        // is also useful during re-replication: given a chunk, we
        // can get its fid and, from that, all the attributes of the file.
        fid_t fid;
        /// is this chunk being (re)replicated now?  if so, how many
        int ongoingReplications;
        std::vector<ChunkServerPtr> chunkServers;
        std::vector<LeaseInfo> chunkLeases;
    };

    // To support rack-aware placement, we need an estimate of how much
    // space is available on each given rack.  Once the set of candidate
    // racks is ordered, we walk down the sorted list to pick the
    // desired # of servers.  For ordering purposes, we track how much
    // space each machine on the rack exports and how much space we have
    // parceled out; this gives us an estimate of available space (we are
    // over-counting because the space we parcel out may not be fully
    // used).
    class RackInfo {
        uint32_t mRackId;
        uint64_t mTotalSpace;
        uint64_t mAllocSpace;
        // set of servers on this rack
        std::vector<ChunkServerPtr> mServers;
    public:
        RackInfo(int id) : mRackId(id), mTotalSpace(0), mAllocSpace(0) { }
        inline uint32_t id() const {
            return mRackId;
        }
        void clear() {
            mTotalSpace = mAllocSpace = 0;
        }
        void addServer(ChunkServerPtr &s) {
            mTotalSpace += s->GetTotalSpace();
            mAllocSpace += s->GetUsedSpace();
            mServers.push_back(s);
        }
        void removeServer(ChunkServer *server) {
            std::vector<ChunkServerPtr>::iterator iter;

            iter = std::find_if(mServers.begin(), mServers.end(),
                    ChunkServerMatcher(server));
            if (iter == mServers.end())
                return;
            mTotalSpace -= server->GetTotalSpace();
            mAllocSpace -= server->GetUsedSpace();
            mServers.erase(iter);
        }
        void computeSpace() {
            clear();
            for (std::vector<ChunkServerPtr>::iterator iter = mServers.begin();
                    iter != mServers.end(); iter++) {
                ChunkServerPtr s = *iter;

                mTotalSpace += s->GetTotalSpace();
                mAllocSpace += s->GetUsedSpace();
            }
        }
        const std::vector<ChunkServerPtr> &getServers() {
            return mServers;
        }
        uint64_t availableSpace() const {
            if (mTotalSpace < mAllocSpace)
                // paranoia...
                return 0;
            return mTotalSpace - mAllocSpace;
        }
        // we want to sort in decreasing order so that racks with more
        // space are at the head of the list (and so, a node from them
        // will get chosen).
        bool operator < (const RackInfo &other) const {
            uint64_t mine = availableSpace();

            if (mine == 0)
                return false;
            return mine < other.availableSpace();
        }
    };

    // Functor to enable matching of a rack-id with a RackInfo
    class RackMatcher {
        uint32_t id;
    public:
        RackMatcher(uint32_t rackId) : id(rackId) { }
        bool operator()(const RackInfo &rack) const {
            return rack.id() == id;
        }
    };

    // chunkid to server(s) map
    typedef std::map<chunkId_t, ChunkPlacementInfo> CSMap;
    typedef std::map<chunkId_t, ChunkPlacementInfo>::const_iterator CSMapConstIter;
    typedef std::map<chunkId_t, ChunkPlacementInfo>::iterator CSMapIter;

#if 0
    typedef std::tr1::unordered_map<chunkId_t, ChunkPlacementInfo> CSMap;
    typedef std::tr1::unordered_map<chunkId_t, ChunkPlacementInfo>::const_iterator CSMapConstIter;
    typedef std::tr1::unordered_map<chunkId_t, ChunkPlacementInfo>::iterator CSMapIter;
#endif

    // candidate set of chunks whose replication needs checking
    typedef std::set<chunkId_t> CRCandidateSet;
    typedef std::set<chunkId_t>::iterator CRCandidateSetIter;
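    // Illustrative sketch (not part of the original KFS source): one way
    // to realize the rack-aware placement described above: order the
    // candidate racks by available space, most space first, then take a
    // server from each rack until enough replicas are placed.  "racks"
    // and "numReplicas" are hypothetical locals.
    //
    //   struct MoreAvailableSpace {
    //       bool operator()(const RackInfo &a, const RackInfo &b) const {
    //           return a.availableSpace() > b.availableSpace();
    //       }
    //   };
    //
    //   std::sort(racks.begin(), racks.end(), MoreAvailableSpace());
    //   std::vector<ChunkServerPtr> targets;
    //   for (size_t i = 0; i < racks.size() && targets.size() < numReplicas; i++) {
    //       const std::vector<ChunkServerPtr> &srvs = racks[i].getServers();
    //       if (!srvs.empty())
    //           targets.push_back(srvs.front());
    //   }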
    //
    // For maintenance reasons, we'd like to schedule downtime for a
    // server.  When the server is taken down, a promise is made: the
    // server will go down now and come back up by a specified time.
    // During this window, we are willing to tolerate a reduced # of
    // copies for a block.  Now, if the server doesn't come up by the
    // promised time, the metaserver will initiate re-replication of the
    // blocks on that node.  This ability allows us to schedule downtime
    // on a node without having to incur the overhead of re-replication.
    //
    struct HibernatingServerInfo_t {
        // the server we put in hibernation
        ServerLocation location;
        // the blocks on this server
        CRCandidateSet blocks;
        // when is it likely to wake up
        time_t sleepEndTime;
    };

    ///
    /// LayoutManager is responsible for write allocation:
    /// it determines where to place a chunk based on metrics such as
    /// which server has the most space, etc.  This model will eventually
    /// be extended to include replication.
    ///
    /// Allocating space for a chunk is a 3-way communication between the
    /// client, the metaserver, and a chunkserver:
    ///  1. The client sends a request to the meta server for
    /// allocation.
    ///  2. The meta server picks a chunkserver to hold the chunk and
    /// then sends an RPC to that chunkserver to create a chunk.
    ///  3. The chunkserver creates a chunk and replies to the
    /// meta server's RPC.
    ///  4. Finally, the metaserver logs the allocation request
    /// and then replies to the client.
    ///
    /// In this model, the layout manager picks the chunkserver
    /// location and queues the RPC to the chunkserver.  All the
    /// communication is handled by a thread in NetDispatcher,
    /// which picks up the RPC request and sends it on its merry way.
    ///
    class LayoutManager {
    public:
        LayoutManager();

        virtual ~LayoutManager() { }

        /// A new chunk server has joined and sent a HELLO message.
        /// Use it to configure information about that server.
        /// @param[in] r  The MetaHello request sent by the
        /// new chunk server.
        void AddNewServer(MetaHello *r);

        /// Our connection to a chunkserver went down.  So,
        /// for all chunks hosted on this server, update the
        /// mapping table to indicate that we can't
        /// get to the data.
        /// @param[in] server  The server that is down
        void ServerDown(ChunkServer *server);

        /// A server is being taken down: if downtime is > 0, it is a
        /// value in seconds that specifies the time interval within
        /// which the server will connect back.  If it doesn't connect
        /// within that interval, the server is assumed to be down and
        /// re-replication will start.
        int RetireServer(const ServerLocation &loc, int downtime);

        /// Allocate space to hold a chunk on some chunkserver.
        /// @param[in] r The request associated with the
        /// write-allocation call.
        /// @retval 0 on success; -1 on failure
        int AllocateChunk(MetaAllocate *r);

        /// A chunkid has been previously allocated.  The caller
        /// is trying to grab the write lease on the chunk.  If a valid
        /// lease exists, we return it; otherwise, we assign a new
        /// lease, bump the version # for the chunk, and notify the
        /// caller.
        ///
        /// @param[in] r The request associated with the
        /// write-allocation call.
        /// @param[out] isNewLease  True if a new lease has been
        /// issued, which tells the caller that a version # bump
        /// for the chunk has been done.
        /// @retval status code
        int GetChunkWriteLease(MetaAllocate *r, bool &isNewLease);
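        // Illustrative sketch (not part of the original KFS source): the
        // allocation exchange documented above, as seen at the
        // metaserver.  "layout" is a hypothetical LayoutManager instance
        // and "req" an incoming client request.
        //
        //   MetaAllocate *req = ...;          // step 1: client's request
        //   if (layout.AllocateChunk(req) == 0) {
        //       // step 2: a create-chunk RPC has been queued to the
        //       // chosen chunkserver; step 3: its reply arrives via the
        //       // NetDispatcher thread; step 4: the metaserver logs the
        //       // allocation and replies to the client.
        //   } else {
        //       // no chunkserver could take the chunk; fail the request
        //   }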
        /// Delete a chunk on the server that holds it.
        /// @param[in] chunkId The id of the chunk being deleted
        void DeleteChunk(chunkId_t chunkId);

        /// A chunkserver is notifying us that a chunk it has is
        /// corrupt; so, update our tables to reflect that the chunk
        /// isn't hosted on that chunkserver any more; re-replication
        /// will take care of recovering that chunk.
        /// @param[in] r  The request that describes the corrupted chunk
        void ChunkCorrupt(MetaChunkCorrupt *r);

        /// Truncate a chunk to the desired size on the server that
        /// holds it.
        /// @param[in] chunkId The id of the chunk being truncated
        /// @param[in] sz    The size to which the chunk should be
        /// truncated.
        void TruncateChunk(chunkId_t chunkId, off_t sz);

        /// Handlers to acquire and renew leases.  Unexpired leases
        /// will typically be renewed.
        int GetChunkReadLease(MetaLeaseAcquire *r);
        int LeaseRenew(MetaLeaseRenew *r);

        /// Handler to let a lease owner relinquish a lease.
        int LeaseRelinquish(MetaLeaseRelinquish *r);

        /// Is a valid lease issued on any of the chunks in the
        /// vector of MetaChunkInfo's?
        bool IsValidLeaseIssued(const std::vector<MetaChunkInfo *> &c);

        /// Add a mapping from chunkId -> server.
        /// @param[in] chunkId  chunkId that has been stored
        /// on server c
        /// @param[in] fid  fileId associated with this chunk.
        /// @param[in] c   server that stores chunk chunkId.
        ///   If c == NULL, then we update the table to
        /// reflect chunk allocation; whenever chunk servers
        /// start up and tell us what chunks they have, we
        /// line things up and see which chunk is stored where.
        void AddChunkToServerMapping(chunkId_t chunkId, fid_t fid, ChunkServer *c);

        /// Remove the mappings for a chunk.
        /// @param[in] chunkId  chunkId for which the mapping needs to be nuked.
        void RemoveChunkToServerMapping(chunkId_t chunkId);
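        // Illustrative sketch (not part of the original KFS source): the
        // two ways AddChunkToServerMapping is meant to be called, per
        // the comment above.  "layout", "chunkId", "fid", and "srv" are
        // hypothetical.
        //
        //   // Replaying an allocation: the chunk exists, but we don't
        //   // yet know which server hosts it.
        //   layout.AddChunkToServerMapping(chunkId, fid, NULL);
        //
        //   // A chunkserver's HELLO later lists this chunk; complete
        //   // the mapping with the actual server.
        //   layout.AddChunkToServerMapping(chunkId, fid, srv);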
