
📄 bgwriter.c

📁 Source code of PostgreSQL 8.1.4, an open-source database system for Linux
💻 C
📖 Page 1 of 2
		 */
		if (do_checkpoint)
		{
			/*
			 * We will warn if (a) too soon since last checkpoint (whatever
			 * caused it) and (b) somebody has set the ckpt_time_warn flag
			 * since the last checkpoint start.  Note in particular that this
			 * implementation will not generate warnings caused by
			 * CheckPointTimeout < CheckPointWarning.
			 */
			if (BgWriterShmem->ckpt_time_warn &&
				elapsed_secs < CheckPointWarning)
				ereport(LOG,
						(errmsg("checkpoints are occurring too frequently (%d seconds apart)",
								elapsed_secs),
						 errhint("Consider increasing the configuration parameter \"checkpoint_segments\".")));
			BgWriterShmem->ckpt_time_warn = false;

			/*
			 * Indicate checkpoint start to any waiting backends.
			 */
			ckpt_active = true;
			BgWriterShmem->ckpt_started++;

			CreateCheckPoint(false, force_checkpoint);

			/*
			 * After any checkpoint, close all smgr files.  This is so we
			 * won't hang onto smgr references to deleted files indefinitely.
			 */
			smgrcloseall();

			/*
			 * Indicate checkpoint completion to any waiting backends.
			 */
			BgWriterShmem->ckpt_done = BgWriterShmem->ckpt_started;
			ckpt_active = false;

			/*
			 * Note we record the checkpoint start time not end time as
			 * last_checkpoint_time.  This is so that time-driven checkpoints
			 * happen at a predictable spacing.
			 */
			last_checkpoint_time = now;
		}
		else
			BgBufferSync();

		/*
		 * Nap for the configured time, or sleep for 10 seconds if there is no
		 * bgwriter activity configured.
		 *
		 * On some platforms, signals won't interrupt the sleep.  To ensure we
		 * respond reasonably promptly when someone signals us, break down the
		 * sleep into 1-second increments, and check for interrupts after each
		 * nap.
		 *
		 * We absorb pending requests after each short sleep.
		 */
		if ((bgwriter_all_percent > 0.0 && bgwriter_all_maxpages > 0) ||
			(bgwriter_lru_percent > 0.0 && bgwriter_lru_maxpages > 0))
			udelay = BgWriterDelay * 1000L;
		else
			udelay = 10000000L;

		while (udelay > 1000000L)
		{
			if (got_SIGHUP || checkpoint_requested || shutdown_requested)
				break;
			pg_usleep(1000000L);
			AbsorbFsyncRequests();
			udelay -= 1000000L;
		}

		if (!(got_SIGHUP || checkpoint_requested || shutdown_requested))
			pg_usleep(udelay);
	}
}


/* --------------------------------
 *		signal handler routines
 * --------------------------------
 */

/*
 * bg_quickdie() occurs when signalled SIGQUIT by the postmaster.
 *
 * Some backend has bought the farm,
 * so we need to stop what we're doing and exit.
 */
static void
bg_quickdie(SIGNAL_ARGS)
{
	PG_SETMASK(&BlockSig);

	/*
	 * DO NOT proc_exit() -- we're here because shared memory may be
	 * corrupted, so we don't want to try to clean up our transaction. Just
	 * nail the windows shut and get out of town.
	 *
	 * Note we do exit(1) not exit(0).  This is to force the postmaster into a
	 * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
	 * backend.  This is necessary precisely because we don't clean up our
	 * shared memory state.
	 */
	exit(1);
}

/* SIGHUP: set flag to re-read config file at next convenient time */
static void
BgSigHupHandler(SIGNAL_ARGS)
{
	got_SIGHUP = true;
}

/* SIGINT: set flag to run a normal checkpoint right away */
static void
ReqCheckpointHandler(SIGNAL_ARGS)
{
	checkpoint_requested = true;
}

/* SIGUSR2: set flag to run a shutdown checkpoint and exit */
static void
ReqShutdownHandler(SIGNAL_ARGS)
{
	shutdown_requested = true;
}


/* --------------------------------
 *		communication with backends
 * --------------------------------
 */

/*
 * BgWriterShmemSize
 *		Compute space needed for bgwriter-related shared memory
 */
Size
BgWriterShmemSize(void)
{
	Size		size;

	/*
	 * Currently, the size of the requests[] array is arbitrarily set equal to
	 * NBuffers.  This may prove too large or small ...
	 */
	size = offsetof(BgWriterShmemStruct, requests);
	size = add_size(size, mul_size(NBuffers, sizeof(BgWriterRequest)));

	return size;
}

/*
 * BgWriterShmemInit
 *		Allocate and initialize bgwriter-related shared memory
 */
void
BgWriterShmemInit(void)
{
	bool		found;

	BgWriterShmem = (BgWriterShmemStruct *)
		ShmemInitStruct("Background Writer Data",
						BgWriterShmemSize(),
						&found);
	if (BgWriterShmem == NULL)
		ereport(FATAL,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("not enough shared memory for background writer")));
	if (found)
		return;					/* already initialized */

	MemSet(BgWriterShmem, 0, sizeof(BgWriterShmemStruct));
	BgWriterShmem->max_requests = NBuffers;
}

/*
 * RequestCheckpoint
 *		Called in backend processes to request an immediate checkpoint
 *
 * If waitforit is true, wait until the checkpoint is completed
 * before returning; otherwise, just signal the request and return
 * immediately.
 *
 * If warnontime is true, and it's "too soon" since the last checkpoint,
 * the bgwriter will log a warning.  This should be true only for checkpoints
 * caused due to xlog filling, else the warning will be misleading.
 */
void
RequestCheckpoint(bool waitforit, bool warnontime)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile BgWriterShmemStruct *bgs = BgWriterShmem;
	sig_atomic_t old_failed = bgs->ckpt_failed;
	sig_atomic_t old_started = bgs->ckpt_started;

	/*
	 * If in a standalone backend, just do it ourselves.
	 */
	if (!IsPostmasterEnvironment)
	{
		CreateCheckPoint(false, true);

		/*
		 * After any checkpoint, close all smgr files.  This is so we won't
		 * hang onto smgr references to deleted files indefinitely.
		 */
		smgrcloseall();

		return;
	}

	/* Set warning request flag if appropriate */
	if (warnontime)
		bgs->ckpt_time_warn = true;

	/*
	 * Send signal to request checkpoint.  When waitforit is false, we
	 * consider failure to send the signal to be nonfatal.
	 */
	if (BgWriterShmem->bgwriter_pid == 0)
		elog(waitforit ? ERROR : LOG,
			 "could not request checkpoint because bgwriter not running");
	if (kill(BgWriterShmem->bgwriter_pid, SIGINT) != 0)
		elog(waitforit ? ERROR : LOG,
			 "could not signal for checkpoint: %m");

	/*
	 * If requested, wait for completion.  We detect completion according to
	 * the algorithm given above.
	 */
	if (waitforit)
	{
		while (bgs->ckpt_started == old_started)
		{
			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}
		old_started = bgs->ckpt_started;

		/*
		 * We are waiting for ckpt_done >= old_started, in a modulo sense.
		 * This is a little tricky since we don't know the width or signedness
		 * of sig_atomic_t.  We make the lowest common denominator assumption
		 * that it is only as wide as "char".  This means that this algorithm
		 * will cope correctly as long as we don't sleep for more than 127
		 * completed checkpoints.  (If we do, we will get another chance to
		 * exit after 128 more checkpoints...)
		 */
		while (((signed char) (bgs->ckpt_done - old_started)) < 0)
		{
			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}

		if (bgs->ckpt_failed != old_failed)
			ereport(ERROR,
					(errmsg("checkpoint request failed"),
					 errhint("Consult the server log for details.")));
	}
}

/*
 * ForwardFsyncRequest
 *		Forward a file-fsync request from a backend to the bgwriter
 *
 * Whenever a backend is compelled to write directly to a relation
 * (which should be seldom, if the bgwriter is getting its job done),
 * the backend calls this routine to pass over knowledge that the relation
 * is dirty and must be fsync'd before next checkpoint.
 *
 * If we are unable to pass over the request (at present, this can happen
 * if the shared memory queue is full), we return false.  That forces
 * the backend to do its own fsync.  We hope that will be even more seldom.
 *
 * Note: we presently make no attempt to eliminate duplicate requests
 * in the requests[] queue.  The bgwriter will have to eliminate dups
 * internally anyway, so we may as well avoid holding the lock longer
 * than we have to here.
 */
bool
ForwardFsyncRequest(RelFileNode rnode, BlockNumber segno)
{
	BgWriterRequest *request;

	if (!IsUnderPostmaster)
		return false;			/* probably shouldn't even get here */
	Assert(BgWriterShmem != NULL);

	LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
	if (BgWriterShmem->bgwriter_pid == 0 ||
		BgWriterShmem->num_requests >= BgWriterShmem->max_requests)
	{
		LWLockRelease(BgWriterCommLock);
		return false;
	}
	request = &BgWriterShmem->requests[BgWriterShmem->num_requests++];
	request->rnode = rnode;
	request->segno = segno;
	LWLockRelease(BgWriterCommLock);
	return true;
}

/*
 * AbsorbFsyncRequests
 *		Retrieve queued fsync requests and pass them to local smgr.
 *
 * This is exported because it must be called during CreateCheckPoint;
 * we have to be sure we have accepted all pending requests *after* we
 * establish the checkpoint REDO pointer.  Since CreateCheckPoint
 * sometimes runs in non-bgwriter processes, do nothing if not bgwriter.
 */
void
AbsorbFsyncRequests(void)
{
	BgWriterRequest *requests = NULL;
	BgWriterRequest *request;
	int			n;

	if (!am_bg_writer)
		return;

	/*
	 * We have to PANIC if we fail to absorb all the pending requests (eg,
	 * because our hashtable runs out of memory).  This is because the system
	 * cannot run safely if we are unable to fsync what we have been told to
	 * fsync.  Fortunately, the hashtable is so small that the problem is
	 * quite unlikely to arise in practice.
	 */
	START_CRIT_SECTION();

	/*
	 * We try to avoid holding the lock for a long time by copying the request
	 * array.
	 */
	LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);

	n = BgWriterShmem->num_requests;
	if (n > 0)
	{
		requests = (BgWriterRequest *) palloc(n * sizeof(BgWriterRequest));
		memcpy(requests, BgWriterShmem->requests, n * sizeof(BgWriterRequest));
	}
	BgWriterShmem->num_requests = 0;

	LWLockRelease(BgWriterCommLock);

	for (request = requests; n > 0; request++, n--)
		RememberFsyncRequest(request->rnode, request->segno);

	if (requests)
		pfree(requests);

	END_CRIT_SECTION();
}
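The comments above spell out the backend-facing contract of RequestCheckpoint and ForwardFsyncRequest. As a reading aid, here is a minimal caller-side sketch; it is not part of bgwriter.c, and the wrapper function, header choices, and fallback branch are illustrative assumptions rather than code from this release.

/*
 * Illustrative sketch only -- not part of bgwriter.c.  Shows how a backend
 * would drive the exported entry points above; the wrapper function and the
 * fallback branch are assumptions for the example.
 */
#include "postgres.h"
#include "postmaster/bgwriter.h"
#include "storage/block.h"
#include "storage/relfilenode.h"

void
example_bgwriter_callers(RelFileNode rnode, BlockNumber segno)
{
	/*
	 * A forced, synchronous checkpoint: waitforit = true blocks until
	 * ckpt_done catches up with ckpt_started; warnontime = false suppresses
	 * the "checkpoints are occurring too frequently" warning.
	 */
	RequestCheckpoint(true, false);

	/*
	 * A checkpoint caused by xlog filling: fire-and-forget, but ask the
	 * bgwriter to warn if checkpoints are spaced closer than
	 * CheckPointWarning allows.
	 */
	RequestCheckpoint(false, true);

	/*
	 * A backend that had to write a dirty block itself hands the eventual
	 * fsync to the bgwriter.  If the shared request queue is full,
	 * ForwardFsyncRequest returns false and the backend must fsync the
	 * segment on its own.
	 */
	if (!ForwardFsyncRequest(rnode, segno))
	{
		/* fallback: fsync the file directly via the smgr/md layer */
	}
}

The wait loop in RequestCheckpoint compares the shared counters modulo 256 by casting their difference to signed char, so it stays correct even if sig_atomic_t is only char-wide and the counters wrap. The small standalone program below, with counter values chosen to simulate such a wraparound, is one way to see why a negative result means the checkpoint has not yet finished.

/*
 * Standalone illustration (not PostgreSQL code) of the wraparound-safe
 * comparison used in RequestCheckpoint's wait loop.  The sample values
 * simulate a char-width counter that has wrapped past 255.
 */
#include <stdio.h>
#include <signal.h>

int
main(void)
{
	sig_atomic_t old_started = 254;		/* value captured before waiting */
	sig_atomic_t ckpt_done = 1;			/* counter advanced 254 -> 255 -> 0 -> 1 */

	/* Negative difference (mod 256) means ckpt_done still lags old_started. */
	if (((signed char) (ckpt_done - old_started)) < 0)
		printf("still waiting\n");
	else
		printf("checkpoint done\n");	/* printed here: done has caught up */

	return 0;
}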
