+ Possible Process Management Modes
+
+
+
+
+ Static
+
+ All children are pre-forked at startup.
+
+
+
+
+ Dynamic
+ child processes are spawned on demand.
+ number of idle child processes at any time depends on
+ min_spare_children and max_spare_children
+
+
+
+
+
+
+
+
+ Default is Static.
+
+
+
+ process_management_mode is not available prior to
+ Pgpool-II V4.4.
+
+
+
+
+
+
+ process_management_strategy (enum)
+
+ process_management_strategy configuration parameter
+
+
+
+
+ Specify the process management strategy to satisfy spare (idle) processes count
+ Valid options:
+
+ Possible Process Management Strategies
+
+
+
+
+ Lazy
+ With this strategy the scale down is performed gradually
+ and only gets triggered when excessive spare processes count
+ remains high for more than 5 mins
+
+
+
+
+ Gentle
+ With this strategy the scale down is performed gradually
+ and only gets triggered when excessive spare processes count
+ remains high for more than 2 mins
+
+
+
+
+ Aggressive
+ With this strategy the scale down is performed aggressively
+ and gets triggered more frequently in case of higher spare processes.
+ This mode uses faster and slightly less smart process selection criteria
+ to identify the child processes that can be serviced to satisfy
+ max_spare_children
+
+
+
+
+
+
+
+
+ Default is Gentle.
+
+
+ process_management_strategy is not available prior to
+ Pgpool-II V4.4.
+
+
+
+
+
+
+ min_spare_children (integer)
+
+ min_spare_children configuration parameter
+
+
+
+
+ Specify the minimum number of spare (idle) child processes to keep.
+ If the idle process count falls below min_spare_children,
+ Pgpool-II will spawn new child processes unless it hits the total allowed child process ceiling (num_init_children)
+ Default value is 5.
+
+
+ This parameter can be changed by reloading
+ the Pgpool-II configurations.
+
+
+ This parameter is only applicable for dynamic process management mode.
+
+
+
+
+
+ max_spare_children (integer)
+
+ max_spare_children configuration parameter
+
+
+
+
+ Specify the maximum number of spare (idle) child processes to keep.
+ If the idle process count exceeds max_spare_children, Pgpool-II will kill the excess child processes.
+ Default value is 10.
+
+
+ This parameter can be changed by reloading
+ the Pgpool-II configurations.
+
+
+ This parameter is only applicable for dynamic process management mode.
+
+
+
+
+
+
+
+
+
Clustering mode
diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml
index 08027361..223c52c3 100644
--- a/doc/src/sgml/runtime.sgml
+++ b/doc/src/sgml/runtime.sgml
@@ -297,6 +297,39 @@ default_transaction_isolation = 'repeatable read'
is not possible in the mode.
+
+
+ Process management modes
+
+
+ dynamic process management
+
+
+
+ static process management
+
+
+ Pgpool-II> implements a multi-process architecture where
+ each child process can handle exactly one client connection at any time.
+ The total number of concurrent client connections Pgpool-II>
+ can handle is configured by the
+ num_init_children config parameter.
+ Pgpool-II> supports two child process management modes.
+ Dynamic and Static.
+ In static process management mode, Pgpool-II> pre-forks the
+ num_init_children number of child
+ process at startup, and each child process keeps listening for incoming
+ client connection. While with dynamic process management mode,
+ Pgpool-II> keeps track of idle processes and forks or kills
+ processes to keep this number within the specified boundaries.
+
+
+ process_management_mode is not available prior to
+ Pgpool-II V4.4.
+
+
+
+
diff --git a/src/auth/pool_auth.c b/src/auth/pool_auth.c
index 9425a0f2..0286f8f9 100644
--- a/src/auth/pool_auth.c
+++ b/src/auth/pool_auth.c
@@ -21,6 +21,7 @@
*/
#include "pool.h"
+#include
#include "context/pool_session_context.h"
#include "protocol/pool_process_query.h"
#include "protocol/pool_proto_modules.h"
@@ -34,11 +35,11 @@
#include "utils/palloc.h"
#include "utils/memutils.h"
#include "auth/md5.h"
+#include
#ifdef HAVE_CRYPT_H
#include
#endif
-
#ifdef HAVE_SYS_TYPES_H
#include
#endif
diff --git a/src/config/pool_config_variables.c b/src/config/pool_config_variables.c
index 25d7ac29..8896cbc9 100644
--- a/src/config/pool_config_variables.c
+++ b/src/config/pool_config_variables.c
@@ -236,6 +236,21 @@ static const struct config_enum_entry backend_clustering_mode_options[] = {
{NULL, 0, false}
};
+static const struct config_enum_entry process_management_mode_options[] = {
+ {"static", PM_STATIC, false},
+ {"dynamic", PM_DYNAMIC, false},
+
+ {NULL, 0, false}
+};
+
+static const struct config_enum_entry process_management_strategy_options[] = {
+ {"aggressive", PM_STRATEGY_AGGRESSIVE, false},
+ {"gentle", PM_STRATEGY_GENTLE, false},
+ {"lazy", PM_STRATEGY_LAZY, false},
+
+ {NULL, 0, false}
+};
+
static const struct config_enum_entry log_standby_delay_options[] = {
{"always", LSD_ALWAYS, false},
{"if_over_threshold", LSD_OVER_THRESHOLD, false},
@@ -1896,7 +1911,7 @@ static struct config_int ConfigureNamesInt[] =
{
{"num_init_children", CFGCXT_INIT, CONNECTION_POOL_CONFIG,
- "Number of children pre-forked for client connections.",
+ "Maximim number of child processs to handle client connections.",
CONFIG_VAR_TYPE_INT, false, 0
},
&g_pool_config.num_init_children,
@@ -1905,6 +1920,28 @@ static struct config_int ConfigureNamesInt[] =
NULL, NULL, NULL
},
+ {
+ {"min_spare_children", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG,
+ "Minimum number of spare child processes.",
+ CONFIG_VAR_TYPE_INT, false, 0
+ },
+ &g_pool_config.min_spare_children,
+ 5,
+ 1, INT_MAX,
+ NULL, NULL, NULL
+ },
+
+ {
+ {"max_spare_children", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG,
+ "Maximum number of spare child processes.",
+ CONFIG_VAR_TYPE_INT, false, 0
+ },
+ &g_pool_config.max_spare_children,
+ 10,
+ 1, INT_MAX,
+ NULL, NULL, NULL
+ },
+
{
{"reserved_connections", CFGCXT_INIT, CONNECTION_POOL_CONFIG,
"Number of reserved connections.",
@@ -2246,6 +2283,27 @@ static struct config_enum ConfigureNamesEnum[] =
NULL, NULL, NULL, NULL
},
+ {
+ {"process_management_mode", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG,
+ "child process management mode.",
+ CONFIG_VAR_TYPE_ENUM, false, 0
+ },
+ (int *) &g_pool_config.process_management,
+ PM_STATIC,
+ process_management_mode_options,
+ NULL, NULL, NULL, NULL
+ },
+
+ {
+ {"process_management_strategy", CFGCXT_RELOAD, CONNECTION_POOL_CONFIG,
+ "child process management strategy.",
+ CONFIG_VAR_TYPE_ENUM, false, 0
+ },
+ (int *) &g_pool_config.process_management_strategy,
+ PM_STRATEGY_GENTLE,
+ process_management_strategy_options,
+ NULL, NULL, NULL, NULL
+ },
{
{"syslog_facility", CFGCXT_RELOAD, LOGGING_CONFIG,
@@ -4863,6 +4921,14 @@ config_post_processor(ConfigContext context, int elevel)
(errmsg("invalid configuration, failover_when_quorum_exists is not allowed in native replication mode")));
return false;
}
+
+ if (pool_config->min_spare_children >= pool_config->max_spare_children)
+ {
+ ereport(elevel,
+ (errmsg("invalid configuration, max_spare_children:%d must be greater than max_spare_children:%d",
+ pool_config->max_spare_children,pool_config->min_spare_children)));
+ return false;
+ }
return true;
}
diff --git a/src/context/pool_process_context.c b/src/context/pool_process_context.c
index 51b08bed..00a04ff8 100644
--- a/src/context/pool_process_context.c
+++ b/src/context/pool_process_context.c
@@ -230,25 +230,30 @@ pool_coninfo_backend_pid(int backend_pid, int *backend_node_id)
for (child = 0; child < pool_config->num_init_children; child++)
{
int pool;
- ProcessInfo *pi = pool_get_process_info(process_info[child].pid);
- for (pool = 0; pool < pool_config->max_pool; pool++)
+ if (process_info[child].pid)
{
- int backend_id;
+ ProcessInfo *pi = pool_get_process_info(process_info[child].pid);
- for (backend_id = 0; backend_id < NUM_BACKENDS; backend_id++)
+ for (pool = 0; pool < pool_config->max_pool; pool++)
{
- int poolBE = pool * MAX_NUM_BACKENDS + backend_id;
+ int backend_id;
- if (ntohl(pi->connection_info[poolBE].pid) == backend_pid)
+ for (backend_id = 0; backend_id < NUM_BACKENDS; backend_id++)
{
- ereport(DEBUG1,
- (errmsg("found for the connection with backend pid:%d on backend node %d", backend_pid, backend_id)));
- *backend_node_id = backend_id;
- return &pi->connection_info[poolBE];
+ int poolBE = pool * MAX_NUM_BACKENDS + backend_id;
+
+ if (ntohl(pi->connection_info[poolBE].pid) == backend_pid)
+ {
+ ereport(DEBUG1,
+ (errmsg("found the connection with backend pid:%d on backend node %d", backend_pid, backend_id)));
+ *backend_node_id = backend_id;
+ return &pi->connection_info[poolBE];
+ }
}
}
}
+
}
return NULL;
}
diff --git a/src/include/main/pool_internal_comms.h b/src/include/main/pool_internal_comms.h
index 1709d1ad..6b4dc60d 100644
--- a/src/include/main/pool_internal_comms.h
+++ b/src/include/main/pool_internal_comms.h
@@ -42,5 +42,4 @@ extern void register_backend_state_sync_req_interrupt(void);
extern void register_inform_quarantine_nodes_req(void);
extern bool register_node_operation_request(POOL_REQUEST_KIND kind,
int *node_id_set, int count, unsigned char flags);
-
#endif /* pool_internal_comms_h */
diff --git a/src/include/pcp/libpcp_ext.h b/src/include/pcp/libpcp_ext.h
index 9d2c0cfd..2d86dd30 100644
--- a/src/include/pcp/libpcp_ext.h
+++ b/src/include/pcp/libpcp_ext.h
@@ -124,7 +124,8 @@ typedef enum
WAIT_FOR_CONNECT,
COMMAND_EXECUTE,
IDLE,
- IDLE_IN_TRANS
+ IDLE_IN_TRANS,
+ CONNECTING
} ProcessStatus;
/*
@@ -176,10 +177,13 @@ typedef struct
* this process */
int client_connection_count; /* how many times clients used this process */
ProcessStatus status;
- char need_to_restart; /* If non 0, exit this child process as
+ bool need_to_restart; /* If non 0, exit this child process as
* soon as current session ends. Typical
* case this flag being set is failback a
* node in streaming replication mode. */
+ bool exit_if_idle;
+ int pooled_connections; /* Total number of pooled connections
+ * by this child */
} ProcessInfo;
/*
diff --git a/src/include/pool.h b/src/include/pool.h
index af070697..f3de1454 100644
--- a/src/include/pool.h
+++ b/src/include/pool.h
@@ -595,6 +595,7 @@ extern BACKEND_STATUS private_backend_status[MAX_NUM_BACKENDS];
extern char remote_host[]; /* client host */
extern char remote_port[]; /* client port */
+
/*
* public functions
*/
diff --git a/src/include/pool_config.h b/src/include/pool_config.h
index 142c6fab..3bf1663e 100644
--- a/src/include/pool_config.h
+++ b/src/include/pool_config.h
@@ -57,6 +57,19 @@ typedef struct
regex_t regexv;
} RegPattern;
+typedef enum ProcessManagementModes
+{
+ PM_STATIC = 1,
+ PM_DYNAMIC
+} ProcessManagementModes;
+
+typedef enum ProcessManagementSstrategies
+{
+ PM_STRATEGY_AGGRESSIVE = 1,
+ PM_STRATEGY_GENTLE,
+ PM_STRATEGY_LAZY
+} ProcessManagementSstrategies;
+
typedef enum NativeReplicationSubModes
{
SLONY_MODE = 1,
@@ -203,6 +216,8 @@ typedef struct
typedef struct
{
ClusteringModes backend_clustering_mode; /* Backend clustering mode */
+ ProcessManagementModes process_management;
+ ProcessManagementSstrategies process_management_strategy;
char **listen_addresses; /* hostnames/IP addresses to listen on */
int port; /* port # to bind */
char **pcp_listen_addresses; /* PCP listen address to listen on */
@@ -212,7 +227,10 @@ typedef struct
int unix_socket_permissions; /* pgpool sockets permissions */
char *wd_ipc_socket_dir; /* watchdog command IPC socket directory */
char *pcp_socket_dir; /* PCP socket directory */
- int num_init_children; /* # of children initially pre-forked */
+ int num_init_children; /* Maximum number of child to
+ * accept connections */
+ int min_spare_children; /* Minimum number of idle children */
+ int max_spare_children; /* Maximum number of idle children */
int listen_backlog_multiplier; /* determines the size of the
* connection queue */
int reserved_connections; /* # of reserved connections */
diff --git a/src/include/protocol/pool_connection_pool.h b/src/include/protocol/pool_connection_pool.h
index 19b8f72a..aee976d7 100644
--- a/src/include/protocol/pool_connection_pool.h
+++ b/src/include/protocol/pool_connection_pool.h
@@ -37,4 +37,5 @@ extern int connect_inet_domain_socket_by_port(char *host, int port, bool retry);
extern int connect_unix_domain_socket_by_port(int port, char *socket_dir, bool retry);
extern int pool_pool_index(void);
extern void close_all_backend_connections(void);
+extern void update_pooled_connection_count(void);
#endif /* pool_connection_pool_h */
diff --git a/src/main/pgpool_main.c b/src/main/pgpool_main.c
index 5dde0364..1fc25bd5 100644
--- a/src/main/pgpool_main.c
+++ b/src/main/pgpool_main.c
@@ -117,6 +117,8 @@ typedef struct User1SignalSlot
#endif
#define PGPOOLMAXLITSENQUEUELENGTH 10000
+#define MAX_ONE_SHOT_KILLS 8
+
#define UNIXSOCK_PATH_BUFLEN sizeof(((struct sockaddr_un *) NULL)->sun_path)
@@ -198,12 +200,16 @@ static void exec_notice_pcp_child(FAILOVER_CONTEXT *failover_context);
static void check_requests(void);
static void print_signal_member(sigset_t *sig);
+static void service_child_processes(void);
+static int select_victim_processes(int *process_info_idxs, int count);
static struct sockaddr_un *un_addrs; /* unix domain socket path */
static struct sockaddr_un pcp_un_addr; /* unix domain socket path for PCP */
ProcessInfo *process_info = NULL; /* Per child info table on shmem */
volatile User1SignalSlot *user1SignalSlot = NULL; /* User 1 signal slot on
* shmem */
+int current_child_process_count;
+
struct timeval random_start_time;
/*
@@ -515,15 +521,24 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
* is harmless.
*/
POOL_SETMASK(&BlockSig);
+
+ if (pool_config->process_management == PM_DYNAMIC)
+ current_child_process_count = pool_config->max_spare_children;
+ else
+ current_child_process_count = pool_config->num_init_children;
+
/* fork the children */
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < current_child_process_count; i++)
{
- process_info[i].pid = fork_a_child(fds, i);
process_info[i].start_time = time(NULL);
process_info[i].client_connection_count = 0;
process_info[i].status = WAIT_FOR_CONNECT;
process_info[i].connected = 0;
process_info[i].wait_for_connect = 0;
+ process_info[i].pooled_connections = 0;
+ process_info[i].need_to_restart = false;
+ process_info[i].exit_if_idle = false;
+ process_info[i].pid = fork_a_child(fds, i);
}
/* create pipe for delivering event */
@@ -656,11 +671,15 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
for (;;)
{
int r;
- struct timeval t = {3, 0};
+ struct timeval t = {2, 0};
POOL_SETMASK(&UnBlockSig);
r = pool_pause(&t);
POOL_SETMASK(&BlockSig);
+
+ if (pool_config->process_management == PM_DYNAMIC)
+ service_child_processes();
+
if (r > 0)
break;
}
@@ -1958,7 +1977,8 @@ reaper(void)
{
found = true;
/* if found, fork a new child */
- if (!switching && !exiting && restart_child)
+ if (!switching && !exiting && restart_child &&
+ pool_config->process_management != PM_DYNAMIC)
{
process_info[i].pid = fork_a_child(fds, i);
process_info[i].start_time = time(NULL);
@@ -1969,7 +1989,10 @@ reaper(void)
process_info[i].wait_for_connect = 0;
}
else
+ {
+ current_child_process_count--;
process_info[i].pid = 0;
+ }
break;
}
}
@@ -2050,12 +2073,25 @@ int *
pool_get_process_list(int *array_size)
{
int *array;
+ int cnt = 0;
int i;
- *array_size = pool_config->num_init_children;
+ for (i=0;i < pool_config->num_init_children;i++)
+ {
+ if (process_info[i].pid != 0)
+ cnt++;
+ }
+ *array_size = cnt;
+ cnt = 0;
array = palloc0(*array_size * sizeof(int));
- for (i = 0; i < *array_size; i++)
- array[i] = process_info[i].pid;
+ for (i = 0; i < pool_config->num_init_children && cnt < *array_size; i++)
+ {
+ if (process_info[i].pid != 0)
+ {
+ array[cnt] = process_info[i].pid;
+ cnt++;
+ }
+ }
return array;
}
@@ -2977,7 +3013,10 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps)
process_info = (ProcessInfo *)pool_shared_memory_segment_get_chunk(pool_config->num_init_children * (sizeof(ProcessInfo)));
for (i = 0; i < pool_config->num_init_children; i++)
+ {
process_info[i].connection_info = pool_coninfo(i, 0, 0);
+ process_info[i].pid = 0;
+ }
user1SignalSlot = (User1SignalSlot *)pool_shared_memory_segment_get_chunk(sizeof(User1SignalSlot));
@@ -3734,6 +3773,7 @@ sync_backend_from_watchdog(void)
process_info[i].status = WAIT_FOR_CONNECT;
process_info[i].connected = 0;
process_info[i].wait_for_connect = 0;
+ process_info[i].pooled_connections = 0;
}
}
else
@@ -3749,7 +3789,8 @@ sync_backend_from_watchdog(void)
*/
for (i = 0; i < pool_config->num_init_children; i++)
{
- process_info[i].need_to_restart = 1;
+ if (process_info[i].pid)
+ process_info[i].need_to_restart = true;
}
}
@@ -4598,6 +4639,8 @@ exec_child_restart(FAILOVER_CONTEXT *failover_context, int node_id)
process_info[i].status = WAIT_FOR_CONNECT;
process_info[i].connected = 0;
process_info[i].wait_for_connect = 0;
+ process_info[i].pooled_connections = 0;
+
}
}
else
@@ -4890,3 +4933,180 @@ void print_signal_member(sigset_t *sig)
ereport(LOG,
(errmsg("SIGTERM is member")));
}
+
+/*
+* Function does the housekeeping of spare child processes
+*/
+static void
+service_child_processes(void)
+{
+ int connected_children = Req_info->conn_counter;
+ int idle_children = current_child_process_count - connected_children;
+ static int high_load_counter = 0;
+ ereport(DEBUG2,
+ (errmsg("current_children_count = %d idle_children = %d connected_children = %d high_load_counter = %d",
+ current_child_process_count, idle_children, connected_children, high_load_counter)));
+ if (idle_children > pool_config->max_spare_children)
+ {
+ int ki;
+ int victim_count;
+ int kill_process_info_idxs[MAX_ONE_SHOT_KILLS];
+ int kill_count = idle_children - pool_config->max_spare_children;
+ int cycle_skip_count_before_scale_down;
+ int cycle_skip_between_scale_down;
+ int one_shot_kill_count;
+
+ switch (pool_config->process_management_strategy)
+ {
+ case PM_STRATEGY_AGGRESSIVE:
+ cycle_skip_count_before_scale_down = 25; /* roughly 50 seconds */
+ cycle_skip_between_scale_down = 2;
+ one_shot_kill_count = MAX_ONE_SHOT_KILLS;
+ break;
+
+ case PM_STRATEGY_LAZY:
+ cycle_skip_count_before_scale_down = 150; /* roughly 300 seconds */
+ cycle_skip_between_scale_down = 10;
+ one_shot_kill_count = 3;
+ break;
+
+ case PM_STRATEGY_GENTLE:
+ cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
+ cycle_skip_between_scale_down = 5;
+ one_shot_kill_count = 3;
+ break;
+
+ default:
+ /* should never come here, but if we do use gentle counts*/
+ cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
+ cycle_skip_between_scale_down = 5;
+ one_shot_kill_count = 3;
+ break;
+ }
+
+ /* Do not scale down too quickly */
+ if (++high_load_counter < cycle_skip_count_before_scale_down || high_load_counter % cycle_skip_between_scale_down)
+ return;
+
+ memset(kill_process_info_idxs, -1 ,sizeof(kill_process_info_idxs));
+
+ if (kill_count > one_shot_kill_count)
+ kill_count = one_shot_kill_count;
+
+ victim_count = select_victim_processes(kill_process_info_idxs, kill_count);
+
+ for (ki = 0; ki < victim_count; ki++)
+ {
+ int index = kill_process_info_idxs[ki];
+ if (index >=0)
+ {
+ if (process_info[index].pid && process_info[index].status == WAIT_FOR_CONNECT)
+ {
+ ereport(DEBUG1,
+ (errmsg("asking child process with pid:%d to kill itself to satisfy max_spare_children",
+ process_info[index].pid),
+ errdetail("child process has %d pooled connections",process_info[index].pooled_connections)));
+ process_info[index].exit_if_idle = true;
+ kill(process_info[index].pid, SIGUSR2);
+ }
+ }
+ }
+ }
+ else
+ {
+ /* Reset the high load counter */
+ high_load_counter = 0;
+ /*See if we need to spawn new children */
+ if (idle_children < pool_config->min_spare_children)
+ {
+ int i;
+ int spawned = 0;
+ int new_spawn_no = pool_config->min_spare_children - idle_children;
+ /* Add 25% of max_spare_children */
+ new_spawn_no += pool_config->max_spare_children / 4;
+ if (new_spawn_no + current_child_process_count > pool_config->num_init_children)
+ {
+ ereport(LOG,
+ (errmsg("we have hit the ceiling, spawning %d child(ren)",
+ pool_config->num_init_children - current_child_process_count)));
+ new_spawn_no = pool_config->num_init_children - current_child_process_count;
+ }
+ if (new_spawn_no <= 0)
+ return;
+ for (i = 0; i < pool_config->num_init_children; i++)
+ {
+ if (process_info[i].pid == 0)
+ {
+ process_info[i].start_time = time(NULL);
+ process_info[i].client_connection_count = 0;
+ process_info[i].status = WAIT_FOR_CONNECT;
+ process_info[i].connected = 0;
+ process_info[i].wait_for_connect = 0;
+ process_info[i].pooled_connections = 0;
+ process_info[i].need_to_restart = false;
+ process_info[i].exit_if_idle = false;
+ process_info[i].pid = fork_a_child(fds, i);
+
+ current_child_process_count++;
+ if (++spawned >= new_spawn_no)
+ break;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Function selects the child processes that can be killed, based on the
+ * selection criteria of choosing the processes with the minimum number of
+ * pooled connections.
+ * Returns the total number of identified processes and fills process_info_idxs
+ * with the victim children's process_info indexes.
+ */
+static int
+select_victim_processes(int *process_info_idxs, int count)
+{
+ int i, ki;
+ bool found_enough = false;
+ int selected_count = 0;
+
+ if (count <= 0)
+ return 0;
+
+ for (i = 0; i < pool_config->num_init_children; i++)
+ {
+ /* Only the child process in waiting for connect can be terminated */
+ if (process_info[i].pid && process_info[i].status == WAIT_FOR_CONNECT)
+ {
+ if (selected_count < count)
+ {
+ process_info_idxs[selected_count++] = i;
+ }
+ else
+ {
+ found_enough = true;
+ /* we don't bother selecting the child having least pooled connection with
+ * aggressive strategy
+ */
+ if (pool_config->process_management_strategy != PM_STRATEGY_AGGRESSIVE)
+ {
+ for (ki = 0; ki < count; ki++)
+ {
+ int old_index = process_info_idxs[ki];
+ if (old_index < 0 || process_info[old_index].pooled_connections > process_info[i].pooled_connections)
+ {
+ process_info_idxs[ki] = i;
+ found_enough = false;
+ break;
+ }
+ if (process_info[old_index].pooled_connections)
+ found_enough = false;
+ }
+ }
+ }
+ }
+ if (found_enough)
+ break;
+ }
+ return selected_count;
+}
diff --git a/src/protocol/child.c b/src/protocol/child.c
index 9056a29a..f68c9eda 100644
--- a/src/protocol/child.c
+++ b/src/protocol/child.c
@@ -82,6 +82,7 @@ static bool connect_using_existing_connection(POOL_CONNECTION * frontend,
POOL_CONNECTION_POOL * backend,
StartupPacket *sp);
static void check_restart_request(void);
+static void check_exit_request(void);
static void enable_authentication_timeout(void);
static void disable_authentication_timeout(void);
static int wait_for_new_connections(int *fds, SockAddr *saddr);
@@ -300,7 +301,7 @@ do_child(int *fds)
pool_close(child_frontend);
child_frontend = NULL;
}
-
+ update_pooled_connection_count();
MemoryContextSwitchTo(TopMemoryContext);
FlushErrorState();
}
@@ -325,6 +326,7 @@ do_child(int *fds)
/* pgpool stop request already sent? */
check_stop_request();
check_restart_request();
+ check_exit_request();
accepted = 0;
/* Destroy session context for just in case... */
pool_session_context_destroy();
@@ -345,6 +347,10 @@ do_child(int *fds)
if (front_end_fd == RETRY)
continue;
+ set_process_status(CONNECTING);
+ /* Reset any exit if idle request even if it's pending */
+ pool_get_my_process_info()->exit_if_idle = false;
+
con_count = connection_count_up();
if (pool_config->reserved_connections > 0)
@@ -459,7 +465,7 @@ do_child(int *fds)
/* Mark this connection pool is not connected from frontend */
pool_coninfo_unset_frontend_connected(pool_get_process_context()->proc_id, pool_pool_index());
-
+ update_pooled_connection_count();
accepted = 0;
connection_count_down();
if (pool_config->log_disconnections)
@@ -483,6 +489,8 @@ do_child(int *fds)
child_exit(POOL_EXIT_NO_RESTART);
}
+
+
/* -------------------------------------------------------------------
* private functions
* -------------------------------------------------------------------
@@ -1432,6 +1440,23 @@ pool_initialize_private_backend_status(void)
my_main_node_id = REAL_MAIN_NODE_ID;
}
+static void
+check_exit_request(void)
+{
+ /*
+ * Check if exit request is set because of spare children management.
+ */
+ if (pool_get_my_process_info()->exit_if_idle)
+ {
+ ereport(LOG,
+ (errmsg("Exit flag set"),
+ errdetail("Exiting myself")));
+
+ pool_get_my_process_info()->exit_if_idle = false;
+ child_exit(POOL_EXIT_NO_RESTART);
+ }
+}
+
static void
check_restart_request(void)
{
@@ -1445,7 +1470,7 @@ check_restart_request(void)
(errmsg("failover or failback event detected"),
errdetail("restarting myself")));
- pool_get_my_process_info()->need_to_restart = 0;
+ pool_get_my_process_info()->need_to_restart = false;
child_exit(POOL_EXIT_AND_RESTART);
}
}
diff --git a/src/protocol/pool_connection_pool.c b/src/protocol/pool_connection_pool.c
index ed28d870..7f3c44ce 100644
--- a/src/protocol/pool_connection_pool.c
+++ b/src/protocol/pool_connection_pool.c
@@ -471,7 +471,7 @@ pool_backend_timer(void)
nearest = 1;
pool_alarm(pool_backend_timer_handler, nearest);
}
-
+ update_pooled_connection_count();
POOL_SETMASK(&UnBlockSig);
}
@@ -1066,3 +1066,16 @@ close_all_backend_connections(void)
POOL_SETMASK(&oldmask);
}
+
+void update_pooled_connection_count(void)
+{
+ int i;
+ int count = 0;
+ POOL_CONNECTION_POOL *p = pool_connection_pool;
+ for (i = 0; i < pool_config->max_pool; i++, p++)
+ {
+ if (MAIN_CONNECTION(p))
+ count++;
+ }
+ pool_get_my_process_info()->pooled_connections = count;
+}
diff --git a/src/sample/pgpool.conf.sample-stream b/src/sample/pgpool.conf.sample-stream
index a0b02e8b..d5745a46 100644
--- a/src/sample/pgpool.conf.sample-stream
+++ b/src/sample/pgpool.conf.sample-stream
@@ -170,9 +170,45 @@ backend_clustering_mode = 'streaming_replication'
# - Concurrent session and pool size -
+#process_management_mode = static
+ # process management mode for child processes
+ # Valid options:
+ # static: all children are pre-forked at startup
+ # dynamic: child processes are spawned on demand.
+ # number of idle child processes at any time are
+ # configured by min_spare_children and max_spare_children
+
+#process_management_strategy = gentle
+ # process management strategy to satisfy spare processes
+ # Valid options:
+ #
+ # lazy: In this mode the scale down is performed gradually
+ # and only gets triggered when excessive spare processes count
+ # remains high for more than 5 mins
+ #
+ # gentle: In this mode the scale down is performed gradually
+ # and only gets triggered when excessive spare processes count
+ # remains high for more than 2 mins
+ #
+ # aggressive: In this mode the scale down is performed aggressively
+ # and gets triggered more frequently in case of higher spare processes.
+ # This mode uses faster and slightly less smart process selection criteria
+ # to identify the child processes that can be serviced to satisfy
+ # max_spare_children
+ #
+ # (Only applicable for dynamic process management mode)
+
#num_init_children = 32
- # Number of concurrent sessions allowed
+ # Maximum number of concurrent sessions allowed
# (change requires restart)
+#min_spare_children = 5
+ # Minimum number of spare child processes waiting for connection
+ # (Only applicable for dynamic process management mode)
+
+#max_spare_children = 10
+ # Maximum number of idle child processes waiting for connection
+ # (Only applicable for dynamic process management mode)
+
#max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
diff --git a/src/tools/pcp/pcp_frontend_client.c b/src/tools/pcp/pcp_frontend_client.c
index 4ae47ffa..ef068e29 100644
--- a/src/tools/pcp/pcp_frontend_client.c
+++ b/src/tools/pcp/pcp_frontend_client.c
@@ -4,7 +4,7 @@
* pgpool: a language independent connection pool server for PostgreSQL
* written by Tatsuo Ishii
*
- * Copyright (c) 2003-2021 PgPool Global Development Group
+ * Copyright (c) 2003-2022 PgPool Global Development Group
*
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby
@@ -745,7 +745,7 @@ output_procinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
if (pools == NULL)
break;
- if ((!all) && (pools->database[0] == '\0'))
+ if (((!all) && (pools->database[0] == '\0')) || (pools->pool_pid[0] == '\0'))
continue;
printed = true;
printf(format,
diff --git a/src/utils/pool_process_reporting.c b/src/utils/pool_process_reporting.c
index acb32c72..9263b452 100644
--- a/src/utils/pool_process_reporting.c
+++ b/src/utils/pool_process_reporting.c
@@ -335,10 +335,6 @@ get_config(int *nrows)
/* POOLS */
/* - Pool size - */
- StrNCpy(status[i].name, "num_init_children", POOLCONFIG_MAXNAMELEN);
- snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->num_init_children);
- StrNCpy(status[i].desc, "# of children initially pre-forked", POOLCONFIG_MAXDESCLEN);
- i++;
StrNCpy(status[i].name, "listen_backlog_multiplier", POOLCONFIG_MAXNAMELEN);
snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->listen_backlog_multiplier);
@@ -360,6 +356,21 @@ get_config(int *nrows)
StrNCpy(status[i].desc, "max # of connection pool per child", POOLCONFIG_MAXDESCLEN);
i++;
+ StrNCpy(status[i].name, "num_init_children", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->num_init_children);
+ StrNCpy(status[i].desc, "# of children initially pre-forked", POOLCONFIG_MAXDESCLEN);
+ i++;
+
+ StrNCpy(status[i].name, "min_spare_children", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->min_spare_children);
+ StrNCpy(status[i].desc, "min # of spare children waitting for connection", POOLCONFIG_MAXDESCLEN);
+ i++;
+
+ StrNCpy(status[i].name, "max_spare_children", POOLCONFIG_MAXNAMELEN);
+ snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->max_spare_children);
+ StrNCpy(status[i].desc, "max # of spare children waitting for connection", POOLCONFIG_MAXDESCLEN);
+ i++;
+
/* - Life time - */
StrNCpy(status[i].name, "child_life_time", POOLCONFIG_MAXNAMELEN);
snprintf(status[i].value, POOLCONFIG_MAXVALLEN, "%d", pool_config->child_life_time);
@@ -1622,6 +1633,9 @@ get_pools(int *nrows)
case IDLE_IN_TRANS:
StrNCpy(pools[lines].status, "Idle in transaction", POOLCONFIG_MAXPROCESSSTATUSLEN);
break;
+ case CONNECTING:
+ StrNCpy(pools[lines].status, "Connecting", POOLCONFIG_MAXPROCESSSTATUSLEN);
+ break;
default:
*(pools[lines].status) = '\0';
}
@@ -1746,6 +1760,9 @@ get_processes(int *nrows)
case IDLE_IN_TRANS:
StrNCpy(processes[child].status, "Idle in transaction", POOLCONFIG_MAXPROCESSSTATUSLEN);
break;
+ case CONNECTING:
+ StrNCpy(processes[child].status, "Connecting", POOLCONFIG_MAXPROCESSSTATUSLEN);
+ break;
default:
*(processes[child].status) = '\0';
}
diff --git a/src/watchdog/wd_json_data.c b/src/watchdog/wd_json_data.c
index e1dd04f4..83842a75 100644
--- a/src/watchdog/wd_json_data.c
+++ b/src/watchdog/wd_json_data.c
@@ -48,8 +48,6 @@ get_pool_config_from_json(char *json_data, int data_len)
if (root == NULL || root->type != json_object)
goto ERROR_EXIT;
- if (json_get_int_value_for_key(root, "num_init_children", &config->num_init_children))
- goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "listen_backlog_multiplier", &config->listen_backlog_multiplier))
goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "child_life_time", &config->child_life_time))
@@ -62,6 +60,16 @@ get_pool_config_from_json(char *json_data, int data_len)
goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "max_pool", &config->max_pool))
goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "num_init_children", &config->num_init_children))
+ goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "min_spare_children", &config->min_spare_children))
+ goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "max_spare_children", &config->max_spare_children))
+ goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "process_management_mode", (int*)&config->process_management))
+ goto ERROR_EXIT;
+ if (json_get_int_value_for_key(root, "process_management_strategy", (int*)&config->process_management_strategy))
+ goto ERROR_EXIT;
if (json_get_bool_value_for_key(root, "replication_mode", &config->replication_mode))
goto ERROR_EXIT;
if (json_get_bool_value_for_key(root, "enable_pool_hba", &config->enable_pool_hba))
@@ -174,13 +182,17 @@ get_pool_config_json(void)
JsonNode *jNode = jw_create_with_object(true);
- jw_put_int(jNode, "num_init_children", pool_config->num_init_children);
jw_put_int(jNode, "listen_backlog_multiplier", pool_config->listen_backlog_multiplier);
jw_put_int(jNode, "child_life_time", pool_config->child_life_time);
jw_put_int(jNode, "connection_life_time", pool_config->connection_life_time);
jw_put_int(jNode, "child_max_connections", pool_config->child_max_connections);
jw_put_int(jNode, "client_idle_limit", pool_config->client_idle_limit);
jw_put_int(jNode, "max_pool", pool_config->max_pool);
+ jw_put_int(jNode, "num_init_children", pool_config->num_init_children);
+ jw_put_int(jNode, "min_spare_children", pool_config->min_spare_children);
+ jw_put_int(jNode, "max_spare_children", pool_config->max_spare_children);
+ jw_put_int(jNode, "process_management_mode", pool_config->process_management);
+ jw_put_int(jNode, "process_management_strategy", pool_config->process_management_strategy);
jw_put_bool(jNode, "replication_mode", pool_config->replication_mode);
jw_put_bool(jNode, "enable_pool_hba", pool_config->enable_pool_hba);
jw_put_bool(jNode, "load_balance_mode", pool_config->load_balance_mode);