View Issue Details
| ID | Project | Category | View Status | Date Submitted | Last Update |
|---|---|---|---|---|---|
| 0000780 | Pgpool-II | Bug | public | 2023-01-19 16:32 | 2023-06-21 19:42 |
| Reporter | shahmir | Assigned To | t-ishii | ||
| Priority | high | Severity | major | Reproducibility | always |
| Status | assigned | Resolution | open | ||
| Platform | linux | OS | ubuntu | OS Version | 22.04 |
| Product Version | 4.3.3 | ||||
| Summary | 0000780: pool_send_and_wait: Error or notice message from backend | ||||
| Description | LOG: pool_send_and_wait: Error or notice message from backend: : DB node id: 2 backend pid: 59712 statement: "DEALLOCATE "_KEYSET_0x25587a0"" message: "prepared statement "_KEYSET_0x25587a0" does not exist" 2023-01-18 19:25:50.337: [unknown] pid 168794: WARNING: packet kind of backend 2 ['E'] does not match with main/majority nodes packet kind ['C'] 2023-01-18 19:25:50.337: [unknown] pid 168794: FATAL: failed to read kind from backend 2023-01-18 19:25:50.338: [unknown] pid 168794: DETAIL: kind mismatch among backends. Possible last query was: "DEALLOCATE "_KEYSET_0x25587a0"" kind details are: 0[C] 2[E: prepared statement "_KEYSET_0x25587a0" does not exist] | ||||
| Steps To Reproduce | Just connect our app through pgpool to Postgres db | ||||
| Additional Information | log from app: 2023-01-18T19:24:19.565943 1863161[1863161] 0000000000000000 [DM_OBJ_MGR_E_CURSOR_FAIL]error: "In operation FetchWithLock an attempt to create cursor failed; query was: 'M_.r_object_id=:dmb_handlep'; error from database system was: STATE=08S01, CODE=35, MSG=SQLExecDirect unable due to the connection lost | ||||
| Tags | error | ||||
|
|
To investigate the error we need pgpool.conf. Please share it. You can comment out something you do not want to put in public. |
|
|
Hi, thank you for answer. File attached. And please can you tell me why pgpool cannot find primary after reboot primary db..show pool_nodes tell just that this node is standby and down. But really node after reboot is UPS and Primary. pgpool.conf (49,438 bytes)
# ----------------------------
# pgPool-II configuration file
# ----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# Whitespace may be used. Comments are introduced with "#" anywhere on a line.
# The complete list of parameter names and allowed values can be found in the
# pgPool-II documentation.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pgpool reload". Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
#
#------------------------------------------------------------------------------
# BACKEND CLUSTERING MODE
# Choose one of: 'streaming_replication', 'native_replication',
# 'logical_replication', 'slony', 'raw' or 'snapshot_isolation'
# (change requires restart)
#------------------------------------------------------------------------------
backend_clustering_mode = 'streaming_replication'
#------------------------------------------------------------------------------
# CONNECTIONS
#------------------------------------------------------------------------------
# - pgpool Connection Settings -
listen_addresses = '*'
# Host name or IP address to listen on:
# '*' for all, '' for no TCP/IP connections
# (change requires restart)
#port = 5433
# Port number
# (change requires restart)
#socket_dir = '/var/run/postgresql'
# Unix domain socket path
# The Debian package defaults to
# /var/run/postgresql
# (change requires restart)
reserved_connections = 0
# Number of reserved connections.
# Pgpool-II does not accept connections if over
# num_init_children - reserved_connections.
# - pgpool Communication Manager Connection Settings -
pcp_listen_addresses = '*'
# Host name or IP address for pcp process to listen on:
# '*' for all, '' for no TCP/IP connections
# (change requires restart)
pcp_port = 9898
# Port number for pcp
# (change requires restart)
#pcp_socket_dir = '/var/run/postgresql'
# Unix domain socket path for pcp
# The Debian package defaults to
# /var/run/postgresql
# (change requires restart)
#listen_backlog_multiplier = 2
# Set the backlog parameter of listen(2) to
# num_init_children * listen_backlog_multiplier.
# (change requires restart)
#serialize_accept = off
# whether to serialize accept() call to avoid thundering herd problem
# (change requires restart)
# - Backend Connection Settings -
backend_hostname0 = '****'
# Host name or IP address to connect to for backend 0
backend_port0 = 5432
# Port number for backend 0
backend_weight0 = 0
# Weight for backend 0 (only in load balancing mode)
backend_data_directory0 = '/pgsql/data'
# Data directory for backend 0
#backend_flag0 = 'ALLOW_TO_FAILOVER'
# Controls various backend behavior
# ALLOW_TO_FAILOVER, DISALLOW_TO_FAILOVER
# or ALWAYS_PRIMARY
#backend_application_name0 = 'server0'
# walsender's application_name, used for "show pool_nodes" command
backend_hostname1 = '****'
backend_port1 = 5432
backend_weight1 = 1
backend_data_directory1 = '/pgsql/data'
#backend_flag1 = 'DISALLOW_TO_FAILOVER'
#backend_application_name1 = 'server1'
#
backend_hostname2 = '*****'
backend_port2 = 5432
backend_weight2 = 1
backend_data_directory2 = '/pgsql/data'
#backend_flag2 = 'DISALLOW_TO_FAILOVER'
# - Authentication -
#enable_pool_hba = off
# Use pool_hba.conf for client authentication
#pool_passwd = 'pool_passwd'
# File name of pool_passwd for md5 authentication.
# "" disables pool_passwd.
# (change requires restart)
#authentication_timeout = 1min
# Delay in seconds to complete client authentication
# 0 means no timeout.
#allow_clear_text_frontend_auth = off
# Allow Pgpool-II to use clear text password authentication
# with clients, when pool_passwd does not
# contain the user password
# - SSL Connections -
#ssl = off
# Enable SSL support
# (change requires restart)
#ssl_key = 'server.key'
# SSL private key file
# (change requires restart)
#ssl_cert = 'server.crt'
# SSL public certificate file
# (change requires restart)
#ssl_ca_cert = ''
# Single PEM format file containing
# CA root certificate(s)
# (change requires restart)
#ssl_ca_cert_dir = ''
# Directory containing CA root certificate(s)
# (change requires restart)
#ssl_crl_file = ''
# SSL certificate revocation list file
# (change requires restart)
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL'
# Allowed SSL ciphers
# (change requires restart)
#ssl_prefer_server_ciphers = off
# Use server's SSL cipher preferences,
# rather than the client's
# (change requires restart)
#ssl_ecdh_curve = 'prime256v1'
# Name of the curve to use in ECDH key exchange
#ssl_dh_params_file = ''
# Name of the file containing Diffie-Hellman parameters used
# for so-called ephemeral DH family of SSL cipher.
#ssl_passphrase_command=''
# Sets an external command to be invoked when a passphrase
# for decrypting an SSL file needs to be obtained
# (change requires restart)
#------------------------------------------------------------------------------
# POOLS
#------------------------------------------------------------------------------
# - Concurrent session and pool size -
num_init_children = 100
# Number of concurrent sessions allowed
# (change requires restart)
max_pool = 4
# Number of connection pool caches per connection
# (change requires restart)
# - Life time -
#child_life_time = 5min
# Pool exits after being idle for this many seconds
#child_max_connections = 0
# Pool exits after receiving that many connections
# 0 means no exit
#connection_life_time = 0
# Connection to backend closes after being idle for this many seconds
# 0 means no close
#client_idle_limit = 0
# Client is disconnected after being idle for that many seconds
# (even inside an explicit transactions!)
# 0 means no disconnection
#------------------------------------------------------------------------------
# LOGS
#------------------------------------------------------------------------------
# - Where to log -
#log_destination = 'stderr'
# Where to log
# Valid values are combinations of stderr,
# and syslog. Default to stderr.
# - What to log -
#log_line_prefix = '%m: %a pid %p: ' # printf-style string to output at beginning of each log line.
#log_connections = off
# Log connections
#log_disconnections = off
# Log disconnections
#log_hostname = off
# Hostname will be shown in ps status
# and in logs if connections are logged
log_statement = on
# Log all statements
log_per_node_statement = on
# Log all statements
# with node and backend information
#log_client_messages = off
# Log any client messages
#log_standby_delay = 'if_over_threshold'
# Log standby delay
# Valid values are combinations of always,
# if_over_threshold, none
# - Syslog specific -
#syslog_facility = 'LOCAL0'
# Syslog local facility. Default to LOCAL0
#syslog_ident = 'pgpool'
# Syslog program identification string
# Default to 'pgpool'
# - Debug -
#log_error_verbosity = default # terse, default, or verbose messages
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
# This is used when logging to stderr:
logging_collector = on
# Enable capturing of stderr
# into log files.
# (change requires restart)
# -- Only used if logging_collector is on ---
log_directory = '/var/log/pgpool_log'
# directory where log files are written,
# can be absolute
log_filename = 'pgpool-%Y-%m-%d_%H%M%S.log'
# log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600
# creation mode for log files,
# begin with 0 to use octal notation
log_truncate_on_rotation = on
# If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d
# Automatic rotation of logfiles will
# happen after that much time (minutes).
# 0 disables time based rotation.
#log_rotation_size = 10MB
# Automatic rotation of logfiles will
# happen after that much (KB) log output.
# 0 disables size based rotation.
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
pid_file_name = '/var/run/pgpool/pgpool.pid'
# PID file name
# Can be specified as relative to the
# location of pgpool.conf file or
# as an absolute path
# (change requires restart)
#logdir = '/var/log/postgresql'
# Directory of pgPool status file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTION POOLING
#------------------------------------------------------------------------------
#connection_cache = on
# Activate connection pools
# (change requires restart)
# Semicolon separated list of queries
# to be issued at the end of a session
# The default is for 8.3 and later
#reset_query_list = 'ABORT; DISCARD ALL'
# The following one is for 8.2 and before
#reset_query_list = 'ABORT; RESET ALL; SET SESSION AUTHORIZATION DEFAULT'
#------------------------------------------------------------------------------
# REPLICATION MODE
#------------------------------------------------------------------------------
#replicate_select = off
# Replicate SELECT statements
# when in replication mode
# replicate_select is higher priority than
# load_balance_mode.
#insert_lock = off
# Automatically locks a dummy row or a table
# with INSERT statements to keep SERIAL data
# consistency
# Without SERIAL, no lock will be issued
#lobj_lock_table = ''
# When rewriting lo_creat command in
# replication mode, specify table name to
# lock
# - Degenerate handling -
#replication_stop_on_mismatch = off
# On disagreement with the packet kind
# sent from backend, degenerate the node
# which is most likely "minority"
# If off, just force to exit this session
#failover_if_affected_tuples_mismatch = off
# On disagreement with the number of affected
# tuples in UPDATE/DELETE queries, then
# degenerate the node which is most likely
# "minority".
# If off, just abort the transaction to
# keep the consistency
#------------------------------------------------------------------------------
# LOAD BALANCING MODE
#------------------------------------------------------------------------------
#load_balance_mode = on
# Activate load balancing mode
# (change requires restart)
#ignore_leading_white_space = on
# Ignore leading white spaces of each query
#read_only_function_list = ''
# Comma separated list of function names
# that don't write to database
# Regexp are accepted
#write_function_list = ''
# Comma separated list of function names
# that write to database
# Regexp are accepted
# If both read_only_function_list and write_function_list
# is empty, function's volatile property is checked.
# If it's volatile, the function is regarded as a
# writing function.
#primary_routing_query_pattern_list = ''
# Semicolon separated list of query patterns
# that should be sent to primary node
# Regexp are accepted
# valid for streaming replication mode only.
#database_redirect_preference_list = ''
# comma separated list of pairs of database and node id.
# example: postgres:primary,mydb[0-4]:1,mydb[5-9]:2'
# valid for streaming replication mode only.
#app_name_redirect_preference_list = ''
# comma separated list of pairs of app name and node id.
# example: 'psql:primary,myapp[0-4]:1,myapp[5-9]:standby'
# valid for streaming replication mode only.
#allow_sql_comments = off
# if on, ignore SQL comments when judging if load balance or
# query cache is possible.
# If off, SQL comments effectively prevent the judgment
# (pre 3.4 behavior).
#disable_load_balance_on_write = 'transaction'
# Load balance behavior when write query is issued
# in an explicit transaction.
#
# Valid values:
#
# 'transaction' (default):
# if a write query is issued, subsequent
# read queries will not be load balanced
# until the transaction ends.
#
# 'trans_transaction':
# if a write query is issued, subsequent
# read queries in an explicit transaction
# will not be load balanced until the session ends.
#
# 'dml_adaptive':
# Queries on the tables that have already been
# modified within the current explicit transaction will
# not be load balanced until the end of the transaction.
#
# 'always':
# if a write query is issued, read queries will
# not be load balanced until the session ends.
#
# Note that any query not in an explicit transaction
# is not affected by the parameter except 'always'.
#dml_adaptive_object_relationship_list= ''
# comma separated list of object pairs
# [object]:[dependent-object], to disable load balancing
# of dependent objects within the explicit transaction
# after WRITE statement is issued on (depending-on) object.
#
# example: 'tb_t1:tb_t2,insert_tb_f_func():tb_f,tb_v:my_view'
# Note: function name in this list must also be present in
# the write_function_list
# only valid for disable_load_balance_on_write = 'dml_adaptive'.
#statement_level_load_balance = off
# Enables statement level load balancing
#------------------------------------------------------------------------------
# STREAMING REPLICATION MODE
#------------------------------------------------------------------------------
# - Streaming -
#sr_check_period = 10
# Streaming replication check period
# Disabled (0) by default
sr_check_user = 'postgres'
# Streaming replication check user
# This is necessary even if you disable streaming
# replication delay check by sr_check_period = 0
sr_check_password = '*****'
# Password for streaming replication check user
# Leaving it empty will make Pgpool-II to first look for the
# Password in pool_passwd file before using the empty password
#sr_check_database = 'postgres'
# Database name for streaming replication check
#delay_threshold = 0
# Threshold before not dispatching query to standby node
# Unit is in bytes
# Disabled (0) by default
#prefer_lower_delay_standby = off
# If delay_threshold is set larger than 0, Pgpool-II send to
# the primary when selected node is delayed over delay_threshold.
# If this is set to on, Pgpool-II send query to other standby
# delayed lower.
# - Special commands -
#follow_primary_command = ''
# Executes this command after main node failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
# %m = new main node id
# %H = new main node hostname
# %M = old main node id
# %P = old primary node id
# %r = new main port number
# %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
#------------------------------------------------------------------------------
# HEALTH CHECK GLOBAL PARAMETERS
#------------------------------------------------------------------------------
health_check_period = 10
# Health check period
# Disabled (0) by default
#health_check_timeout = 20
# Health check timeout
# 0 means no timeout
health_check_user = 'repuser'
# Health check user
health_check_password = '*****'
# Password for health check user
# Leaving it empty will make Pgpool-II to first look for the
# Password in pool_passwd file before using the empty password
#health_check_database = ''
# Database name for health check. If '', tries 'postgres' first.
#health_check_max_retries = 0
# Maximum number of times to retry a failed health check before giving up.
#health_check_retry_delay = 1
# Amount of time to wait (in seconds) between retries.
#connect_timeout = 10000
# Timeout value in milliseconds before giving up to connect to backend.
# Default is 10000 ms (10 second). Flaky network user may want to increase
# the value. 0 means no timeout.
# Note that this value is not only used for health check,
# but also for ordinary connection to backend.
#------------------------------------------------------------------------------
# HEALTH CHECK PER NODE PARAMETERS (OPTIONAL)
#------------------------------------------------------------------------------
#health_check_period0 = 0
#health_check_timeout0 = 20
#health_check_user0 = 'nobody'
#health_check_password0 = ''
#health_check_database0 = ''
#health_check_max_retries0 = 0
#health_check_retry_delay0 = 1
#connect_timeout0 = 10000
#------------------------------------------------------------------------------
# FAILOVER AND FAILBACK
#------------------------------------------------------------------------------
#failover_command = ''
# Executes this command at failover
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
# %m = new main node id
# %H = new main node hostname
# %M = old main node id
# %P = old primary node id
# %r = new main port number
# %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
#failback_command = ''
# Executes this command at failback.
# Special values:
# %d = failed node id
# %h = failed node host name
# %p = failed node port number
# %D = failed node database cluster path
# %m = new main node id
# %H = new main node hostname
# %M = old main node id
# %P = old primary node id
# %r = new main port number
# %R = new main database cluster path
# %N = old primary node hostname
# %S = old primary node port number
# %% = '%' character
#failover_on_backend_error = on
# Initiates failover when reading/writing to the
# backend communication socket fails
# If set to off, pgpool will report an
# error and disconnect the session.
#failover_on_backend_shutdown = off
# Initiates failover when backend is shutdown,
# or backend process is killed.
# If set to off, pgpool will report an
# error and disconnect the session.
#detach_false_primary = off
# Detach false primary if on. Only
# valid in streaming replication
# mode and with PostgreSQL 9.6 or
# after.
search_primary_node_timeout = 1min
# Timeout in seconds to search for the
# primary node when a failover occurs.
# 0 means no timeout, keep searching
# for a primary node forever.
#------------------------------------------------------------------------------
# ONLINE RECOVERY
#------------------------------------------------------------------------------
#recovery_user = 'nobody'
# Online recovery user
#recovery_password = ''
# Online recovery password
# Leaving it empty will make Pgpool-II to first look for the
# Password in pool_passwd file before using the empty password
#recovery_1st_stage_command = ''
# Executes a command in first stage
#recovery_2nd_stage_command = ''
# Executes a command in second stage
#recovery_timeout = 90
# Timeout in seconds to wait for the
# recovering node's postmaster to start up
# 0 means no wait
#client_idle_limit_in_recovery = 0
# Client is disconnected after being idle
# for that many seconds in the second stage
# of online recovery
# 0 means no disconnection
# -1 means immediate disconnection
#auto_failback = off
# Detached backend node reattaches automatically
# if replication_state is 'streaming'.
#auto_failback_interval = 1min
# Min interval of executing auto_failback in
# seconds.
#------------------------------------------------------------------------------
# WATCHDOG
#------------------------------------------------------------------------------
# - Enabling -
#use_watchdog = off
# Activates watchdog
# (change requires restart)
# -Connection to up stream servers -
#trusted_servers = ''
# trusted server list which are used
# to confirm network connection
# (hostA,hostB,hostC,...)
# (change requires restart)
#ping_path = '/bin'
# ping command path
# (change requires restart)
# - Watchdog communication Settings -
hostname0 = ''
# Host name or IP address of pgpool node
# for watchdog connection
# (change requires restart)
#wd_port0 = 9000
# Port number for watchdog service
# (change requires restart)
#pgpool_port0 = 9999
# Port number for pgpool
# (change requires restart)
#hostname1 = ''
#wd_port1 = 9000
#pgpool_port1 = 9999
#hostname2 = ''
#wd_port2 = 9000
#pgpool_port2 = 9999
#wd_priority = 1
# priority of this watchdog in leader election
# (change requires restart)
#wd_authkey = ''
# Authentication key for watchdog communication
# (change requires restart)
#wd_ipc_socket_dir = '/tmp'
# Unix domain socket path for watchdog IPC socket
# The Debian package defaults to
# /var/run/postgresql
# (change requires restart)
# - Virtual IP control Setting -
#delegate_IP = ''
# delegate IP address
# If this is empty, virtual IP never bring up.
# (change requires restart)
#if_cmd_path = '/sbin'
# path to the directory where if_up/down_cmd exists
# If if_up/down_cmd starts with "/", if_cmd_path will be ignored.
# (change requires restart)
#if_up_cmd = '/usr/bin/sudo /sbin/ip addr add $_IP_$/24 dev eth0 label eth0:0'
# startup delegate IP command
# (change requires restart)
#if_down_cmd = '/usr/bin/sudo /sbin/ip addr del $_IP_$/24 dev eth0'
# shutdown delegate IP command
# (change requires restart)
#arping_path = '/usr/sbin'
# arping command path
# If arping_cmd starts with "/", if_cmd_path will be ignored.
# (change requires restart)
#arping_cmd = '/usr/bin/sudo /usr/sbin/arping -U $_IP_$ -w 1 -I eth0'
# arping command
# (change requires restart)
# - Behavior on escalation Setting -
#clear_memqcache_on_escalation = on
# Clear all the query cache on shared memory
# when standby pgpool escalate to active pgpool
# (= virtual IP holder).
# This should be off if client connects to pgpool
# not using virtual IP.
# (change requires restart)
#wd_escalation_command = ''
# Executes this command at escalation on new active pgpool.
# (change requires restart)
#wd_de_escalation_command = ''
# Executes this command when leader pgpool resigns from being leader.
# (change requires restart)
# - Watchdog consensus settings for failover -
#failover_when_quorum_exists = on
# Only perform backend node failover
# when the watchdog cluster holds the quorum
# (change requires restart)
#failover_require_consensus = on
# Perform failover when majority of Pgpool-II nodes
# agrees on the backend node status change
# (change requires restart)
#allow_multiple_failover_requests_from_node = off
# A Pgpool-II node can cast multiple votes
# for building the consensus on failover
# (change requires restart)
#enable_consensus_with_half_votes = off
# apply majority rule for consensus and quorum computation
# at 50% of votes in a cluster with even number of nodes.
# when enabled the existence of quorum and consensus
# on failover is resolved after receiving half of the
# total votes in the cluster, otherwise both these
# decisions require at least one more vote than
# half of the total votes.
# (change requires restart)
# - Watchdog cluster membership settings for quorum computation -
#wd_remove_shutdown_nodes = off
# when enabled cluster membership of properly shutdown
# watchdog nodes gets revoked, After that the node does
# not count towards the quorum and consensus computations
#wd_lost_node_removal_timeout = 0s
# Timeout after which the cluster membership of LOST watchdog
# nodes gets revoked. After that the node does not
# count towards the quorum and consensus computations
# setting timeout to 0 will never revoke the membership
# of LOST nodes
#wd_no_show_node_removal_timeout = 0s
# Time to wait for Watchdog node to connect to the cluster.
# After that time the cluster membership of NO-SHOW node gets
# revoked and it does not count towards the quorum and
# consensus computations
# setting timeout to 0 will not revoke the membership
# of NO-SHOW nodes
# - Lifecheck Setting -
# -- common --
#wd_monitoring_interfaces_list = ''
# Comma separated list of interfaces names to monitor.
# if any interface from the list is active the watchdog will
# consider the network is fine
# 'any' to enable monitoring on all interfaces except loopback
# '' to disable monitoring
# (change requires restart)
#wd_lifecheck_method = 'heartbeat'
# Method of watchdog lifecheck ('heartbeat' or 'query' or 'external')
# (change requires restart)
#wd_interval = 10
# lifecheck interval (sec) > 0
# (change requires restart)
# -- heartbeat mode --
#heartbeat_hostname0 = ''
# Host name or IP address used
# for sending heartbeat signal.
# (change requires restart)
#heartbeat_port0 = 9694
# Port number used for receiving/sending heartbeat signal
# Usually this is the same as heartbeat_portX.
# (change requires restart)
#heartbeat_device0 = ''
# Name of NIC device (such like 'eth0')
# used for sending/receiving heartbeat
# signal to/from destination 0.
# This works only when this is not empty
# and pgpool has root privilege.
# (change requires restart)
#heartbeat_hostname1 = ''
#heartbeat_port1 = 9694
#heartbeat_device1 = ''
#heartbeat_hostname2 = ''
#heartbeat_port2 = 9694
#heartbeat_device2 = ''
#wd_heartbeat_keepalive = 2
# Interval time of sending heartbeat signal (sec)
# (change requires restart)
#wd_heartbeat_deadtime = 30
# Deadtime interval for heartbeat signal (sec)
# (change requires restart)
# -- query mode --
#wd_life_point = 3
# lifecheck retry times
# (change requires restart)
#wd_lifecheck_query = 'SELECT 1'
# lifecheck query to pgpool from watchdog
# (change requires restart)
#wd_lifecheck_dbname = 'template1'
# Database name connected for lifecheck
# (change requires restart)
#wd_lifecheck_user = 'nobody'
# watchdog user monitoring pgpools in lifecheck
# (change requires restart)
#wd_lifecheck_password = ''
# Password for watchdog user in lifecheck
# Leaving it empty will make Pgpool-II to first look for the
# Password in pool_passwd file before using the empty password
# (change requires restart)
#------------------------------------------------------------------------------
# OTHERS
#------------------------------------------------------------------------------
#relcache_expire = 0
# Life time of relation cache in seconds.
# 0 means no cache expiration(the default).
# The relation cache is used for cache the
# query result against PostgreSQL system
# catalog to obtain various information
# including table structures or if it's a
# temporary table or not. The cache is
# maintained in a pgpool child local memory
# and being kept as long as it survives.
# If someone modify the table by using
# ALTER TABLE or some such, the relcache is
# not consistent anymore.
# For this purpose, cache_expiration
# controls the life time of the cache.
#relcache_size = 256
# Number of relation cache
# entry. If you see frequently:
# "pool_search_relcache: cache replacement happened"
# in the pgpool log, you might want to increase this number.
#check_temp_table = catalog
# Temporary table check method. catalog, trace or none.
# Default is catalog.
#check_unlogged_table = on
# If on, enable unlogged table check in SELECT statements.
# This initiates queries against system catalog of primary/main
# thus increases load of primary.
# If you are absolutely sure that your system never uses unlogged tables
# and you want to save access to primary/main, you could turn this off.
# Default is on.
#enable_shared_relcache = on
# If on, relation cache stored in memory cache,
# the cache is shared among child process.
# Default is on.
# (change requires restart)
#relcache_query_target = primary
# Target node to send relcache queries. Default is primary node.
# If load_balance_node is specified, queries will be sent to load balance node.
#------------------------------------------------------------------------------
# IN MEMORY QUERY MEMORY CACHE
#------------------------------------------------------------------------------
#memory_cache_enabled = off
# If on, use the memory cache functionality, off by default
# (change requires restart)
#memqcache_method = 'shmem'
# Cache storage method. either 'shmem'(shared memory) or
# 'memcached'. 'shmem' by default
# (change requires restart)
#memqcache_memcached_host = 'localhost'
# Memcached host name or IP address. Mandatory if
# memqcache_method = 'memcached'.
# Defaults to localhost.
# (change requires restart)
#memqcache_memcached_port = 11211
# Memcached port number. Mandatory if memqcache_method = 'memcached'.
# Defaults to 11211.
# (change requires restart)
#memqcache_total_size = 64MB
# Total memory size in bytes for storing memory cache.
# Mandatory if memqcache_method = 'shmem'.
# Defaults to 64MB.
# (change requires restart)
#memqcache_max_num_cache = 1000000
# Total number of cache entries. Mandatory
# if memqcache_method = 'shmem'.
# Each cache entry consumes 48 bytes on shared memory.
# Defaults to 1,000,000(45.8MB).
# (change requires restart)
#memqcache_expire = 0
# Memory cache entry life time specified in seconds.
# 0 means infinite life time. 0 by default.
# (change requires restart)
#memqcache_auto_cache_invalidation = on
# If on, invalidation of query cache is triggered by corresponding
# DDL/DML/DCL(and memqcache_expire). If off, it is only triggered
# by memqcache_expire. on by default.
# (change requires restart)
#memqcache_maxcache = 400kB
# Maximum SELECT result size in bytes.
# Must be smaller than memqcache_cache_block_size. Defaults to 400KB.
# (change requires restart)
#memqcache_cache_block_size = 1MB
# Cache block size in bytes. Mandatory if memqcache_method = 'shmem'.
# Defaults to 1MB.
# (change requires restart)
#memqcache_oiddir = '/var/log/pgpool/oiddir'
# Temporary work directory to record table oids
# (change requires restart)
#cache_safe_memqcache_table_list = ''
# Comma separated list of table names to memcache
# that don't write to database
# Regexp are accepted
#cache_unsafe_memqcache_table_list = ''
# Comma separated list of table names not to memcache
# that don't write to database
# Regexp are accepted
|
|
|
Hello, t-ishii..any updates for me? |
|
|
I have checked pgpool.conf and I noticed that streaming replication mode is used. The error message indicates that the DEALLOCATE command was sent to multiple backends. This is very odd in streaming replication mode. Usually read only SELECTs are sent to one of the backends (but never sent to multiple backends). Other types of queries are sent to the primary backend only. For further investigation, I need to know what query is prepared for the named statement "_KEYSET_0x25587a0". If you are using the PREPARE command to create the named statement, can you share the PREPARE statement? |
|
|
> And please can you tell me why pgpool cannot find primary after reboot primary db..show pool_nodes tell just that this node is standby and down. But really node after reboot is UPS and Primary. Can you share the result of "show pool_nodes" command result? |
|
|
hi, can you prompt me with, how can I get this PREPARE statement from the app? Our App is open text documentum content server. show pool_nodes is okay after pcp_attach_node..but pgpool does not do it automatically...maybe this is a bug. After reboot of the primary or standby node pgpool cannot get it back. |
|
|
> hi, can you prompt me with, how can I get this PREPARE statement from the app? Our App is open text documentum content server. set log_client_messages = on. Restart pgpool then wait for the error occurs. You should find "PREPARE name AS ... " in the pgpool.log , where "name" is same as the DEALLOCATE statement which has the same name. > show pool_nodes is okey after pcp_attach_node..but pgpool not do it automatically...maybe this is bug. After reboot primary or standby node pgpool cannot get it back. Please share the result of show pool_nodes . I need it to know to advice you. |
|
|
Hi, this is output after log_client_messages=on: LOG: DB node id: 1 backend pid: 54644 statement: COMMIT 2023-01-25 06:31:51.835: [unknown] pid 160820: LOG: DB node id: 0 backend pid: 5156 statement: COMMIT 2023-01-25 06:31:51.836: [unknown] pid 160820: LOG: Query message from frontend. 2023-01-25 06:31:51.836: [unknown] pid 160820: DETAIL: query: "DEALLOCATE "_KEYSET_0x2a425d0"" 2023-01-25 06:31:51.836: [unknown] pid 160820: LOG: statement: DEALLOCATE "_KEYSET_0x2a425d0" 2023-01-25 06:31:51.836: [unknown] pid 160820: LOG: DB node id: 0 backend pid: 5156 statement: DEALLOCATE "_KEYSET_0x2a425d0" 2023-01-25 06:31:51.837: [unknown] pid 160820: LOG: DB node id: 1 backend pid: 54644 statement: DEALLOCATE "_KEYSET_0x2a425d0" 2023-01-25 06:31:51.838: [unknown] pid 160820: LOG: pool_send_and_wait: Error or notice message from backend: : DB node id: 1 backend pid: 54644 statement: "DEALLOCATE "_KEYSET_0x2a425d0"" message: "prepared statement "_KEYSET_0x2a425d0" does not exist" 2023-01-25 06:31:51.838: [unknown] pid 160820: WARNING: packet kind of backend 1 ['E'] does not match with main/majority nodes packet kind ['C'] 2023-01-25 06:31:51.838: [unknown] pid 160820: FATAL: failed to read kind from backend 2023-01-25 06:31:51.838: [unknown] pid 160820: DETAIL: kind mismatch among backends. Possible last query was: "DEALLOCATE "_KEYSET_0x2a425d0"" kind details are: 0[C] 1[E: prepared statement "_KEYSET_0x2a425d0" does not exist] 2023-01-25 06:31:51.838: [unknown] pid 160820: HINT: check data consistency among db nodes 2023-01-25 06:31:51.842: main pid 160738: LOG: child process with pid: 160820 exits with status 512 2023-01-25 06:31:51.843: main pid 160738: LOG: fork a new child process with pid: 164884 =================== Expanded display is on. 
postgres=# show pool_nodes; -[ RECORD 1 ]----------+-------------------- node_id | 0 hostname | ******* port | 5432 status | up pg_status | up lb_weight | 0.000000 role | primary pg_role | primary select_cnt | 115 load_balance_node | false replication_delay | 0 replication_state | replication_sync_state | last_status_change | 2023-01-25 06:20:34 -[ RECORD 2 ]----------+-------------------- node_id | 1 hostname | ****** port | 5432 status | up pg_status | up lb_weight | 1.000000 role | standby pg_role | standby select_cnt | 5 load_balance_node | true replication_delay | 0 replication_state | replication_sync_state | last_status_change | 2023-01-25 06:20:34 |
|
|
This is error from app: 2023-01-25T10:31:52.105027 32741[32741] 0000000000000000 [DM_OBJ_MGR_E_SAVE_FAIL]error: "save failed for object with handle 200003e8800000ff of type dmi_sequence: table on which save failed was dmi_sequence_s; error from database system was STATE=08S01, CODE=35, MSG=SQLExecDirect unable due to the connection lost |
|
|
Can you find "_KEYSET_0x2a425d0" in the pgpool log file other than DEALLOCATE? I need to know what SQL statement has created the named statement _KEYSET_0x2a425d0". |
|
|
The show pool_nodes output indicates that there's misconfiguration or lacking of setting in the pgpool.conf. Have you specified backend_application_name0 etc. in the pgpool.conf? I suspect this because replication_state and replication_sync_state column is empty. Expanded display is on. postgres=# show pool_nodes; -[ RECORD 1 ]----------+-------------------- node_id | 0 : : replication_state | replication_sync_state | |
|
|
> Can you find "_KEYSET_0x2a425d0" in the pgpool log file other than DEALLOCATE? I need to know what SQL statement has created the named statement _KEYSET_0x2a425d0". 2023-01-25 06:31:51.825: [unknown] pid 160820: DETAIL: query: "RELEASE _EXEC_SVP_0x297e630;SAVEPOINT _EXEC_SVP_0x297e630;PREPARE "_KEYSET_0x2a425d0"(tid,tid) as SELECT * , "ctid" FROM dmi_vstamp_s M_ where ctid in ($1,$2)" |
|
|
> Have you specified backend_application_name0 etc. in the pgpool.conf? No, I haven't set this parameter |
|
|
> show pool_nodes is okey after pcp_attach_node..but pgpool not do it automatically...maybe this is bug. This is not a bug but an expected behavior. However you can change this by using auto_failback. See the manual for more details. https://www.pgpool.net/docs/43/en/html/runtime-config-failover.html#RUNTIME-CONFIG-FAILOVER-SETTINGS Note that that in order to work this feature, you need to specify backend_application_name0 etc. |
|
|
I see the cause. > 2023-01-25 06:31:51.825: [unknown] pid 160820: DETAIL: query: "RELEASE _EXEC_SVP_0x297e630;SAVEPOINT _EXEC_SVP_0x297e630;PREPARE "_KEYSET_0x2a425d0"(tid,tid) as SELECT * , "ctid" FROM dmi_vstamp_s M_ where ctid in ($1,$2)" The application seem to use multi-statement queries (multiple queries combined with ";" in single line). Pgpool-II does not support it. https://www.pgpool.net/docs/43/en/html/restrictions.html |
|
|
Thank you very much for your support, t-ishii ! |
|
|
> The application seem to use multi-statement queries (multiple queries combined with ";" in single line). Pgpool-II does not support it. Load Balancing I found this: Multi-statement queries (multiple SQL commands on single line) are always sent to primary node (in streaming replication mode) or main node (in other modes). Usually Pgpool-II dispatch query to appropriate node, but it's not applied to multi-statement queries. ===== What does it mean... "sent to primary, but not support by dispatch" ? |
|
|
Multi-statement queries are always sent to primary node. Never load balanced. |
|
|
But, why this query sent to the backend node 1(standby) : Error or notice message from backend: : DB node id: 1 backend pid: 54644 statement: "DEALLOCATE "_KEYSET_0x2a425d0"" message: "prepared statement "_KEYSET_0x2a425d0" does not exist"? |
|
|
Good question. I suspect it's a bug of Pgpool-II in case when the prepared statement was created by multi-statement query. |
|
|
Our app uses prepare-statemenst, but why pgpool sent that query to standby too..if it is a bug, maybe exist a patch for this issue? |
|
|
No, the patch does not exist because no one has investigated this yet. |
|
|
I came up with attached patch for this issue and doing tests for now. If they are ok, will be included in the next minor releases (supposed to be released in May). |
|
|
This is the patch. deallocate.patch (3,923 bytes)
diff --git a/src/context/pool_query_context.c b/src/context/pool_query_context.c
index 2cf51326..70be819e 100644
--- a/src/context/pool_query_context.c
+++ b/src/context/pool_query_context.c
@@ -4,7 +4,7 @@
* pgpool: a language independent connection pool server for PostgreSQL
* written by Tatsuo Ishii
*
- * Copyright (c) 2003-2020 PgPool Global Development Group
+ * Copyright (c) 2003-2023 PgPool Global Development Group
*
* Permission to use, copy, modify, and distribute this software and
* its documentation for any purpose and without fee is hereby
@@ -756,24 +756,9 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
}
/*
- * EXECUTE?
+ * DEALLOCATE or EXECUTE?
*/
- if (IsA(node, ExecuteStmt))
- {
- POOL_SENT_MESSAGE *msg;
-
- msg = pool_get_sent_message('Q', ((ExecuteStmt *) node)->name, POOL_SENT_MESSAGE_CREATED);
- if (!msg)
- msg = pool_get_sent_message('P', ((ExecuteStmt *) node)->name, POOL_SENT_MESSAGE_CREATED);
- if (msg)
- pool_copy_prep_where(msg->query_context->where_to_send,
- query_context->where_to_send);
- }
-
- /*
- * DEALLOCATE?
- */
- else if (IsA(node, DeallocateStmt))
+ if (IsA(node, DeallocateStmt) || IsA(node, ExecuteStmt))
{
where_to_send_deallocate(query_context, node);
}
@@ -1520,26 +1505,55 @@ static POOL_DEST send_to_where(Node *node, char *query)
return POOL_PRIMARY;
}
+/*
+ * Decide where to send given message.
+ * "node" must be a parse tree of either DEALLOCATE or EXECUTE.
+ */
static
void
where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node)
{
- DeallocateStmt *d = (DeallocateStmt *) node;
+ DeallocateStmt *d = NULL;
+ ExecuteStmt *e = NULL;
+ char *name;
POOL_SENT_MESSAGE *msg;
+ if (IsA(node, DeallocateStmt))
+ {
+ d = (DeallocateStmt *) node;
+ name = d->name;
+ }
+ else if (IsA(node, ExecuteStmt))
+ {
+ e = (ExecuteStmt *) node;
+ name = e->name;
+ }
+ else
+ {
+ ereport(ERROR,
+ (errmsg("invalid node type for where_to_send_deallocate")));
+ return;
+ }
+
/* DEALLOCATE ALL? */
- if (d->name == NULL)
+ if (d && (name == NULL))
{
+ /* send to all backend node */
pool_setall_node_to_be_sent(query_context);
+ return;
}
+
+ /* ordinary DEALLOCATE or EXECUTE */
else
{
- msg = pool_get_sent_message('Q', d->name, POOL_SENT_MESSAGE_CREATED);
+ /* check if message was created by SQL PREPARE */
+ msg = pool_get_sent_message('Q', name, POOL_SENT_MESSAGE_CREATED);
if (!msg)
- msg = pool_get_sent_message('P', d->name, POOL_SENT_MESSAGE_CREATED);
+ /* message may be created by Parse message */
+ msg = pool_get_sent_message('P', name, POOL_SENT_MESSAGE_CREATED);
if (msg)
{
- /* Inherit same map from PREPARE or PARSE */
+ /* Inherit same map from PREPARE or Parse */
pool_copy_prep_where(msg->query_context->where_to_send,
query_context->where_to_send);
@@ -1547,8 +1561,33 @@ where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node)
query_context->load_balance_node_id = msg->query_context->load_balance_node_id;
}
else
- /* prepared statement was not found */
- pool_setall_node_to_be_sent(query_context);
+ {
+ /*
+ * prepared statement was not found.
+ * There are two cases when this could happen.
+ * (1) mistakes by client. In this case backend will return ERROR
+ * anyway.
+ * (2) previous query was issued as multi-statement query. e.g.
+ * SELECT 1\;PREPARE foo AS SELECT 1;
+ * In this case pgpool does not know anything about the prepared
+ * statement "foo".
+ */
+ if (SL_MODE)
+ {
+ /*
+ * In streaming replication or logical replication, sent to
+ * primary node only.
+ */
+ pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
+ }
+ else
+ {
+ /*
+ * In other mode, sent to all node.
+ */
+ pool_setall_node_to_be_sent(query_context);
+ }
+ }
}
}
|
|
|
Hi, t-ishii thank you for support. Can i test or apply this patch inparallel and help you for investigation.? |
|
|
I have committed slightly modified version into master and all supported branches including 4.3. If you were able to build pgpool from source code and do some testing, it would be a great help. F.Y.I. This is the commit for 4.3 stable tree. https://git.postgresql.org/gitweb/?p=pgpool2.git;a=commit;h=633bdca91638e271c59134b768e35880218d7a13 |
|
|
I have clone from this link :https://git.postgresql.org/git/pgpool2.git is this a patched version ? |
|
|
Yes. |
|
|
I have problem with source code instalation on Ubuntu: can you help: ============================================= make[2]: Entering directory '/home/pgadmin/pgpool2/src/parser' /bin/bash ../../ylwrap gram.y y.tab.c gram.c y.tab.h `echo gram.c | sed -e s/cc$/hh/ -e s/cpp$/hpp/ -e s/cxx$/hxx/ -e s/c++$/h++/ -e s/c$/h/` y.output gram.output -- yacc -d -Wno-yacc yacc: invalid option -- 'W' Usage: yacc [options] filename Options: -b file_prefix set filename prefix (default "y.") -B create a backtracking parser -d write definitions (.tab.h) -h print this help-message -H defines_file write definitions to defines_file -i write interface (y.tab.i) -g write a graphical description -l suppress #line directives -L enable position processing, e.g., "%locations" -o output_file (default ".tab.c") -p symbol_prefix set symbol prefix (default "yy") -P create a reentrant parser, e.g., "%pure-parser" -r produce separate code and table files (y.code.c) -s suppress #define's for quoted names in %token lines -t add debugging support -v write description (y.output) -V show version information and exit Long options: --defines -H --file-prefix -b --graph -g --help -h --name-prefix -p --no-lines -l --output -o --version -V make[2]: *** [Makefile:442: gram.c] Error 1 make[2]: Leaving directory '/home/pgadmin/pgpool2/src/parser' make[1]: *** [Makefile:937: all-recursive] Error 1 make[1]: Leaving directory '/home/pgadmin/pgpool2/src' make: *** [Makefile:414: all-recursive] Error 1 |
|
|
Have you installed bison? You will also need flex. |
|
|
No I havent. Have you any instructions for installing from the source with necessary components? |
|
|
> Have you any instructions for installing from the source with necessary components? Yes. https://www.pgpool.net/docs/43/en/html/install-requirements.html Also make sure that you check out the proper branch. Since you are using 4.3, I assume you need to checkout 4.3 stable tree, i.e. git checkout V4_3_STABLE |
|
|
after connecting our app we have new error like: =================================================== pool_send_and_wait: Error or notice message from backend: : DB node id: 1 backend pid: 231784 statement: "SAVEPOINT _EXEC_SVP_0x4231640" message: "SAVEPOINT can only be used in transaction blocks" 2023-02-01 14:24:46.643: [unknown] pid 155721: WARNING: packet kind of backend 1 ['E'] does not match with main/majority nodes packet kind ['C'] 2023-02-01 14:24:46.643: [unknown] pid 155721: FATAL: failed to read kind from backend ======================================================================================================== error from app server : "save failed for object with handle 5f0003e8801cb92b of type dm_audittrail: table on which save failed was dmi_object_type; error from database system was STATE=HY000, CODE=8, MSG=internal SAVEPOINT failed |
|
|
DETAIL: kind mismatch among backends. Possible last query was: "SAVEPOINT _EXEC_SVP_0x4231640" kind details are: 0[C] 1[E: SAVEPOINT can only be used in transaction blocks] |
|
|
How did you start transaction before the error? You should be able to find by searching "BEGIN" in the pgpool.log. |
|
|
Just start the application and connect to pgpool. This is output from pgpool.log: 2023-02-01 14:27:33.913: [unknown] pid 164243: LOG: Query message from frontend. 2023-02-01 14:27:33.913: [unknown] pid 164243: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x422c820" 2023-02-01 14:27:33.913: [unknown] pid 164243: LOG: statement: SAVEPOINT _EXEC_SVP_0x422c820 2023-02-01 14:27:33.913: [unknown] pid 164243: LOG: DB node id: 0 backend pid: 31201 statement: SAVEPOINT _EXEC_SVP_0x422c820 2023-02-01 14:27:33.914: [unknown] pid 164243: LOG: DB node id: 1 backend pid: 232104 statement: SAVEPOINT _EXEC_SVP_0x422c820 2023-02-01 14:27:33.915: [unknown] pid 164243: LOG: pool_send_and_wait: Error or notice message from backend: : DB node id: 1 backend pid: 232104 statement: "SAVEPOINT _EXEC_SVP_0x422c820" message: "SAVEPOINT can only be used in transaction blocks" 2023-02-01 14:27:33.915: [unknown] pid 164243: WARNING: packet kind of backend 1 ['E'] does not match with main/majority nodes packet kind ['C'] 2023-02-01 14:27:33.915: [unknown] pid 164243: FATAL: failed to read kind from backend 2023-02-01 14:27:33.915: [unknown] pid 164243: DETAIL: kind mismatch among backends. Possible last query was: "SAVEPOINT _EXEC_SVP_0x422c820" kind details are: 0[C] 1[E: SAVEPOINT can only be used in transaction blocks] 2023-02-01 14:27:33.915: [unknown] pid 164243: HINT: check data consistency among db nodes 2023-02-01 14:27:33.920: main pid 155649: LOG: child process with pid: 164243 exits with status 512 2023-02-01 14:27:33.920: [unknown] pid 162944: LOG: Query message from frontend. |
|
|
Can you share a little bit previous log? There should be "BEGIN" query before. I also suspect that the BEGIN was issued as multi-statement query. |
|
|
2023-02-01 14:27:33.909: [unknown] pid 162944: LOG: Sync message from frontend. 2023-02-01 14:27:33.910: [unknown] pid 164243: LOG: Query message from frontend. 2023-02-01 14:27:33.910: [unknown] pid 164243: DETAIL: query: "BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801cb9ea','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-01 14:27:33.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*******','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e45ea','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-01 14:27:33.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0)" 2023-02-01 14:27:33.911: [unknown] pid 164243: LOG: statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801cb9ea','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-01 14:27:33.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;******','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e45ea','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-01 14:27:33.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-01 14:27:33.911: [unknown] pid 164243: LOG: DB node id: 0 backend pid: 31201 statement: BEGIN; INSERT INTO dm_audittrail_s 
VALUES('5f0003e8801cb9ea','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-01 14:27:33.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*******','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e45ea','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-01 14:27:33.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-01 14:27:33.913: [unknown] pid 164243: LOG: Query message from frontend. 2023-02-01 14:27:33.913: [unknown] pid 164243: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x422c820" 2023-02-01 14:27:33.913: [unknown] pid 164243: LOG: statement: SAVEPOINT _EXEC_SVP_0x422c820 2023-02-01 14:27:33.913: [unknown] pid 164243: LOG: DB node id: 0 backend pid: 31201 statement: SAVEPOINT _EXEC_SVP_0x422c820 2023-02-01 14:27:33.914: [unknown] pid 164243: LOG: DB node id: 1 backend pid: 232104 statement: SAVEPOINT _EXEC_SVP_0x422c820 2023-02-01 14:27:33.915: [unknown] pid 164243: LOG: pool_send_and_wait: Error or notice message from backend: : DB node id: 1 backend pid: 232104 statement: "SAVEPOINT _EXEC_SVP_0x422c820" message: "SAVEPOINT can only be used in transaction blocks" 2023-02-01 14:27:33.915: [unknown] pid 164243: WARNING: packet kind of backend 1 ['E'] does not match with main/majority nodes packet kind ['C'] 2023-02-01 14:27:33.915: [unknown] pid 164243: FATAL: failed to read kind from backend 2023-02-01 14:27:33.915: [unknown] pid 164243: DETAIL: kind mismatch among backends. Possible last query was: "SAVEPOINT _EXEC_SVP_0x422c820" kind details are: 0[C] 1[E: SAVEPOINT can only be used in transaction blocks] |
|
|
> 2023-02-01 14:27:33.910: [unknown] pid 164243: DETAIL: query: "BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801cb9ea','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-01 14:27:33.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*******','OS External(checkProgram=/content/documentum So my guess was correct: multi-statement query was used. With the fix, any multi-statement query is sent to primary but SAVEPOINT is sent to all backend. This is the reason why you have new error. Let me consider if we can fix this. |
|
|
hi, any update for this issue? |
|
|
It's harder than I thought. Please allow me to spend more time. |
|
|
Hi t-ishii, any updates ? |
|
|
I finally pushed a fix for this. This is the commit for 4.3 stable tree. https://git.postgresql.org/gitweb/?p=pgpool2.git;a=commit;h=b7acec8d621e3a8659a836cb7e1f13d6776e4fd6 |
|
|
Hi, can i reinstall from source and check? |
|
|
Hi, i have reinstalled pgpool with your fix, application started successefully ...but we have new error when operate: 2023-02-15 14:29:10.100: [unknown] pid 527810: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x3a93350" 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: statement: SAVEPOINT _EXEC_SVP_0x3a93350 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SAVEPOINT _EXEC_SVP_0x3a93350 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: DB node id: 1 backend pid: 737420 statement: SAVEPOINT _EXEC_SVP_0x3a93350 2023-02-15 14:29:10.101: [unknown] pid 527810: LOG: pool_send_and_wait: Error or notice message from backend: : DB node id: 1 backend pid: 737420 statement: "SAVEPOINT _EXEC_SVP_0x3a93350" message: "SAVEPOINT can only be used in transaction blocks" 2023-02-15 14:29:10.101: [unknown] pid 527810: WARNING: packet kind of backend 1 ['E'] does not match with main/majority nodes packet kind ['C'] 2023-02-15 14:29:10.102: [unknown] pid 527810: FATAL: failed to read kind from backend 2023-02-15 14:29:10.102: [unknown] pid 527810: DETAIL: kind mismatch among backends. Possible last query was: "SAVEPOINT _EXEC_SVP_0x3a93350" kind details are: 0[C]1[E: SAVEPOINT can only be used in transaction blocks] 2023-02-15 14:29:10.102: [unknown] pid 527810: HINT: check data consistency among db nodes 2023-02-15 14:29:10.107: main pid 526391: LOG: reaper handler 2023-02-15 14:29:10.109: main pid 526391: LOG: reaper handler: exiting normally ============================================= log from app: Wed Feb 15 18:28:30 2023 [ERROR] [AGENTEXEC 80037] Detected during program initialization: Command Failed: connect,SESDA.SESDA,dmadmin,'',,,try_native_first, status: 0, with error message [DM_OBJ_MGR_E_SAVE_FAIL]error: "save failed for object with handle 5f0003e8801d892a of type dm_audittrail: table on which save failed was dmi_object_type; error from database system was STATE=HY000, CODE=8, MSG=internal SAVEPOINT failed |
|
|
Strange. I have tested following case successfully. t-ishii$ psql -p 11000 test psql (15.2) Type "help" for help. test=# show pool_version; pool_version -------------- (0 rows) pool_version ----------------------- 4.3.5 (tamahomeboshi) (1 row) test=# begin\;select 1; BEGIN ?column? ---------- 1 (1 row) test=*# savepoint foo; SAVEPOINT test=*# end; COMMIT Please try the case above on your side. If that's fine, can you share the log line which started the transaction when you saw the error? If so your case must be different from what I have been working on. |
|
|
postgres@pgpool:~$ psql -h localhost -p 9999 psql (13.9 (Ubuntu 13.9-1.pgdg22.04+1), server 13.7) Type "help" for help. postgres=# show pool_version; pool_version ------------------------- 4.5devel (hotooriboshi) (1 row) postgres=# begin\;select 1; ?column? ---------- 1 (1 row) postgres=*# savepoint foo; SAVEPOINT postgres=*# end; COMMIT This is the output? is it okay? pool_version is 4.5devel (hotooriboshi). Maybe my version is wrong? |
|
|
The test result looks good to me. Please share the log line including "BEGIN". > This is the output? is it okay? pool_version is 4.5devel (hotooriboshi). Maybe my version is wrong? Yeah, 4.5devel is under development version. |
|
|
DETAIL: query: "BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d898c','dm_connect','System Unspeci fic', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:30:13.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000 000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d89','110003e880000102' ,'dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:30:12.000' , 2,'260003e 880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0)" 2023-02-15 14:30:12.351: [unknown] pid 526407: LOG: statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d898c','dm_connect','System Unspeci fic', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:30:13.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000 000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d89','110003e880000102' ,'dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:30:12.000' , 2,'260003e 880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-15 14:30:12.351: [unknown] pid 526407: LOG: DB node id: 0 backend pid: 30500 statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d8 98c','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:30:13.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ', 
'snmesdspsqlctsindxp01;******','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000', '0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010 003e8800e6d89','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-0 2-15 14:30:12.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) ======================================== >Yeah, 4.5devel is under development version Correct version? |
|
|
> Please share the log line including "BEGIN". I meant when you saw the SAVEPOINT error using your application. |
|
|
> Correct version? You should not use it production. If you want to use 4.3.x please do following: git checkout V4_3_STABLE git pull |
|
|
>I meant when you saw the SAVEPOINT error using your application. What do you mean...i shared log line with "begin" Am i using correct version? 4.5devel |
|
|
> git checkout V4_3_STABLE > git pull okey, i will reinstall and check again |
|
|
Ok, so the error happened in following sequence: BEGIN\;INSERT INTO ... SAVEPOINT ... I tried following but failed to reproduce the error by using psql: CREATE TABLE t1(i int); BEGIN\;INSERT INTO t1 VALUES(1); SAVEPOINT foo; I expected the same kind of error you had. Do you have any idea how to reproduce your error? |
|
|
>I expected the same kind of error you had. This is mean that your patch in not solve our problem? Or this is another issue? |
|
|
> 2023-02-15 14:29:10.100: [unknown] pid 527810: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x3a93350" Can you share all the log of pid 527810? There must be something between BEGIN... and SAVEPOINT. I suspect that is the key to reproduce the error. |
|
|
> This is mean that your patch in not solve our problem? Yes, I expected my patch solves your problem. > Or this is another issue? Maybe. |
|
|
> Can you share all the log of pid 527810? There must be something between BEGIN... and SAVEPOINT. I suspect that is the key to reproduce the error. This is several output from log with pid pid 527810: LOG: Query message from frontend pid 527810: DETAIL: query: "SET DateStyle = 'ISO';SET extra_float_digits = 2;show transaction_isolation" pid 527810: LOG: statement: SET DateStyle = 'ISO';SET extra_float_digits = 2;show transaction_isolation 2023-02-15 14:29:10.007: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SELECT version() 2023-02-15 14:29:10.007: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SET DateStyle = 'ISO';SET extra_float_digits = 2;show transaction_isolation pid 527810: LOG: Query message from frontend. 2023-02-15 14:29:10.010: [unknown] pid 527810: DETAIL: query: "select oid, typbasetype from pg_type where typname = 'lo'" 2023-02-15 14:29:10.010: [unknown] pid 527810: LOG: statement: select oid, typbasetype from pg_type where typname = 'lo' 2023-02-15 14:29:10.012: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SELECT count(*) FROM pg_class AS c, pg_namespace AS n WHERE c.oid = pg_catalog.to_regclass('"pg_type"') AND c.relnamespace = n.oid AND n.nspname = 'pg_catalog' 2023-02-15 14:29:10.012: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: select oid, typbasetype from pg_type where typname = 'lo'n 527810: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x3a93350;select pg_backend_pid();" 2023-02-15 14:29:10.021: [unknown] pid 527810: LOG: statement: SAVEPOINT _EXEC_SVP_0x3a93350;select pg_backend_pid(); 2023-02-15 14:29:10.021: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SAVEPOINT _EXEC_SVP_0x3a93350;select pg_backend_pid(); pid 527810: DETAIL: statement: "", query: "select r_object_id, resync_needed from dmi_dd_common_info_s where type_name = $1 and business_policy_id = $2 and state_name = $3 and nls_key = $4 and r_object_id like '69%'" 
2023-02-15 14:29:10.026: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: Parse: select r_object_id, resync_needed from dmi_dd_common_info_s where type_name = $1 and business_policy_id = $2 and state_name = $3 and nls_key = $4 and r_object_id like '69%' 2023-02-15 14:29:10.026: [unknown] pid 527810: LOG: Bind message from frontend. 2023-02-15 14:29:10.026: [unknown] pid 527810: DETAIL: portal: "", statement: "" pid 527810: LOG: Query message from frontend. 2023-02-15 14:29:10.100: [unknown] pid 527810: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x3a93350" 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: statement: SAVEPOINT _EXEC_SVP_0x3a93350 2023-02-15 14:29:10.006: [unknown] pid 527810: LOG: statement: SET DateStyle = 'ISO';SET extra_float_digits = 2;show transaction_isolation 2023-02-15 14:29:10.007: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SELECT version() 2023-02-15 14:29:10.007: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SET DateStyle = 'ISO';SET extra_float_digits = 2;show transaction_isolation |
|
|
Is that all? There's no statement "BEGIN", meaning all the queries did not run in an explicit transaction. From the error message: > 2023-02-15 14:29:10.102: [unknown] pid 527810: DETAIL: kind mismatch among backends. Possible last query was: "SAVEPOINT _EXEC_SVP_0x3a93350" kind details are: 0[C]1[E: SAVEPOINT can only be used in transaction blocks] At least backend 0 was running in an explicit transaction. |
|
|
>Is that all? There's no statement "BEGIN", meaning all the queries did not run in an explicit transaction. DETAIL: query: "BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d898c','dm_connect','System Unspeci fic', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:30:13.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000 000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d89','110003e880000102' ,'dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:30:12.000' , 2,'260003e 880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0)" 2023-02-15 14:30:12.351: [unknown] pid 526407: LOG: statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d898c','dm_connect','System Unspeci fic', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:30:13.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000 000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d89','110003e880000102' ,'dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:30:12.000' , 2,'260003e 880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-15 14:30:12.351: [unknown] pid 526407: LOG: DB node id: 0 backend pid: 30500 statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d8 98c','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', 
timestamp '2023-02-15 14:30:13.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ', 'snmesdspsqlctsindxp01;******','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000', '0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010 003e8800e6d89','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-0 2-15 14:30:12.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) |
|
|
That's different log lines? I failed to see pid 527810. |
|
|
2023-02-15 14:29:10.096: [unknown] pid 527810: DETAIL: query: "BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d894d','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:29:10.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d4a','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:29:10.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0)" 2023-02-15 14:29:10.096: [unknown] pid 527810: LOG: statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d894d','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:29:10.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d4a','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:29:10.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-15 14:29:10.096: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d894d','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:29:10.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' 
',' ','snmesdspsqlctsindxp01;*****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d4a','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:29:10.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: Query message from frontend. 2023-02-15 14:29:10.100: [unknown] pid 527810: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x3a93350" 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: statement: SAVEPOINT _EXEC_SVP_0x3a9335 |
|
|
this is same log with query |
|
|
2023-02-15 14:29:08.602: [unknown] pid 527943: DETAIL: query: "BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d894b','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:29:09.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;172.16.208.114','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d48','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:29:09.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0)" 2023-02-15 14:29:08.602: [unknown] pid 527943: LOG: statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d894b','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:29:09.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;172.16.208.114','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d48','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:29:09.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-15 14:29:08.602: [unknown] pid 527943: LOG: DB node id: 0 backend pid: 30400 statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d894b','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:29:09.000' 
,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;172.16.208.114','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d48','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:29:09.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) |
|
|
same insert with diff pid |
|
|
I had hard time to reconstruct and sort out by time sequence for the log lines of pid 527810. So far this is the result. I hope I did not miss anything. 2023-02-15 14:29:10.007: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SELECT version() 2023-02-15 14:29:10.007: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SET DateStyle = 'ISO';SET extra_float_digits = 2;show transaction_isolation pid 527810: LOG: Query message from frontend. 2023-02-15 14:29:10.010: [unknown] pid 527810: DETAIL: query: "select oid, typbasetype from pg_type where typname = 'lo'" 2023-02-15 14:29:10.010: [unknown] pid 527810: LOG: statement: select oid, typbasetype from pg_type where typname = 'lo' 2023-02-15 14:29:10.012: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SELECT count(*) FROM pg_class AS c, pg_namespace AS n WHERE c.oid = pg_catalog.to_regclass('"pg_type"') AND c.relnamespace = n.oid AND n.nspname = 'pg_catalog' 2023-02-15 14:29:10.012: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: select oid, typbasetype from pg_type where typname = 'lo'n 527810: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x3a93350;select pg_backend_pid();" 2023-02-15 14:29:10.021: [unknown] pid 527810: LOG: statement: SAVEPOINT _EXEC_SVP_0x3a93350;select pg_backend_pid(); 2023-02-15 14:29:10.021: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SAVEPOINT _EXEC_SVP_0x3a93350;select pg_backend_pid(); pid 527810: DETAIL: statement: "", query: "select r_object_id, resync_needed from dmi_dd_common_info_s where type_name = $1 and business_policy_id = $2 and state_name = $3 and nls_key = $4 and r_object_id like '69%'" 2023-02-15 14:29:10.026: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: Parse: select r_object_id, resync_needed from dmi_dd_common_info_s where type_name = $1 and business_policy_id = $2 and state_name = $3 and nls_key = $4 and r_object_id like '69%' 2023-02-15 
14:29:10.026: [unknown] pid 527810: LOG: Bind message from frontend. 2023-02-15 14:29:10.026: [unknown] pid 527810: DETAIL: portal: "", statement: "" pid 527810: LOG: Query message from frontend. 2023-02-15 14:29:10.096: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: BEGIN; INSERT INTO dm_audittrail_s VALUES('5f0003e8801d894d','dm_connect','System Unspecific', 1,'dmadmin','110003e880000102', timestamp '2023-02-15 14:29:10.000' ,'dfc_eXW2sONQCHT6MCDLrcVuo5EF96Ea;',' ',' ','snmesdspsqlctsindxp01;*****','OS External(checkProgram=/content/documentum/dba/dm_check_password_cs16.4)','0000000000000000','0000000000000000','0000000000000000','0000000000000000','0000000000000000','110003e880000102','dmadmin',' ','dm_user','Logon','0000000000000000',' ','0000000000000000','010003e8800e6d4a','110003e880000102','dmadmin','dm_450003e880000101','dmadmin',' ',' ',' ','0000000000000000',' ', 4,'snmesdspsqlcont01', timestamp '2023-02-15 14:29:10.000' , 2,'260003e880000d02', 0, 7,' ','0000000000000000', nextval('dm_audittrail_s_r_object_sequence_seq') , 0, 0) 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: Query message from frontend. 
2023-02-15 14:29:10.100: [unknown] pid 527810: DETAIL: query: "SAVEPOINT _EXEC_SVP_0x3a93350" 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: statement: SAVEPOINT _EXEC_SVP_0x3a93350 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SAVEPOINT _EXEC_SVP_0x3a93350 2023-02-15 14:29:10.100: [unknown] pid 527810: LOG: DB node id: 1 backend pid: 737420 statement: SAVEPOINT _EXEC_SVP_0x3a93350 2023-02-15 14:29:10.101: [unknown] pid 527810: LOG: pool_send_and_wait: Error or notice message from backend: : DB node id: 1 backend pid: 737420 statement: "SAVEPOINT _EXEC_SVP_0x3a93350" message: "SAVEPOINT can only be used in transaction blocks" 2023-02-15 14:29:10.101: [unknown] pid 527810: WARNING: packet kind of backend 1 ['E'] does not match with main/majority nodes packet kind ['C'] 2023-02-15 14:29:10.102: [unknown] pid 527810: FATAL: failed to read kind from backend 2023-02-15 14:29:10.102: [unknown] pid 527810: DETAIL: kind mismatch among backends. Possible last query was: "SAVEPOINT _EXEC_SVP_0x3a93350" kind details are: 0[C]1[E: SAVEPOINT can only be used in transaction blocks] 2023-02-15 14:29:10.102: [unknown] pid 527810: HINT: check data consistency among db nodes 2023-02-15 14:29:10.107: main pid 526391: LOG: reaper handler 2023-02-15 14:29:10.109: main pid 526391: LOG: reaper handler: exiting normally |
|
|
How can i send to you pgpool logfile with size 15Mb? |
|
|
I want to send to you the whole pgpool log. How can I do it? |
|
|
> How can i send to you pgpool logfile with size 15Mb? No, you can't. |
|
|
are the logs that I already sent enough for you? |
|
|
Not sure at the moment. I need to investigate the log. It will take for a while. |
|
|
So far the log lines I reconstructed do not make sense. > 2023-02-15 14:29:10.021: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: SAVEPOINT _EXEC_SVP_0x3a93350;select pg_backend_pid(); SAVEPOINT succeeded. This means there must have been a "BEGIN" (or other transaction start statement) issued before 14:29:10.021, but I failed to find it. Also, > 2023-02-15 14:29:10.096: [unknown] pid 527810: LOG: DB node id: 0 backend pid: 30410 statement: BEGIN; INSERT INTO dm_audittrail_s ... This means the former transaction was closed before 14:29:10.096 but I failed to find it. So I got stuck. Can you grep 527810 from the 15G log file? If it's smaller than 15G, you should be able to upload. |
|
|
I Have 15mb log, not 15G, how can i upload or send it to you? |
|
|
Sorry, the upper limit is 15M, not 15G. Is the 15M file is the result of grep 527810? |
|
|
Hello, this is whole log. |
|
|
Thanks. |
|
|
I succeeded in creating a minimal test case by studying your log file. BEGIN\;INSERT INTO t1 VALUES(1); SAVEPOINT foo; COMMIT; The problem is, BEGIN is followed by INSERT. Pgpool-II optimizes INSERT and UPDATE queries by using a special SQL parser to reduce the time to parse very long queries. If it finds INSERT/UPDATE, it quickly returns with a very simple parse tree, which could reduce the parsing time. However, as a side effect, the returned parse tree only includes the simplified INSERT/UPDATE statement, thus the fact that it was included in a multi-statement query is lost. So pgpool sends SAVEPOINT to both primary and standby, which fails because a transaction has not been started on the standby server. We could "fix" this without using the shortcut but then it will take a longer time to parse lengthy INSERT/UPDATE queries. |
|
|
Hi, lets try to fix it and check again how longer it will take |
|
|
Actually it was already tested. https://www.pgpool.net/pipermail/pgpool-hackers/2019-February/003242.html According to the test, the performance became 655% better than before. That means if pgpool doesn't use the feature, INSERT/UPDATE will become 6 times slower than now in certain test cases. |
|
|
What will be your recomendation on our case(issue)? We can’t slow down our application(insert/update) |
|
|
Change your application so that it does not use "BEGIN;INSERT...". Instead use: BEGIN; INSERT... Is it possible? |
|
|
I think it not possible but i will discuss about it with app developers tomorrow. |
|
|
I will discuss with other Pgpool-II developers tomorrow to check if any solution on the Pgpool-II side. |
|
|
Ok, I made a patch for Pgpool-II. This patch checks the transaction state on standby node so that SAVEPOINT is not sent to standby if the standby node has not started a transaction yet. Attached patch should be able to be applied to 4.3 by: $ git apply /some/where/fix-multi-statement.patch Then you need to recompile and re-install Pgpool-II. fix-multi-statement.patch (1,624 bytes)
diff --git a/src/context/pool_query_context.c b/src/context/pool_query_context.c
index 83c0a2b2..c200ad52 100644
--- a/src/context/pool_query_context.c
+++ b/src/context/pool_query_context.c
@@ -802,6 +802,7 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
POOL_CONNECTION *frontend;
POOL_CONNECTION_POOL *backend;
bool is_commit;
+ bool is_savepoint;
bool is_begin_read_write;
int i;
int len;
@@ -811,6 +812,7 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
frontend = session_context->frontend;
backend = session_context->backend;
is_commit = is_commit_or_rollback_query(query_context->parse_tree);
+ is_savepoint = is_savepoint_query(query_context->parse_tree);
is_begin_read_write = false;
len = 0;
string = NULL;
@@ -851,9 +853,12 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
/*
* If we are in streaming replication mode or logical replication mode,
- * we do not send COMMIT/ABORT to standbys if it's in I (idle) state.
+ * we do not send COMMIT/ABORT/SAVEPOINT etc. to standbys if it's in I (idle) state.
+ * This could happen when a multi-statement query includes INSERT/UPDATE. Because
+ * the multi-statement is transformed into a single-statement query by the minimal-parser and
+ * Pgpool-II does not recognize a transaction is started by a multi-statement query.
*/
- if (is_commit && MAIN_REPLICA && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I')
+ if ((is_commit || is_savepoint) && MAIN_REPLICA && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I')
{
pool_unset_node_to_be_sent(query_context, i);
continue;
|
|
|
Hi, I have applied the patch and now it is working in test mode. I will get back to you after full testing. |
|
|
App is up and working and not test fully yet. But we see error from app log like: [DM_OBJ_MGR_E_CURSOR_FAIL]error: "In operation Exec an attempt to create cursor failed; query was: ' SELECT * FROM dm_docbase_config_rv DF_ , dm_docbase_config_sv CF_ WHERE (CF_.r_object_id=:dmb_handle AND CF_.r_object_id=DF_.r_object_id) ORDER BY DF_.r_object_id,DF_.i_position'; error from database system was: STATE=08S01, CODE=35, MSG=SQLExecDirect unable due to the connection lost |
|
|
Please share pgpool.log. |
|
|
pgpool.log is clear no errors and warnings |
|
|
|
|
|
I don't see any statement like "SELECT * FROM dm_docbase_config_rv DF_ ..." in the log. Does your application really connect to the pgpool with log file you uploaded? |
|
|
Yes, app connect through pgpool. |
|
|
I have no idea what's wrong. |
|
|
I have updated fix-multi-statement.patch. It did not consider the case when extended queries are used. Attached is the v2 patch. I am going to commit/push to the Pgpool-II repository. I don't know if this is related to your issue though. fix-multi-statement-v2.patch (3,178 bytes)
diff --git a/src/context/pool_query_context.c b/src/context/pool_query_context.c
index 83c0a2b2..ebc5c1d4 100644
--- a/src/context/pool_query_context.c
+++ b/src/context/pool_query_context.c
@@ -802,6 +802,7 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
POOL_CONNECTION *frontend;
POOL_CONNECTION_POOL *backend;
bool is_commit;
+ bool is_savepoint;
bool is_begin_read_write;
int i;
int len;
@@ -811,6 +812,7 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
frontend = session_context->frontend;
backend = session_context->backend;
is_commit = is_commit_or_rollback_query(query_context->parse_tree);
+ is_savepoint = is_savepoint_query(query_context->parse_tree);
is_begin_read_write = false;
len = 0;
string = NULL;
@@ -851,9 +853,12 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
/*
* If we are in streaming replication mode or logical replication mode,
- * we do not send COMMIT/ABORT to standbys if it's in I (idle) state.
+ * we do not send COMMIT/ABORT/SAVEPOINT etc. to standbys if it's in I (idle) state.
+ * This could happen when a multi-statement query includes INSERT/UPDATE. Because
+ * the multi-statement is transformed into a single-statement query by the minimal-parser and
+ * Pgpool-II does not recognize a transaction is started by a multi-statement query.
*/
- if (is_commit && MAIN_REPLICA && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I')
+ if ((is_commit || is_savepoint) && MAIN_REPLICA && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I')
{
pool_unset_node_to_be_sent(query_context, i);
continue;
@@ -952,6 +957,7 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context,
POOL_CONNECTION *frontend;
POOL_CONNECTION_POOL *backend;
bool is_commit;
+ bool is_savepoint;
bool is_begin_read_write;
int i;
int str_len;
@@ -963,6 +969,7 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context,
frontend = session_context->frontend;
backend = session_context->backend;
is_commit = is_commit_or_rollback_query(query_context->parse_tree);
+ is_savepoint = is_savepoint_query(query_context->parse_tree);
is_begin_read_write = false;
str_len = 0;
rewritten_len = 0;
@@ -1000,10 +1007,13 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context,
continue;
/*
- * If in reset context, we send COMMIT/ABORT to nodes those are not in
- * I(idle) state. This will ensure that transactions are closed.
+ * If we are in streaming replication mode or logical replication mode,
+ * we do not send COMMIT/ABORT/SAVEPOINT etc. to standbys if it's in I (idle) state.
+ * This could happen when a multi-statement query includes INSERT/UPDATE. Because
+ * the multi-statement is transformed into a single-statement query by the minimal-parser and
+ * Pgpool-II does not recognize a transaction is started by a multi-statement query.
*/
- if (is_commit && session_context->reset_context && TSTATE(backend, i) == 'I')
+ if ((is_commit || is_savepoint) && MAIN_REPLICA && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I')
{
pool_unset_node_to_be_sent(query_context, i);
continue;
|
|
|
Thank you for patch. Am I need again reinstall pgpool and apply the patch? |
|
|
Yes, the v2 patch should be applied to the source code. You need to run "git reset" before applying the patch so that previous patch is reverted. $ git reset --hard $ git apply /some/where/fix-multi-statement-v2.patch And build pgpool. |
|
|
Hi, after applying the new patch, we have the same failure from the app. But all is working fine; the error does not disturb our app. After long tests, I noticed that PGPOOL does not correctly see the status of the Standby database and all requests go to NODE 0. I decided to add the standby to PGPOOL again (pcp_attach_node); now the status is okay, but again the requests are going to NODE 0. |
|
|
Can you share the pgpool.log which includes the SELECT, plus previous BEGIN which started the transaction? |
|
|
Hi t-ishii, Has the version of pgpool we need, along with the patches, been officially published or do we still need to follow the instructions above to install pgpool? |
|
|
Actually we have decided to solve the problem using a more fundamental solution. The solution does not need fix-multi-statement-v2.patch. Background: All these kinds of problems are caused by the DML (INSERT/UPDATE/DELETE) optimization in the parser. The optimization is aiming to eliminate the parser overhead for very long SQL statements. Unfortunately, as a side effect, the optimization loses the information that a query string comprises multiple SQL statements. So I have imported the "psqlscan" module from PostgreSQL, which makes it possible to quickly detect whether a query includes multiple SQL statements. We first analyse a query string using psqlscan. If it's a multi-statement query, we skip the optimization and we don't lose the information regarding the multi-statement query. The fix using psqlscan is already in the git repository: https://git.postgresql.org/gitweb/?p=pgpool2.git;a=commit;h=d9ad90b0be38e3e8583006fd3b780905bd06e6dd We will make minor releases comprising the fix in this month. https://pgpool.net/mediawiki/index.php/Roadmap |
|
|
Very good news. Am i need just git clone https://git.postgresql.org/gitweb/?p=pgpool2.git;a=commit;h=d9ad90b0be38e3e8583006fd3b780905bd06e6dd like this? from which branch? |
|
|
as you remember i use v4_3_STABLE branch |
|
|
No, you just do: git clone https://git.postgresql.org/git/pgpool2.git cd pgpool2 git checkout V4_3_STABLE If you already have done git clone, just do cd pgpool2 git pull |
|
|
hi, sometimes we encounter a problem like: ===== LOG: reading and processing packets 2023-05-15 11:45:51.536: [unknown] pid 2766906: DETAIL: postmaster on DB node 1 was shutdown by administrative command pid 2773484: LOG: received degenerate backend request for node_id: 0 from pid [2773484] 2023-05-15 17:13:34.371: psql pid 2773484: LOG: signal_user1_to_parent_with_reason(0) 2023-05-15 17:13:34.372: main pid 2769610: LOG: Pgpool-II parent process received SIGUSR1 2023-05-15 17:13:34.372: main pid 2769610: LOG: Pgpool-II parent process has received failover request 2023-05-15 17:13:34.372: main pid 2769610: LOG: starting degeneration. shutdown host 172.16.212.52(5432) 2023-05-15 17:13:34.384: main pid 2769610: LOG: Restart all children 2023-05-15 17:13:34.579: main pid 2769610: LOG: find_primary_node_repeatedly: waiting for finding a primary node 2023-05-15 17:13:34.598: main pid 2769610: LOG: find_primary_node: standby node is 1 2023-05-15 17:13:35.608: main pid 2769610: LOG: find_primary_node: standby node is 1 ================ And production is stuck. .. after manually reattaching the node " ./pcp_attach_node -n 0" everything works again .. why does this problem occur? |
|
|
You mean shudown DB node 0, rather than 1? I am asking because > 2023-05-15 17:13:34.598: main pid 2769610: LOG: find_primary_node: standby node is 1 Pgpool-II recognizes node 1 is alive. But if so, > .. after manually reattaching the node " ./pcp_attach_node -n 0" everything works again .. why does this problem occur? pcp_attach_node against node 0 is impossible. Can you share what you actually did? |
|
|
I saw complaints from the client and noticed that node 0 (primary) could not be found..after I decided to quickly add this node manually( attache) ...now I just want to understand the reason for such a problem. How can I check it? |
|
|
In general you should check pgpool log and PostgreSQL logs. |
|
|
How can we improve the performance of our app? After connecting the pgpool, I had to setup replication in sync mode in postgres.conf (remote_apply) In connection with this, the performance has decreased. can you give us some advice? |
|
|
Hi t-ishii, please consider my question |
|
|
This is not only related to this issue and is not a bug report. This forum is for bugs. Please send questions regarding performance to pgpool-general mailing list. |
|
|
Thank you! |
| Date Modified | Username | Field | Change |
|---|---|---|---|
| 2023-01-19 16:32 | shahmir | New Issue | |
| 2023-01-19 16:32 | shahmir | Tag Attached: error | |
| 2023-01-19 21:15 | t-ishii | Note Added: 0004187 | |
| 2023-01-19 21:34 | shahmir | Note Added: 0004188 | |
| 2023-01-19 21:34 | shahmir | File Added: pgpool.conf | |
| 2023-01-23 18:01 | shahmir | Note Added: 0004191 | |
| 2023-01-24 10:29 | t-ishii | Assigned To | => t-ishii |
| 2023-01-24 10:29 | t-ishii | Status | new => assigned |
| 2023-01-24 10:40 | t-ishii | Note Added: 0004192 | |
| 2023-01-24 10:40 | t-ishii | Status | assigned => feedback |
| 2023-01-24 10:43 | t-ishii | Note Added: 0004193 | |
| 2023-01-25 00:38 | shahmir | Note Added: 0004194 | |
| 2023-01-25 00:38 | shahmir | Status | feedback => assigned |
| 2023-01-25 14:22 | t-ishii | Note Added: 0004195 | |
| 2023-01-25 14:22 | t-ishii | Note Edited: 0004195 | |
| 2023-01-25 15:40 | shahmir | Note Added: 0004196 | |
| 2023-01-25 15:43 | shahmir | Note Added: 0004197 | |
| 2023-01-25 16:41 | t-ishii | Note Added: 0004198 | |
| 2023-01-25 16:44 | t-ishii | Note Added: 0004199 | |
| 2023-01-25 16:44 | shahmir | Note Added: 0004200 | |
| 2023-01-25 16:57 | shahmir | Note Added: 0004201 | |
| 2023-01-25 17:15 | t-ishii | Note Added: 0004202 | |
| 2023-01-25 17:26 | t-ishii | Note Added: 0004203 | |
| 2023-01-25 17:29 | shahmir | Note Added: 0004204 | |
| 2023-01-25 22:22 | shahmir | Note Added: 0004205 | |
| 2023-01-26 17:17 | t-ishii | Note Added: 0004206 | |
| 2023-01-26 17:53 | shahmir | Note Added: 0004207 | |
| 2023-01-26 18:00 | t-ishii | Note Added: 0004208 | |
| 2023-01-26 18:04 | shahmir | Note Added: 0004209 | |
| 2023-01-26 18:11 | t-ishii | Note Added: 0004210 | |
| 2023-01-31 09:38 | t-ishii | Note Added: 0004211 | |
| 2023-01-31 09:39 | t-ishii | Note Added: 0004212 | |
| 2023-01-31 09:39 | t-ishii | File Added: deallocate.patch | |
| 2023-01-31 20:09 | shahmir | Note Added: 0004215 | |
| 2023-01-31 20:32 | t-ishii | Note Added: 0004216 | |
| 2023-01-31 21:15 | shahmir | Note Added: 0004217 | |
| 2023-02-01 08:20 | t-ishii | Note Added: 0004218 | |
| 2023-02-01 16:56 | shahmir | Note Added: 0004219 | |
| 2023-02-01 19:46 | t-ishii | Note Added: 0004220 | |
| 2023-02-01 19:58 | shahmir | Note Added: 0004221 | |
| 2023-02-01 20:31 | t-ishii | Note Added: 0004222 | |
| 2023-02-01 23:27 | shahmir | Note Added: 0004223 | |
| 2023-02-01 23:30 | shahmir | Note Added: 0004224 | |
| 2023-02-02 08:32 | t-ishii | Note Added: 0004225 | |
| 2023-02-02 15:47 | shahmir | Note Added: 0004226 | |
| 2023-02-02 16:12 | t-ishii | Note Added: 0004227 | |
| 2023-02-02 16:32 | shahmir | Note Added: 0004228 | |
| 2023-02-02 16:43 | t-ishii | Note Added: 0004229 | |
| 2023-02-06 21:28 | shahmir | Note Added: 0004230 | |
| 2023-02-08 15:56 | t-ishii | Note Added: 0004231 | |
| 2023-02-14 20:06 | shahmir | Note Added: 0004236 | |
| 2023-02-15 19:00 | t-ishii | Note Added: 0004237 | |
| 2023-02-15 21:28 | shahmir | Note Added: 0004238 | |
| 2023-02-15 23:32 | shahmir | Note Added: 0004239 | |
| 2023-02-16 10:51 | t-ishii | Note Added: 0004240 | |
| 2023-02-16 10:51 | t-ishii | Status | assigned => feedback |
| 2023-02-16 10:54 | t-ishii | Note Edited: 0004240 | |
| 2023-02-16 15:19 | shahmir | Note Added: 0004241 | |
| 2023-02-16 15:19 | shahmir | Status | feedback => assigned |
| 2023-02-16 15:31 | t-ishii | Note Added: 0004242 | |
| 2023-02-16 15:35 | shahmir | Note Added: 0004243 | |
| 2023-02-16 15:43 | t-ishii | Note Added: 0004244 | |
| 2023-02-16 15:46 | t-ishii | Note Added: 0004245 | |
| 2023-02-16 15:52 | shahmir | Note Added: 0004246 | |
| 2023-02-16 15:56 | shahmir | Note Added: 0004247 | |
| 2023-02-16 15:59 | t-ishii | Note Added: 0004248 | |
| 2023-02-16 16:00 | t-ishii | Note Edited: 0004248 | |
| 2023-02-16 16:03 | shahmir | Note Added: 0004249 | |
| 2023-02-16 16:12 | t-ishii | Note Added: 0004250 | |
| 2023-02-16 16:14 | t-ishii | Note Added: 0004251 | |
| 2023-02-16 16:37 | shahmir | Note Added: 0004252 | |
| 2023-02-16 18:16 | t-ishii | Note Added: 0004253 | |
| 2023-02-16 19:14 | shahmir | Note Added: 0004254 | |
| 2023-02-16 21:41 | t-ishii | Note Added: 0004255 | |
| 2023-02-16 21:46 | shahmir | Note Added: 0004256 | |
| 2023-02-16 21:46 | shahmir | Note Added: 0004257 | |
| 2023-02-16 21:48 | shahmir | Note Added: 0004258 | |
| 2023-02-16 21:48 | shahmir | Note Added: 0004259 | |
| 2023-02-16 22:28 | t-ishii | Note Added: 0004260 | |
| 2023-02-16 22:29 | t-ishii | Note Edited: 0004260 | |
| 2023-02-16 22:31 | t-ishii | Note Edited: 0004260 | |
| 2023-02-16 22:58 | shahmir | Note Added: 0004261 | |
| 2023-02-17 16:51 | shahmir | Note Added: 0004262 | |
| 2023-02-17 16:56 | t-ishii | Note Added: 0004263 | |
| 2023-02-17 16:59 | shahmir | Note Added: 0004264 | |
| 2023-02-17 17:04 | t-ishii | Note Added: 0004265 | |
| 2023-02-18 16:08 | t-ishii | Note Edited: 0004260 | |
| 2023-02-18 16:16 | t-ishii | Note Added: 0004266 | |
| 2023-02-18 16:22 | shahmir | Note Added: 0004267 | |
| 2023-02-18 16:24 | t-ishii | Note Added: 0004268 | |
| 2023-02-18 17:51 | shahmir | Note Added: 0004269 | |
| 2023-02-18 17:51 | shahmir | File Added: pgpool-2023-02-15_142451.log | |
| 2023-02-18 20:47 | t-ishii | Note Added: 0004270 | |
| 2023-02-19 16:42 | t-ishii | Note Added: 0004271 | |
| 2023-02-19 16:58 | shahmir | Note Added: 0004272 | |
| 2023-02-19 17:49 | t-ishii | Note Added: 0004273 | |
| 2023-02-19 17:55 | shahmir | Note Added: 0004274 | |
| 2023-02-19 18:00 | t-ishii | Note Added: 0004275 | |
| 2023-02-19 18:13 | shahmir | Note Added: 0004276 | |
| 2023-02-19 20:26 | t-ishii | Note Added: 0004277 | |
| 2023-02-20 14:18 | t-ishii | Note Added: 0004278 | |
| 2023-02-20 14:18 | t-ishii | File Added: fix-multi-statement.patch | |
| 2023-02-21 22:22 | shahmir | Note Added: 0004283 | |
| 2023-02-22 15:31 | shahmir | Note Added: 0004287 | |
| 2023-02-22 18:47 | t-ishii | Note Added: 0004289 | |
| 2023-02-22 20:59 | shahmir | Note Added: 0004290 | |
| 2023-02-22 21:01 | shahmir | Note Added: 0004291 | |
| 2023-02-22 21:01 | shahmir | File Added: pgpool-2023-02-22_154824.log | |
| 2023-02-22 22:07 | t-ishii | Note Added: 0004292 | |
| 2023-02-22 22:15 | shahmir | Note Added: 0004293 | |
| 2023-02-23 07:43 | t-ishii | Note Added: 0004294 | |
| 2023-02-23 14:28 | t-ishii | Note Added: 0004296 | |
| 2023-02-23 14:28 | t-ishii | File Added: fix-multi-statement-v2.patch | |
| 2023-02-23 15:15 | shahmir | Note Added: 0004297 | |
| 2023-02-23 15:25 | t-ishii | Note Added: 0004298 | |
| 2023-02-27 20:35 | shahmir | Note Added: 0004299 | |
| 2023-03-01 08:54 | t-ishii | Note Added: 0004300 | |
| 2023-05-02 16:08 | shahmir | Note Added: 0004370 | |
| 2023-05-02 17:10 | t-ishii | Note Added: 0004371 | |
| 2023-05-02 18:09 | shahmir | Note Added: 0004372 | |
| 2023-05-02 19:46 | shahmir | Note Added: 0004373 | |
| 2023-05-02 19:56 | t-ishii | Note Added: 0004374 | |
| 2023-05-15 22:33 | shahmir | Note Added: 0004378 | |
| 2023-05-17 09:33 | t-ishii | Note Added: 0004388 | |
| 2023-05-17 16:22 | shahmir | Note Added: 0004389 | |
| 2023-05-17 16:58 | t-ishii | Note Added: 0004390 | |
| 2023-06-20 16:20 | shahmir | Note Added: 0004401 | |
| 2023-06-21 15:13 | shahmir | Note Added: 0004403 | |
| 2023-06-21 18:18 | t-ishii | Note Added: 0004404 | |
| 2023-06-21 19:42 | shahmir | Note Added: 0004406 |