2013-09-30 6 views
1

내가 PostgreSQL의 성능포스트 그레스,

내 서버를 조정 내 PostgreSQL의 파일을 구성한에 도움을 필요로 성능 그러나 그것은 느리고 CPU 리소스는

난에 아무 생각보다 높은 120 %입니다 방법 이 문제를 해결하기 위해 Google에서 더 많은 정보를 검색하려고했지만 충분하지 않습니다. 자동 반복 문장과 색인 생성을 시도했지만 속도가 느립니다.

내 앱은 6000 개가 넘는 레코드를 삽입하는 gps 수신기입니다. 분은 비뚤림에 개발 된 TCP 서버를 사용하여 문제가 없는데, 문제는 내가 relatime에지도에 장치를 GPS를, 내 장고 애플 리케이션에서 내 데이터베이스에 쿼리를 각각 6 초 저장 프로 시저를 사용하여 요청 마지막 위치에 대한 쿼리를하고 있지만 쿼리가 50 개 이상의 장치에 느리게 도착하고 CPU가 이상을 사용하여 시작합니다. 자원의 120 %

Django App은 postgres 데이터베이스를 직접 연결하고 pgbouncer를 사용하여 스레드 방식으로 데이터베이스에 연결하는 tcp listener 서버입니다. 충돌을 원하지 않기 때문에 pgbouncer에서 내 django 웹 앱을 사용하지 않았습니다. pgbouncer에 GPS 장치 연결

난 당신이 더 나은 성능

내 저장 프로 시저를 부착하고, 내 conf의 파일과 내 CPU, 메모리 난을 얻을에 도움이 될 수 괭이 저장 프로 시저

CREATE OR REPLACE FUNCTION gps_get_live_location (
    _imeis varchar(8) 
) 
RETURNS TABLE (
    imei varchar, 
    device_id integer, 
    date_time_process timestamp with time zone, 
    latitude double precision, 
    longitude double precision, 
    course smallint, 
    speed smallint, 
    mileage integer, 
    gps_signal smallint, 
    gsm_signal smallint, 
    alarm_status boolean, 
    gsm_status boolean, 
    vehicle_status boolean, 
    alarm_over_speed boolean, 
    other text, 
    address varchar 
) AS $func$ 
DECLARE 
    arr varchar[]; 
BEGIN 
    arr := regexp_split_to_array(_imeis, E'\\s+'); 
    FOR i IN 1..array_length(arr, 1) LOOP 
     RETURN QUERY 
     SELECT 
     gpstracking_device_tracks.imei, 
     gpstracking_device_tracks.device_id, 
     gpstracking_device_tracks.date_time_process, 
     gpstracking_device_tracks.latitude, 
     gpstracking_device_tracks.longitude, 
     gpstracking_device_tracks.course, 
     gpstracking_device_tracks.speed, 
     gpstracking_device_tracks.mileage, 
     gpstracking_device_tracks.gps_signal, 
     gpstracking_device_tracks.gsm_signal, 
     gpstracking_device_tracks.alarm_status, 
     gpstracking_device_tracks.gps_status, 
     gpstracking_device_tracks.vehicle_status, 
     gpstracking_device_tracks.alarm_over_speed, 
     gpstracking_device_tracks.other, 
     gpstracking_device_tracks.address 
     FROM gpstracking_device_tracks 
     WHERE gpstracking_device_tracks.imei = arr[i]::VARCHAR 
     AND gpstracking_device_tracks.date_time_process >= date_trunc('hour', now()) 
     AND gpstracking_device_tracks.date_time_process <= NOW() 
     ORDER BY gpstracking_device_tracks.date_time_process DESC 
     LIMIT 1; 
    END LOOP; 
    RETURN; 
END; 
$func$ 
LANGUAGE plpgsql VOLATILE SECURITY DEFINER; 

$ 고양이 적은 /etc/postgresql/9.1/main/postgresql.conf

kernel.shmmax = 6871947673 
kernel.shmall = 6871947673 
fs.file-max = 4194304 

$ 고양이 /etc/sysctl.conf에

nformation

data_directory = '/var/lib/postgresql/9.1/main'   # use data in another directory 
hba_file = '/etc/postgresql/9.1/main/pg_hba.conf'  # host-based authentication file 
ident_file = '/etc/postgresql/9.1/main/pg_ident.conf' # ident configuration file 
external_pid_file = '/var/run/postgresql/9.1-main.pid'   # write an extra PID file 
listen_addresses = 'localhost'   # what IP address(es) to listen on; 
port = 5432        # (change requires restart) 
max_connections = 80     # (change requires restart) 
superuser_reserved_connections = 3  # (change requires restart) 
unix_socket_directory = '/var/run/postgresql'   # (change requires restart) 
#unix_socket_group = ''     # (change requires restart) 
#unix_socket_permissions = 0777   # begin with 0 to use octal notation 
#bonjour = off       # advertise server via Bonjour 
#bonjour_name = ''      # defaults to the computer name 
ssl = true        # (change requires restart) 
#ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH'  # allowed SSL ciphers 
#ssl_renegotiation_limit = 512MB  # amount of data between renegotiations 
#password_encryption = on 
#db_user_namespace = off 
#krb_server_keyfile = '' 
#krb_srvname = 'postgres'    # (Kerberos only) 
#krb_caseins_users = off 
#tcp_keepalives_idle = 0    # TCP_KEEPIDLE, in seconds; 
#tcp_keepalives_interval = 0   # TCP_KEEPINTVL, in seconds; 
#tcp_keepalives_count = 0    # TCP_KEEPCNT; 
# shared_buffers = 4096MB      # min 128kB 
temp_buffers = 16MB      # min 800kB 
# work_mem = 80MB        # min 64kB 
# maintenance_work_mem = 2048MB   # min 1MB 
max_stack_depth = 4MB     # min 100kB 
#max_files_per_process = 1000   # min 25 
#vacuum_cost_delay = 0ms    # 0-100 milliseconds 
#vacuum_cost_page_hit = 1    # 0-10000 credits 
#vacuum_cost_page_miss = 10    # 0-10000 credits 
#vacuum_cost_page_dirty = 20   # 0-10000 credits 
#vacuum_cost_limit = 200    # 1-10000 credits 
#bgwriter_delay = 200ms     # 10-10000ms between rounds 
#bgwriter_lru_maxpages = 100   # 0-1000 max buffers written/round 
#bgwriter_lru_multiplier = 2.0   # 0-10.0 multipler on buffers scanned/round 
#effective_io_concurrency = 1   # 1-1000. 0 disables prefetching 
#wal_level = minimal     # minimal, archive, or hot_standby 
#fsync = on        # turns forced synchronization on or off 
#synchronous_commit = on    # synchronization level; on, off, or local 
#wal_sync_method = fsync    # the default is the first option 
#full_page_writes = on     # recover from partial page writes 
#wal_buffers = -1      # min 32kB, -1 sets based on shared_buffers 
#wal_writer_delay = 200ms    # 1-10000 milliseconds 
#commit_delay = 0      # range 0-100000, in microseconds 
#commit_siblings = 5     # range 1-1000 
# checkpoint_segments = 64    # in logfile segments, min 1, 16MB each 
checkpoint_timeout = 5min    # range 30s-1h 
# checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 
#checkpoint_warning = 30s    # 0 disables 
#archive_mode = off    # allows archiving to be done 
#archive_command = ''   # command to use to archive a logfile segment 
#archive_timeout = 0   # force a logfile segment switch after this 
#max_wal_senders = 0   # max number of walsender processes 
#wal_sender_delay = 1s   # walsender cycle time, 1-10000 milliseconds 
#wal_keep_segments = 0   # in logfile segments, 16MB each; 0 disables 
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed 
#replication_timeout = 60s  # in milliseconds; 0 disables 
#synchronous_standby_names = '' # standby servers that provide sync rep 
#hot_standby = off      # "on" allows queries during recovery 
#max_standby_archive_delay = 30s  # max delay before canceling queries 
#max_standby_streaming_delay = 30s  # max delay before canceling queries 
#wal_receiver_status_interval = 10s  # send replies at least this often 
#hot_standby_feedback = off    # send info from standby to prevent 
#enable_bitmapscan = on 
#enable_hashagg = on 
#enable_hashjoin = on 
#enable_indexscan = on 
#enable_material = on 
#enable_mergejoin = on 
#enable_nestloop = on 
#enable_seqscan = on 
#enable_sort = on 
#enable_tidscan = on 
#seq_page_cost = 1.0     # measured on an arbitrary scale 
#random_page_cost = 4.0     # same scale as above 
cpu_tuple_cost = 0.01     # same scale as above 
cpu_index_tuple_cost = 0.005   # same scale as above 
#cpu_operator_cost = 0.0025    # same scale as above 
# effective_cache_size = 8192MB 
#geqo = on 
#geqo_threshold = 12 
#geqo_effort = 5      # range 1-10 
#geqo_pool_size = 0      # selects default based on effort 
#geqo_generations = 0     # selects default based on effort 
#geqo_selection_bias = 2.0    # range 1.5-2.0 
#geqo_seed = 0.0      # range 0.0-1.0 
#default_statistics_target = 100  # range 1-10000 
#constraint_exclusion = partition  # on, off, or partition 
#cursor_tuple_fraction = 0.1   # range 0.0-1.0 
#from_collapse_limit = 8 
#join_collapse_limit = 8    # 1 disables collapsing of explicit 
#log_destination = 'stderr'    # Valid values are combinations of 
#logging_collector = off    # Enable capturing of stderr and csvlog 
# These are only used if logging_collector is on: 
#log_directory = 'pg_log'    # directory where log files are written, 
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'  # log file name pattern, 
#log_file_mode = 0600     # creation mode for log files, 
#log_truncate_on_rotation = off   # If on, an existing log file with the 
#log_rotation_age = 1d     # Automatic rotation of logfiles will 
#log_rotation_size = 10MB    # Automatic rotation of logfiles will 
#syslog_facility = 'LOCAL0' 
#syslog_ident = 'postgres' 
#silent_mode = off      # Run server silently. 
#client_min_messages = notice   # values in order of decreasing detail: 
#log_min_messages = warning    # values in order of decreasing detail: 
#log_min_error_statement = error  # values in order of decreasing detail: 
#log_min_duration_statement = -1  # -1 is disabled, 0 logs all statements 
#debug_print_parse = off 
#debug_print_rewritten = off 
#debug_print_plan = off 
#debug_pretty_print = on 
#log_checkpoints = off 
#log_connections = off 
#log_disconnections = off 
#log_duration = off 
#log_error_verbosity = default   # terse, default, or verbose messages 
#log_hostname = off 
log_line_prefix = '%t '     # special values: 
#log_lock_waits = off     # log lock waits >= deadlock_timeout 
#log_statement = 'none'     # none, ddl, mod, all 
#log_temp_files = -1     # log temporary files equal or larger 
#log_timezone = '(defaults to server environment setting)' 
#track_activities = on 
#track_counts = on 
#track_functions = none     # none, pl, all 
#track_activity_query_size = 1024  # (change requires restart) 
#update_process_title = on 
#stats_temp_directory = 'pg_stat_tmp' 
#log_parser_stats = off 
#log_planner_stats = off 
#log_executor_stats = off 
#log_statement_stats = off 
#autovacuum = on      # Enable autovacuum subprocess? 'on' 
#log_autovacuum_min_duration = -1  # -1 disables, 0 logs all actions and 
#autovacuum_max_workers = 3    # max number of autovacuum subprocesses 
#autovacuum_naptime = 1min    # time between autovacuum runs 
#autovacuum_vacuum_threshold = 50  # min number of row updates before 
#autovacuum_analyze_threshold = 50  # min number of row updates before 
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum 
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze 
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum 
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for 
#autovacuum_vacuum_cost_limit = -1  # default vacuum cost limit for 
#search_path = '"$user",public'   # schema names 
#default_tablespace = ''    # a tablespace name, '' uses the default 
#temp_tablespaces = ''     # a list of tablespace names, '' uses 
#check_function_bodies = on 
#default_transaction_isolation = 'read committed' 
#default_transaction_read_only = off 
#default_transaction_deferrable = off 
#session_replication_role = 'origin' 
#statement_timeout = 0     # in milliseconds, 0 is disabled 
#vacuum_freeze_min_age = 50000000 
#vacuum_freeze_table_age = 150000000 
#bytea_output = 'hex'     # hex, escape 
#xmlbinary = 'base64' 
#xmloption = 'content' 
datestyle = 'iso, mdy' 
#intervalstyle = 'postgres' 
#timezone = '(defaults to server environment setting)' 
#timezone_abbreviations = 'Default'  # Select the set of available time zone 
#extra_float_digits = 0     # min -15, max 3 
#client_encoding = sql_ascii   # actually, defaults to database 
lc_messages = 'en_US.UTF-8'      # locale for system error message 
lc_monetary = 'en_US.UTF-8'      # locale for monetary formatting 
lc_numeric = 'en_US.UTF-8'      # locale for number formatting 
lc_time = 'en_US.UTF-8'       # locale for time formatting 
default_text_search_config = 'pg_catalog.english' 
#dynamic_library_path = '$libdir' 
#local_preload_libraries = '' 
#deadlock_timeout = 1s 
#max_locks_per_transaction = 64   # min 10 
#max_pred_locks_per_transaction = 64 # min 10 
#array_nulls = on 
#backslash_quote = safe_encoding  # on, off, or safe_encoding 
#default_with_oids = off 
#escape_string_warning = on 
#lo_compat_privileges = off 
#quote_all_identifiers = off 
#sql_inheritance = on 
#standard_conforming_strings = on 
#synchronize_seqscans = on 
#transform_null_equals = off 
#exit_on_error = off       # terminate session on any error? 
#restart_after_crash = on      # reinitialize after backend crash? 
#custom_variable_classes = ''   # list of custom variable class names 
default_statistics_target = 50 # pgtune wizard 2013-09-24 
maintenance_work_mem = 960MB # pgtune wizard 2013-09-24 
constraint_exclusion = on # pgtune wizard 2013-09-24 
checkpoint_completion_target = 0.9 # pgtune wizard 2013-09-24 
effective_cache_size = 11GB # pgtune wizard 2013-09-24 
work_mem = 96MB # pgtune wizard 2013-09-24 
wal_buffers = 8MB # pgtune wizard 2013-09-24 
checkpoint_segments = 16 # pgtune wizard 2013-09-24 
shared_buffers = 3840MB # pgtune wizard 2013-09-24 
,451,515,

$ 고양이

[databases] 
anfitrion = host=127.0.0.1 port=5432 dbname=**** user=**** password=**** client_encoding=UNICODE datestyle=ISO connect_query='SELECT 1' 

[pgbouncer] 
logfile = /var/log/postgresql/pgbouncer.log 
pidfile = /var/run/postgresql/pgbouncer.pid 
listen_addr = 127.0.0.1 
listen_port = 6432 
unix_socket_dir = /var/run/postgresql 
auth_type = trust 
auth_file = /etc/pgbouncer/userlist.txt 
;admin_users = user2, someadmin, otheradmin 
;stats_users = stats, root 
pool_mode = statement 
server_reset_query = DISCARD ALL 
;ignore_startup_parameters = extra_float_digits 
;server_check_query = select 1 
;server_check_delay = 30 
; total number of clients that can connect 
max_client_conn = 1000 
default_pool_size = 80 
;reserve_pool_size = 5 
;reserve_pool_timeout = 3 
;log_connections = 1 
;log_disconnections = 1 
;log_pooler_errors = 1 
;server_round_robin = 0 
;server_lifetime = 1200 
;server_idle_timeout = 60 
;server_connect_timeout = 15 
;server_login_retry = 15 
;query_timeout = 0 
;query_wait_timeout = 0 
;client_idle_timeout = 0 
;client_login_timeout = 60 
;autodb_idle_timeout = 3600 
;pkt_buf = 2048 
;listen_backlog = 128 
;tcp_defer_accept = 0 
;tcp_socket_buffer = 0 
;tcp_keepalive = 1 
;tcp_keepcnt = 0 
;tcp_keepidle = 0 
;tcp_keepintvl = 0 
;dns_max_ttl = 15 
;dns_zone_check_period = 0 

$ 무료 -h

   total  used  free  shared buffers  cached 
Mem:   15G  11G  4.1G   0B  263M  10G 
-/+ buffers/cache:  1.2G  14G 
Swap:   30G   0B  30G 

$ 고양이/PROC/

processor  : 0 
vendor_id  : GenuineIntel 
cpu family  : 6 
model   : 58 
model name  : Intel(R) Xeon(R) CPU E3-1220 V2 @ 3.10GHz 
stepping  : 9 
microcode  : 0x15 
cpu MHz   : 3101.000 
cache size  : 8192 KB 
physical id  : 0 
siblings  : 4 
core id   : 0 
cpu cores  : 4 
apicid   : 0 
initial apicid : 0 
fpu    : yes 
fpu_exception : yes 
cpuid level  : 13 
wp    : yes 
flags   : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms 
bogomips  : 6186.05 
clflush size : 64 
cache_alignment : 64 
address sizes : 36 bits physical, 48 bits virtual 
power management: 
processor  : 1 
vendor_id  : GenuineIntel 
cpu family  : 6 
model   : 58 
model name  : Intel(R) Xeon(R) CPU E3-1220 V2 @ 3.10GHz 
stepping  : 9 
microcode  : 0x15 
cpu MHz   : 3101.000 
cache size  : 8192 KB 
physical id  : 0 
siblings  : 4 
core id   : 1 
cpu cores  : 4 
apicid   : 2 
initial apicid : 2 
fpu    : yes 
fpu_exception : yes 
cpuid level  : 13 
wp    : yes 
flags   : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms 
bogomips  : 6185.65 
clflush size : 64 
cache_alignment : 64 
address sizes : 36 bits physical, 48 bits virtual 
power management: 
processor  : 2 
vendor_id  : GenuineIntel 
cpu family  : 6 
model   : 58 
model name  : Intel(R) Xeon(R) CPU E3-1220 V2 @ 3.10GHz 
stepping  : 9 
microcode  : 0x15 
cpu MHz   : 3101.000 
cache size  : 8192 KB 
physical id  : 0 
siblings  : 4 
core id   : 2 
cpu cores  : 4 
apicid   : 4 
initial apicid : 4 
fpu    : yes 
fpu_exception : yes 
cpuid level  : 13 
wp    : yes 
flags   : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm ida arat epb xsaveopt pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase smep erms 
bogomips  : 6185.66 
clflush size : 64 
cache_alignment : 64 
address sizes : 36 bits physical, 48 bits virtual 
power management: 
+0

http://postgis.net/ – dm03514

+0

96메가바이트 workmem 보인다 매우 높은, 여러 사용자가있을 때 확실하게 정렬 작업을 수행하기 위해 여러 번에게 96MB를 취할 수있는 각을 보장 할 수없는 등에게 : 내부 쿼리는 같을 것 (80 conn는 적어도 7.5 Go of RAM을 의미합니다). 그리고 gpstracking_device_tracks에서 사용할 수있는 색인을 제공해야합니다. – regilero

+0

함수에 쿼리의'EXPLAIN ANALYZE'를 추가 할 수 있습니까? –

답변

1
FOR i IN 1..array_length(arr, 1) LOOP 
cpuinfo의에게 /etc/pgbouncer/pgbouncer.ini

절대 이렇게하십시오. 스토어드 프로시 저는 결과 세트를 반복적으로 작성합니다. 실제로 에 대해 각각 개의 대상 장치에 대해 하나의 쿼리를 실행하고 있습니다. 대신, 사용 ANY + ROW_NUMBER :

arr := regexp_split_to_array(_imeis, E'\\s+'); 
RETURN QUERY 
    SELECT * FROM (
     SELECT 
     ROW_NUMBER() OVER(PARTITION BY dt.imei ORDER BY dt.date_time_process DESC) as rnumber 
     --other fields here 
     FROM gpstracking_device_tracks dt --added alias 
     WHERE dt.imei = ANY(arr) 
     AND dt.date_time_process >= date_trunc('hour', now()) 
     AND dt.date_time_process <= NOW() 
    ) 
    where rnumber = 1 

내가 솔루션의 정확성을 확인하기 위해 손에 포스트 그레스와 데이터베이스가없는,하지만 난 당신이이 지점을 디버깅 할 수 있어야한다고 생각합니다. 적어도, 따라야 할 방향입니다.

자세한 내용은 window functions docsrow and array comparisons docs을 참조하십시오.

응답 성을 더욱 향상 시키려면 위치 업데이트 만 가져 오는 것이 좋습니다. 따라서 6 초마다 모든 기록을 가져 오는 대신 페이지로드/app init/any에서 페이지를 한 번 가져오고 서버를 번으로 업데이트하여 만 폴링하십시오.

SELECT 
     ROW_NUMBER() OVER(PARTITION BY dt.imei ORDER BY dt.date_time_process DESC) as rnumber 
     --other fields here 
    FROM gpstracking_device_tracks dt --added alias 
    WHERE dt.imei = ANY(arr) 
    AND dt.date_time_process >= @last_poll_timestamp 
    AND dt.date_time_process <= NOW() 
+0

나는 그것에 노력하고 있지만 rnumber는 나를 오류로 만드는 where 절에서 사용할 수 없다. – Carlos

+1

수정하는 가장 간단한 방법은이를 받아들이는 select에 랩핑하고 'where rnumber = 1'을 바깥 쪽 select에 적용하는 것이다.'select * from (query goes 여기) rnumber = 1'. 그러나 그러한 접근 방식의 성능에 대해서는 확신 할 수 없습니다. 답변을 업데이트했습니다. – J0HN

+0

응답으로 선택하면 응답이 정말 좋아집니다. – Carlos