本文共 12242 字,大约阅读时间需要 40 分钟。
-- PostgreSQL's pg_stat*/pg_statio* views expose cumulative point-in-time
-- counters, so past values cannot be compared with current ones out of the
-- box.  This toolkit periodically snapshots those views into history tables
-- in a central repository so table/index usage trends can be analyzed.

-- Schema holding all snapshot data (create on the central repository host).
create schema db_stat;

-- History of pg_stat_all_indexes (index usage counters).
create table db_stat.snapshot_pg_stat_all_indexes(
    relid            int,
    indexrelid       int,
    schemaname       varchar(200),
    relname          varchar(550),
    indexrelname     varchar(550),
    idx_scan         bigint,
    idx_tup_read     bigint,
    idx_tup_fetch    bigint,
    snapid           int,          -- snapshot id, filled by process_snapshot_table()
    snap_create_time timestamp,    -- when the snapshot was tagged
    host_ip          cidr,         -- IP of the sampled database host
    host_port        int,
    host_type        varchar(20),  -- MASTER / SLAVE
    comment          varchar(500)
);
-- varchar_pattern_ops so LIKE 'prefix%' lookups on names can use the index.
create index idx_stat_indexe_relname_indexrelname_createtime
    on db_stat.snapshot_pg_stat_all_indexes(relname varchar_pattern_ops, indexrelname varchar_pattern_ops, snap_create_time);
create index idx_stat_indexe_snapid
    on db_stat.snapshot_pg_stat_all_indexes(snapid);

-- History of pg_stat_all_tables (table activity and vacuum/analyze counters).
create table db_stat.snapshot_pg_stat_all_tables(
    relid             int,
    schemaname        varchar(200),
    relname           varchar(550),
    seq_scan          bigint,
    seq_tup_read      bigint,
    idx_scan          bigint,
    idx_tup_fetch     bigint,
    n_tup_ins         bigint,
    n_tup_upd         bigint,
    n_tup_del         bigint,
    n_tup_hot_upd     bigint,
    n_live_tup        bigint,
    n_dead_tup        bigint,
    last_vacuum       timestamp,
    last_autovacuum   timestamp,
    last_analyze      timestamp,
    last_autoanalyze  timestamp,
    vacuum_count      bigint,
    autovacuum_count  bigint,
    analyze_count     bigint,
    autoanalyze_count bigint,
    snapid            int,
    snap_create_time  timestamp,
    host_ip           cidr,
    host_port         int,
    host_type         varchar(20),
    comment           varchar(500)
);
create index idx_stat_table_relname_createtime
    on db_stat.snapshot_pg_stat_all_tables(relname varchar_pattern_ops, snap_create_time);
create index idx_stat_table_snapid
    on db_stat.snapshot_pg_stat_all_tables(snapid);

-- History of pg_statio_all_indexes (index block I/O counters).
create table db_stat.snapshot_pg_statio_all_indexes(
    relid            int,
    indexrelid       int,
    schemaname       varchar(200),
    relname          varchar(550),
    indexrelname     varchar(550),
    idx_blks_read    bigint,
    idx_blks_hit     bigint,
    snapid           int,
    snap_create_time timestamp,
    host_ip          cidr,
    host_port        int,
    host_type        varchar(20),
    comment          varchar(500)
);
create index idx_statio_indexe_relname_indexrelname_createtime
    on db_stat.snapshot_pg_statio_all_indexes(relname varchar_pattern_ops, indexrelname varchar_pattern_ops, snap_create_time);
create index idx_statio_indexe_snapid
    on db_stat.snapshot_pg_statio_all_indexes(snapid);

-- History of pg_statio_all_tables (heap/index/toast block I/O counters).
create table db_stat.snapshot_pg_statio_all_tables(
    relid            int,
    schemaname       varchar(200),
    relname          varchar(550),
    heap_blks_read   bigint,
    heap_blks_hit    bigint,
    idx_blks_read    bigint,
    idx_blks_hit     bigint,
    toast_blks_read  bigint,
    toast_blks_hit   bigint,
    tidx_blks_read   bigint,
    tidx_blks_hit    bigint,
    snapid           int,
    snap_create_time timestamp,
    host_ip          cidr,
    host_port        int,
    host_type        varchar(20),
    comment          varchar(500)
);
create index idx_statio_table_relname_createtime
    on db_stat.snapshot_pg_statio_all_tables(relname varchar_pattern_ops, snap_create_time);
create index idx_statio_table_snapid
    on db_stat.snapshot_pg_statio_all_tables(snapid);

-- Sequence providing snapshot ids.
create sequence db_stat.seq_snapshot minvalue 1 maxvalue 99999999999999;

-- After each raw load, tag the freshly loaded rows (snapid IS NULL) with a
-- new snapshot id, the load time, and the metadata of the sampled host.
--
-- Parameters:
--   i_host_ip   - IP of the host the statistics came from
--   i_host_port - port of that PostgreSQL instance
--   i_host_type - role label, e.g. MASTER or SLAVE
--   i_comment   - free-form note attached to the snapshot
-- Returns 1 on success, 0 on failure (error details emitted via RAISE NOTICE).
create or replace function db_stat.process_snapshot_table(
    in i_host_ip   cidr,
    in i_host_port int,
    in i_host_type varchar,
    in i_comment   varchar default ''
) returns int as $$
declare
    v_snapid int;
    _detail  text;
    _hint    text;
    _message text;
begin
    select nextval('db_stat.seq_snapshot') into v_snapid;

    update db_stat.snapshot_pg_stat_all_indexes
       set snapid = v_snapid, snap_create_time = now(),
           host_ip = i_host_ip, host_port = i_host_port,
           host_type = i_host_type, comment = i_comment
     where snapid is null;

    update db_stat.snapshot_pg_stat_all_tables
       set snapid = v_snapid, snap_create_time = now(),
           host_ip = i_host_ip, host_port = i_host_port,
           host_type = i_host_type, comment = i_comment
     where snapid is null;

    update db_stat.snapshot_pg_statio_all_indexes
       set snapid = v_snapid, snap_create_time = now(),
           host_ip = i_host_ip, host_port = i_host_port,
           host_type = i_host_type, comment = i_comment
     where snapid is null;

    update db_stat.snapshot_pg_statio_all_tables
       set snapid = v_snapid, snap_create_time = now(),
           host_ip = i_host_ip, host_port = i_host_port,
           host_type = i_host_type, comment = i_comment
     where snapid is null;

    -- Return value: 1 = success, 0 = failure.
    return 1;
exception when others then
    get stacked diagnostics
        _message = message_text,
        _detail  = pg_exception_detail,
        _hint    = pg_exception_hint;
    raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint;
    return 0;
end;
$$ language plpgsql;

-- Manual collection directly on the repository host, for testing.
INSERT INTO db_stat.snapshot_pg_stat_all_indexes
    (relid, indexrelid, schemaname, relname, indexrelname, idx_scan, idx_tup_read, idx_tup_fetch)
SELECT relid, indexrelid, schemaname, relname, indexrelname, idx_scan, idx_tup_read, idx_tup_fetch
FROM pg_stat_all_indexes;

INSERT INTO db_stat.snapshot_pg_stat_all_tables
    (relid, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch,
     n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup,
     last_vacuum, last_autovacuum, last_analyze, last_autoanalyze,
     vacuum_count, autovacuum_count, analyze_count, autoanalyze_count)
SELECT relid, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch,
       n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup,
       last_vacuum, last_autovacuum, last_analyze, last_autoanalyze,
       vacuum_count, autovacuum_count, analyze_count, autoanalyze_count
FROM pg_stat_all_tables;

INSERT INTO db_stat.snapshot_pg_statio_all_indexes
    (relid, indexrelid, schemaname, relname, indexrelname, idx_blks_read, idx_blks_hit)
SELECT relid, indexrelid, schemaname, relname, indexrelname, idx_blks_read, idx_blks_hit
FROM pg_statio_all_indexes;

INSERT INTO db_stat.snapshot_pg_statio_all_tables
    (relid, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit,
     toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit)
SELECT relid, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit,
       toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit
FROM pg_statio_all_tables;

--select db_stat.process_snapshot_table('192.168.174.10',5432,'MASTER','');

-- Shell script deployed on every monitored host; a daily cron job runs it to
-- collect statistics and push them to the central repository.
cat snap_stat.sh
#!/bin/sh
# Export pg_stat*/pg_statio* counters from the LOCAL PostgreSQL instance and
# load them into the central db_stat repository, then tag the snapshot.
# NOTE: -H is only recorded as snapshot metadata; the export always connects
# to the local instance (no -h), which is why \copy writes a local CSV file.
. ~/.bash_profile
. /etc/profile

PSQL="psql"
# Central repository connection (where schema db_stat lives).
REPO_HOST=192.168.174.11
REPO_PORT=5432
REPO_USER=postgres
REPO_DB=postgres

help_msg ()
{
 echo ""
 echo "Usage:"
 echo " -f 要输出结果的文件,如果为null,则默认为/tmp/snapshot_pg_stat.log"
 echo " -u 数据库连接用户名,如果为null,则为postgresql默认"
 echo " -d 连接的数据库名,如果为null,则为postgresql默认"
 echo " -H 数据库的主机ip,如果为null,则为postgresql默认"
 echo " -p 数据库的端口,如果为null,则为postgresql默认"
 echo " -m 数据库的类型,MASTER为主,SLAVE为从"
 echo ""
 exit 0
}
# end functions

while getopts "f:u:d:H:p:m:" flag
do
 case $flag in
 f) FILENAME=$OPTARG ;;
 u) USERNAME=$OPTARG ;;
 d) DATABASE=$OPTARG ;;
 H) HOST=$OPTARG ;;
 p) PORT=$OPTARG ;;
 m) DATABASE_TYPE=$OPTARG ;;
 \?|h) help_msg ;;
 esac
done

# Defaults.  Variables are quoted so empty/whitespace values cannot break the
# tests, and POSIX '=' is used instead of the bash-only '==' (script is /bin/sh).
if [ "x$USERNAME" = "x" ]; then
 USERNAME=postgres
fi
if [ "x$DATABASE" = "x" ]; then
 DATABASE=postgres
fi
if [ "x$HOST" = "x" ]; then
 # -H is mandatory: it identifies the sampled host in the repository.
 help_msg
fi
if [ "x$PORT" = "x" ]; then
 PORT=5432
fi
if [ "x$DATABASE_TYPE" = "x" ]; then
 DATABASE_TYPE=MASTER
fi
if [ "x$FILENAME" = "x" ]; then
 FILENAME=/tmp/snapshot_pg_stat.log
fi

OUTPUT_FILENAME=/tmp/snapshot_pg_stat.csv

# Truncate (and implicitly create) the log file; the redirect guarantees it
# exists, so no separate existence check is needed.
echo "" > "$FILENAME"

echo "脚本于时间 `date "+%Y-%m-%d %H:%M:%S"` 开始执行" >> "$FILENAME"

# For each statistics view: client-side \copy the local counters to CSV
# (avoids the superuser requirement of server-side COPY TO), then \copy the
# CSV into the matching staging table on the central repository.
echo "脚本开始于`date "+%Y-%m-%d %H:%M:%S"` 处理pg_stat_all_indexes表" >> "$FILENAME"
$PSQL -p "$PORT" -U "$USERNAME" -d "$DATABASE" -c "\copy (select relid ,indexrelid ,schemaname ,relname ,indexrelname,idx_scan ,idx_tup_read,idx_tup_fetch from pg_stat_all_indexes) to '$OUTPUT_FILENAME' with csv"
$PSQL -p "$REPO_PORT" -U "$REPO_USER" -d "$REPO_DB" -h "$REPO_HOST" -c "\copy db_stat.snapshot_pg_stat_all_indexes(relid ,indexrelid ,schemaname ,relname ,indexrelname,idx_scan ,idx_tup_read,idx_tup_fetch) from '$OUTPUT_FILENAME' with csv"

echo "脚本开始于`date "+%Y-%m-%d %H:%M:%S"` 处理pg_stat_all_tables表" >> "$FILENAME"
$PSQL -p "$PORT" -U "$USERNAME" -d "$DATABASE" -c "\copy (select relid ,schemaname ,relname ,seq_scan ,seq_tup_read ,idx_scan ,idx_tup_fetch ,n_tup_ins ,n_tup_upd ,n_tup_del ,n_tup_hot_upd ,n_live_tup ,n_dead_tup ,last_vacuum ,last_autovacuum ,last_analyze ,last_autoanalyze ,vacuum_count ,autovacuum_count ,analyze_count ,autoanalyze_count from pg_stat_all_tables) to '$OUTPUT_FILENAME' with csv"
$PSQL -p "$REPO_PORT" -U "$REPO_USER" -d "$REPO_DB" -h "$REPO_HOST" -c "\copy db_stat.snapshot_pg_stat_all_tables(relid ,schemaname ,relname ,seq_scan ,seq_tup_read ,idx_scan ,idx_tup_fetch ,n_tup_ins ,n_tup_upd ,n_tup_del ,n_tup_hot_upd ,n_live_tup ,n_dead_tup ,last_vacuum ,last_autovacuum ,last_analyze ,last_autoanalyze ,vacuum_count ,autovacuum_count ,analyze_count ,autoanalyze_count) from '$OUTPUT_FILENAME' with csv"

echo "脚本开始于`date "+%Y-%m-%d %H:%M:%S"` 处理pg_statio_all_indexes表" >> "$FILENAME"
$PSQL -p "$PORT" -U "$USERNAME" -d "$DATABASE" -c "\copy (select relid ,indexrelid ,schemaname ,relname ,indexrelname ,idx_blks_read ,idx_blks_hit from pg_statio_all_indexes) to '$OUTPUT_FILENAME' with csv"
$PSQL -p "$REPO_PORT" -U "$REPO_USER" -d "$REPO_DB" -h "$REPO_HOST" -c "\copy db_stat.snapshot_pg_statio_all_indexes(relid ,indexrelid ,schemaname ,relname ,indexrelname ,idx_blks_read ,idx_blks_hit) from '$OUTPUT_FILENAME' with csv"

echo "脚本开始于`date "+%Y-%m-%d %H:%M:%S"` 处理pg_statio_all_tables表" >> "$FILENAME"
$PSQL -p "$PORT" -U "$USERNAME" -d "$DATABASE" -c "\copy (select relid ,schemaname ,relname ,heap_blks_read ,heap_blks_hit ,idx_blks_read ,idx_blks_hit ,toast_blks_read ,toast_blks_hit ,tidx_blks_read ,tidx_blks_hit from pg_statio_all_tables) to '$OUTPUT_FILENAME' with csv"
$PSQL -p "$REPO_PORT" -U "$REPO_USER" -d "$REPO_DB" -h "$REPO_HOST" -c "\copy db_stat.snapshot_pg_statio_all_tables(relid ,schemaname ,relname ,heap_blks_read ,heap_blks_hit ,idx_blks_read ,idx_blks_hit ,toast_blks_read ,toast_blks_hit ,tidx_blks_read ,tidx_blks_hit) from '$OUTPUT_FILENAME' with csv"

# Tag the freshly loaded rows with a new snapshot id and this host's metadata.
$PSQL -p "$REPO_PORT" -U "$REPO_USER" -d "$REPO_DB" -h "$REPO_HOST" -c "select db_stat.process_snapshot_table('$HOST',$PORT,'$DATABASE_TYPE','database stat snapshot');"

echo "############################################################################################" >> "$FILENAME"
echo "脚本于时间 `date "+%Y-%m-%d %H:%M:%S"` 结束执行" >> "$FILENAME"

-- Maintenance: empty the snapshot tables on the central repository
-- (intentionally unfiltered; TRUNCATE resets the whole history).
truncate table db_stat.snapshot_pg_stat_all_indexes;
truncate table db_stat.snapshot_pg_stat_all_tables;
truncate table db_stat.snapshot_pg_statio_all_indexes;
truncate table db_stat.snapshot_pg_statio_all_tables;

-- Run the shell script manually:
./snap_stat.sh -d mydb -p 5432 -m SLAVE -u postgres -H 192.168.174.10

-- Cron entry: collect once a day at 08:08.
8 8 * * * /db/pgsql/snap_stat.sh -d mydb -p 5435 -m SLAVE -u postgres -H 192.168.174.10

-- Report: indexes with the least usage between snapshots
-- (max - min of the cumulative counters approximates activity in the window).
select *
from (
    SELECT t.relname,
           t.indexrelname,
           max(idx_scan) - min(idx_scan)         AS diff_idx_scan,
           max(idx_tup_read) - min(idx_tup_read) AS diff_idx_tup_read
    FROM db_stat.snapshot_pg_stat_all_indexes t
    --WHERE snap_create_time BETWEEN '2015-12-11' AND '2016-03-11'
    GROUP BY t.relname, t.indexrelname
) t1
order by diff_idx_scan, relname, indexrelname;

-- Report: per-index usage trend over time.
-- Inner query: per-snapshot deltas via lag() within each index's timeline
-- (note: snap_day actually buckets by HOUR via date_trunc('hour', ...)).
-- Outer query: each delta as a percentage of the index's total activity.
select relname,
       indexrelname,
       snap_day,
       diff_idx_scan,
       case when sum(diff_idx_scan) over w1 > 0
            then diff_idx_scan * 100 / sum(diff_idx_scan) over w1
            else 0
       end as diff_idx_scan_percent,
       diff_idx_tup_read,
       case when sum(diff_idx_tup_read) over w1 > 0
            then diff_idx_tup_read * 100 / sum(diff_idx_tup_read) over w1
            else 0
       end as diff_idx_tup_read_percent
from (
    SELECT t.relname,
           t.indexrelname,
           date_trunc('hour', snap_create_time) snap_day,
           t.idx_scan - lag(t.idx_scan, 1) over w         AS diff_idx_scan,
           t.idx_tup_read - lag(t.idx_tup_read, 1) over w AS diff_idx_tup_read
    from db_stat.snapshot_pg_stat_all_indexes t
    --where indexrelname in ('','')
    WINDOW w AS (PARTITION BY t.relname, t.indexrelname ORDER BY date_trunc('hour', t.snap_create_time))
) t1
where diff_idx_scan is not null
WINDOW w1 as (PARTITION BY t1.relname, t1.indexrelname)
order by relname, indexrelname, snap_day;
转载地址:http://htezx.baihongyu.com/