diff --git a/Service/Docker/check-config.sh b/Docker/check-config.sh
similarity index 100%
rename from Service/Docker/check-config.sh
rename to Docker/check-config.sh
diff --git "a/Service/Docker/docker\345\256\211\350\243\205.sh" "b/Docker/docker\345\256\211\350\243\205.sh"
similarity index 100%
rename from "Service/Docker/docker\345\256\211\350\243\205.sh"
rename to "Docker/docker\345\256\211\350\243\205.sh"
diff --git "a/Service/Docker/\345\210\233\345\273\272\347\247\201\346\234\211\344\273\223\345\272\223" "b/Docker/\345\210\233\345\273\272\347\247\201\346\234\211\344\273\223\345\272\223"
similarity index 100%
rename from "Service/Docker/\345\210\233\345\273\272\347\247\201\346\234\211\344\273\223\345\272\223"
rename to "Docker/\345\210\233\345\273\272\347\247\201\346\234\211\344\273\223\345\272\223"
diff --git "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/Dockerfile" "b/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/Dockerfile"
similarity index 100%
rename from "Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/Dockerfile"
rename to "Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/Dockerfile"
diff --git "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/README.md" "b/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/README.md"
similarity index 100%
rename from "Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/README.md"
rename to "Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/README.md"
diff --git "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/files/wzp.conf" "b/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/files/wzp.conf"
similarity index 100%
rename from "Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/files/wzp.conf"
rename to "Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/files/wzp.conf"
diff --git "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/nginx.conf" "b/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/nginx.conf"
similarity index 100%
rename from "Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/nginx.conf"
rename to "Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/assets/nginx.conf"
diff --git "a/Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/Dockerfile" "b/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/Dockerfile"
similarity index 100%
rename from "Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/Dockerfile"
rename to "Docker/\351\225\234\345\203\217\345\210\266\344\275\234/Dockerfile"
diff --git "a/Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/YumRepo/Repo.sh" "b/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/YumRepo/Repo.sh"
similarity index 100%
rename from "Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/YumRepo/Repo.sh"
rename to "Docker/\351\225\234\345\203\217\345\210\266\344\275\234/YumRepo/Repo.sh"
diff --git "a/Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_history" "b/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_history"
similarity index 100%
rename from "Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_history"
rename to "Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_history"
"a/Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_logout" "b/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_logout" similarity index 100% rename from "Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_logout" rename to "Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_logout" diff --git "a/Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_profile" "b/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_profile" similarity index 100% rename from "Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_profile" rename to "Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bash_profile" diff --git "a/Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bashrc" "b/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bashrc" similarity index 100% rename from "Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bashrc" rename to "Docker/\351\225\234\345\203\217\345\210\266\344\275\234/assets/.bashrc" diff --git "a/Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/\351\225\234\345\203\217\345\210\266\344\275\234" "b/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/\351\225\234\345\203\217\345\210\266\344\275\234" similarity index 100% rename from "Service/Docker/\351\225\234\345\203\217\345\210\266\344\275\234/\351\225\234\345\203\217\345\210\266\344\275\234" rename to "Docker/\351\225\234\345\203\217\345\210\266\344\275\234/\351\225\234\345\203\217\345\210\266\344\275\234" diff --git a/Service/LazyManage/LazyManage.py b/LazyManage/LazyManage.py similarity index 100% rename from Service/LazyManage/LazyManage.py rename to LazyManage/LazyManage.py diff --git a/Service/LazyManage/LazyManage.sh b/LazyManage/LazyManage.sh similarity index 100% rename from Service/LazyManage/LazyManage.sh rename to LazyManage/LazyManage.sh diff --git a/Service/LazyManage/LazyManage_help.docx b/LazyManage/LazyManage_help.docx similarity index 100% rename from Service/LazyManage/LazyManage_help.docx rename to LazyManage/LazyManage_help.docx diff --git a/Service/LazyManage/ScriptRemote.sh b/LazyManage/ScriptRemote.sh similarity index 100% rename from Service/LazyManage/ScriptRemote.sh rename to LazyManage/ScriptRemote.sh diff --git a/Service/LazyManage/serverlist.conf b/LazyManage/serverlist.conf similarity index 100% rename from Service/LazyManage/serverlist.conf rename to LazyManage/serverlist.conf diff --git a/README.md b/README.md index e95a56b..4eec0c9 100644 --- a/README.md +++ b/README.md @@ -1,184 +1,19 @@ -## ops_doc +# ops_doc - 文档制作: 雪松 + 文档制作: left_left 小蜗牛 PF 雪松 rock Jesse sanm Derek 更新日期: 2016-04-28 - 本文档手册希望可以达到通俗易懂,方便运维人员使用,错误在所难免,还望指正! - - github更新下载地址: https://github.com/liquanzhou/ops_doc - - 请勿删除信息,转载请说明出处,抵制不道德行为 - - - - + 欢迎系统运维加入Q群: 198173206 # 加群请回答问题 - -### 一.总是听到有说: - -#### 1.运维累,背锅,薪资低? - -运维没做好,没有核心技能,肯定是累且背锅,薪资涨不上去。各行各业皆是如此,最底层都是一滩烂泥,不仅仅是运维行业。小事(运维处理个磁盘,处理个内存报警)其实就能体现出来能力. 太多运维都是处理表面问题,根本不去真正了解需求,也不去思考解决办法,不用心去把事情做漂亮,成长也基本止步,指望这类人去搞点创新,解决点难题,基本不可能,反过来想这种人怎么可能高工资? -做任何行业都一样,重要的是思考: 怎么差异化,怎么脱颖而出,做出与众不同,怎么做到核心精英. - -#### 2.运维开发的自动化取代运维? - -运维开发只是运维的一个技能分支,不需要神话运维开发。大多是培训招生鼓吹,运维开发淘汰运维。只做运维开发,不了解运维业务,做的系统平台怎么可能好用? -不了解业务,平台实现无法统一标准,统一流程。 - -#### 3.云平台取代运维? + 本文档手册希望可以达到通俗易懂, 方便运维人员使用, 错误在所难免, 还望指正! -云商资源是工具,业务问题是云商无法解决干预的,需要专业运维人员使用拼接。 - -#### 4.人工智能取代运维? 
diff --git a/Service/.DS_Store b/Service/.DS_Store
deleted file mode 100644
index 44eeebf..0000000
Binary files a/Service/.DS_Store and /dev/null differ
diff --git a/Service/BeansDB/beansdb_install.sh b/Service/BeansDB/beansdb_install.sh
new file mode 100644
index 0000000..983402f
--- /dev/null
+++ b/Service/BeansDB/beansdb_install.sh
@@ -0,0 +1,90 @@
+BeansDB installation
+
+
+yum install autoconf automake
+
+tar -zxvf beansdb-0.5.8.tar.gz
+cd beansdb-0.5.8
+
+./configure --prefix=/opt/beansdb
+make
+make install
+cd /opt/beansdb/
+mkdir {data,log}
+
+vim admin_beansdb.sh
+
+#################################################################
+#!/bin/sh
+
+basepath=/opt/beansdb
+user=root
+port=6666
+pidfile=${basepath}/beansdb_${port}.pid
+execbin=${basepath}/bin/beansdb
+datadir=${basepath}/data
+accesslog=${basepath}/log/access.log
+#flush_period=1   #sec
+#flush_num=2048   #k
+
+function _usage_() {
+    echo "Usage: $0 <start|stop>"
+}
+
+if [ -z "$1" ]
+then
+    _usage_
+    exit 1
+fi
+
+#if [ -n $2 ]
+#then
+#    port=$2
+#fi
+
+case "$1" in
+    start)
+        if [ -f ${pidfile} ]
+        then
+            pid=`cat ${pidfile}`
+            cnt=`ps -ef | grep $pid | grep "beansdb" | grep -v "grep" | wc -l`
+            if [ $cnt -gt 0 ]
+            then
+                echo "There is already an instance at port ${port}."
+            else
+                ${execbin} -u ${user} -p ${port} -P ${pidfile} -H ${datadir} -L ${accesslog} -T 2 -d
+            fi
+        else
+            ${execbin} -u ${user} -p ${port} -P ${pidfile} -H ${datadir} -L ${accesslog} -T 2 -d
+        fi
+        ;;
+    stop)
+        echo "Are you sure to stop beansdb instance at port ${port} (y/n)?"
+        read cfm
+        if [ $cfm == 'y' ]
+        then
+            if [ -f ${pidfile} ]
+            then
+                kill `cat ${pidfile}`
+            else
+                echo "pid file not exists."
+            fi
+        else
+            echo "do nothing."
+        fi
+        ;;
+    *)
+        _usage_
+        ;;
+esac
+
+
+#################################################################
+
+
+sh admin_beansdb.sh start
+ll data
+netstat -na | grep 6666
+
+
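BeansDB speaks the memcached text protocol, so the instance started above can be smoke-tested without any client library; a minimal check, assuming nc is installed and the port 6666 configured in admin_beansdb.sh:

    # store a 5-byte value, read it back, then quit
    printf 'set k1 0 0 5\r\nhello\r\nget k1\r\nquit\r\n' | nc 127.0.0.1 6666
    # expected replies: STORED, then VALUE k1 0 5 / hello / END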
diff --git a/Service/Docker/.DS_Store b/Service/Docker/.DS_Store
deleted file mode 100644
index 70c0194..0000000
Binary files a/Service/Docker/.DS_Store and /dev/null differ
diff --git "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/.DS_Store" "b/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/.DS_Store"
deleted file mode 100644
index 696188b..0000000
Binary files "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/.DS_Store" and /dev/null differ
diff --git "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/.DS_Store" "b/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/.DS_Store"
deleted file mode 100644
index d273c98..0000000
Binary files "a/Service/Docker/\345\270\270\347\224\250\350\275\257\344\273\266/Nginx/.DS_Store" and /dev/null differ
diff --git "a/Service/FastDFS/FastDFS_PHP\344\276\213\345\255\220.gz" "b/Service/FastDFS/FastDFS_PHP\344\276\213\345\255\220.gz"
new file mode 100644
index 0000000..fe63f5a
Binary files /dev/null and "b/Service/FastDFS/FastDFS_PHP\344\276\213\345\255\220.gz" differ
diff --git "a/Service/FastDFS/FastDFS\345\256\211\350\243\205.sh" "b/Service/FastDFS/FastDFS\345\256\211\350\243\205.sh"
new file mode 100644
index 0000000..67545df
--- /dev/null
+++ "b/Service/FastDFS/FastDFS\345\256\211\350\243\205.sh"
@@ -0,0 +1,270 @@
+FastDFS installation
+
+ Having covered how fastdfs works, the next step is the installation itself. Three machines are prepared: one acting as client, one as storage, one as tracker.
+ All three run debian6, minimally installed. First install the basic build environment:
+ apt-get install build-essential php5-dev libevent-dev
+
+ Download the fastdfs source package:
+ wget http://fastdfs.googlecode.com/files/FastDFS_v3.05.tar.gz
+
+ Install:
+ tar zxvf FastDFS_v3.05.tar.gz
+ cd FastDFS/
+ ./make.sh
+ ./make.sh install
+
+ After installation the default config files are placed under /etc/fdfs: the five files client.conf, http.conf, mime.types, storage.conf and tracker.conf. Which fastdfs process starts is determined by the config file it loads. The source package also ships these config files.
+
+ Analysis of tracker.conf:
+
+#Whether this tracker.conf is disabled. The config file is named explicitly when starting the fastdfs server process, so it must be in effect here. false means in effect, true means disabled.
+disabled=false
+
+#Address the program listens on; if unset, listen on all addresses
+bind_addr=
+
+#Port the tracker listens on
+port=22122
+
+#Connect timeout
+connect_timeout=30
+
+#Timeout for the tracker sending and receiving data over the network
+network_timeout=60
+
+#Where data and logs are kept
+base_path=/opt/fdfs
+
+#Maximum number of connections the service supports
+max_connections=256
+
+#Number of worker threads, usually the number of CPUs
+work_threads=4
+
+#Strategy for picking the group a file is stored in. 0: round-robin 1: a designated group 2: load balancing, pick the group with the most free space
+store_lookup=2
+
+#If store_lookup above is 1, designate a group here
+#store_group=group2
+
+#Which storage in the group is the primary; after a file is uploaded to the primary, that machine syncs it to the other storages in the group. 0: round-robin 1: first by ip address order 2: first by priority order
+store_server=0
+
+#Which storage acts as the primary download server. 0: round-robin 1: the primary upload storage is also the primary download server
+download_server=0
+
+#Which (directory/mount point) on the storage receives uploads; a storage can have several base paths for files. 0: round-robin 2: load balancing, pick the one with the most free space
+store_path=0
+
+#Reserved space: once any storage in a group has less free space than this, the whole group stops accepting uploads
+reserved_storage_space = 4GB
+
+#Log level
+log_level=info
+
+#User/group the process runs as; defaults to the current user if unset
+run_by_group=
+run_by_user=
+
+#Hosts allowed to connect to the tracker; all by default
+allow_hosts=*
+
+#How often the log buffer is flushed to disk, default 10s
+sync_log_buff_interval = 10
+
+#Interval for checking storage servers. Storages send heartbeats to the tracker periodically; if no signal arrives within this window, the tracker considers the storage down. Default 120s
+check_active_interval = 120
+
+#Thread stack size, at least 64KB
+thread_stack_size = 64KB
+
+#Whether the server adjusts automatically when a storage's ip changes; the adjustment only happens when the storage process restarts
+storage_ip_changed_auto_adjust = true
+
+#Maximum delay for file sync between storages, default 1 day
+storage_sync_file_max_delay = 86400
+
+#Maximum time allowed for syncing one file
+storage_sync_file_max_time = 300
+
+#Whether to keep many small files inside one trunk file
+use_trunk_file = false
+
+#Minimum slot size, should be no more than 4KB, default 256 bytes
+slot_min_size = 256
+
+#Maximum slot size: uploaded files smaller than this are placed into a trunk file
+slot_max_size = 16MB
+
+#Default trunk file size, should be larger than 4M
+trunk_file_size = 64MB
+
+#Whether the http service is in effect; off by default
+http.disabled=false
+
+#http service port
+http.server_port=8080
+
+#Interval for checking the http service on storages; <=0 means no check
+http.check_alive_interval=30
+
+#Type of request used for the storage http check: tcp only checks that a connection can be made, http requires a 200 response
+http.check_alive_type=tcp
+
+#URL used to check storage http service status
+http.check_alive_uri=/status.html
+
+#if need find content type from file extension name
+http.need_find_content_type=true
+
+#Pull additional http settings in with include
+##include http.conf
+
+ Start the tracker process:
+ fdfs_trackerd /etc/fdfs/tracker.conf
+
+ Check its status:
+
+ netstat -tupln|grep tracker
+ #you should see:
+ tcp 0 0 0.0.0.0:22122 0.0.0.0:* LISTEN 18559/fdfs_trackerd
+
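One step that is easy to miss: base_path must already exist before fdfs_trackerd will start, since the tracker keeps its data and logs there. A minimal sketch, assuming the /opt/fdfs path configured above:

    mkdir -p /opt/fdfs
    fdfs_trackerd /etc/fdfs/tracker.conf
    tail -n 20 /opt/fdfs/logs/trackerd.log    # startup errors are logged under base_path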
+ Analysis of storage.conf:
+
+#Same as tracker.conf
+disabled=false
+
+#Which group this storage server belongs to
+group_name=group1
+
+#Same as tracker.conf
+bind_addr=
+
+#Whether to bind the address when connecting to other servers; only effective when bind_addr is configured
+client_bind=true
+
+#Same as tracker.conf
+port=23000
+connect_timeout=30
+network_timeout=60
+
+#Interval for actively sending heartbeats to the tracker
+heart_beat_interval=30
+
+#Interval for actively reporting disk usage to the tracker
+stat_report_interval=60
+
+#Same as tracker.conf
+base_path=/opt/fdfs
+max_connections=256
+
+#Buffer size for receiving/sending data, must be larger than 8KB
+buff_size = 256KB
+
+#Same as tracker.conf
+work_threads=4
+
+#Whether disk IO reads and writes are separated
+disk_rw_separated = true
+
+#Whether to read/write files directly, off by default
+disk_rw_direct = false
+
+#Reader/writer thread counts for mixed read/write
+disk_reader_threads = 1
+disk_writer_threads = 1
+
+#When the binlog has no file to sync, delay this many milliseconds before re-reading; 0 means no delay
+sync_wait_msec=50
+
+#Rest this many milliseconds between syncing one file and the next; 0 means no rest, sync continuously
+sync_interval=0
+
+#Files are only synced within this time window
+sync_start_time=00:00
+sync_end_time=23:59
+
+#Write the mark file after syncing this many files
+write_mark_file_freq=500
+
+#A storage supports multiple paths for storing files; only one is set by default
+store_path_count=1
+
+#Configure store_path entries starting from 0; if store_path0 does not exist, base_path must exist
+store_path0=/opt/fdfs
+#store_path1=/opt/fastdfs2
+
+#subdir_count_per_path * subdir_count_per_path directories are created under each store_path; storage is two levels deep
+subdir_count_per_path=256
+
+#Set the tracker_server
+tracker_server=x.x.x.x:22122
+
+#Same as tracker.conf
+log_level=info
+run_by_group=
+run_by_user=
+allow_hosts=*
+
+#How files are placed in the data directories. 0: round-robin 1: random
+file_distribute_path_mode=0
+
+#With round-robin placement, how many files one directory holds before rotating to the next
+file_distribute_rotate_count=100
+
+#Start syncing to disk after this many bytes written; 0 means no forced sync
+fsync_after_written_bytes=0
+
+#Interval for flushing the log buffer to disk
+sync_log_buff_interval=10
+
+#Interval for syncing storage status info to disk
+sync_stat_file_interval=300
+
+#Thread stack size
+thread_stack_size=512KB
+
+#Upload priority of this server; the lower the value, the higher the priority
+upload_priority=10
+
+#Whether to check for duplicate files. 1: check 0: don't check
+check_file_duplicate=0
+
+#Must be set when check_file_duplicate is 1
+key_namespace=FastDFS
+
+#Connection mode to FastDHT. 0: short-lived 1: persistent
+keep_alive=0
+
+#Same as tracker.conf
+http.disabled=false
+http.domain_name=
+http.server_port=8888
+http.trunk_size=256KB
+http.need_find_content_type=true
+##include http.conf
+
+ Start the storage process:
+ fdfs_storaged /etc/fdfs/storage.conf
+
+ Check its status:
+ netstat -tupln | grep storage
+ #you should see:
+ tcp 0 0 0.0.0.0:23000 0.0.0.0:* LISTEN 17138/fdfs_storaged
+
+ Analysis of client.conf:
+ #Same as tracker.conf
+ connect_timeout=30
+ network_timeout=60
+ base_path=/opt/fdfs
+ tracker_server=x.x.x.x:22122
+ log_level=info
+ http.tracker_server_port=8080
+
+Test uploading a file:
+
+ fdfs_upload_file /etc/fdfs/client.conf client.conf
+ #a string like the following is returned
+ group1/M00/00/00/CgEGflAqaFW4hENaAAACo8wrbSE16.conf
+
+ The file can now be seen under 00/00 in the storage data directory, named CgEGflAqaFW4hENaAAACo8wrbSE16.conf
\ No newline at end of file
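The client package can also round-trip the test: download the file back by its ID and delete it. A sketch reusing the file ID returned above:

    fdfs_download_file /etc/fdfs/client.conf group1/M00/00/00/CgEGflAqaFW4hENaAAACo8wrbSE16.conf /tmp/roundtrip.conf
    cmp /tmp/roundtrip.conf client.conf && echo "content matches"     # verify the bytes survived
    fdfs_delete_file /etc/fdfs/client.conf group1/M00/00/00/CgEGflAqaFW4hENaAAACo8wrbSE16.conf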
diff --git "a/Service/FastDFS/FastDFS\347\233\221\346\216\247.sh" "b/Service/FastDFS/FastDFS\347\233\221\346\216\247.sh"
new file mode 100644
index 0000000..b94eba4
--- /dev/null
+++ "b/Service/FastDFS/FastDFS\347\233\221\346\216\247.sh"
@@ -0,0 +1,124 @@
+[root@localhost ~]# /usr/local/fastdfs/bin/fdfs_monitor /usr/local/fastdfs/conf/storage.conf
+
+[2014-05-27 15:14:58] DEBUG - base_path=/opt/fastdfs/contact, connect_timeout=30, network_timeout=60, tracker_server_count=2, anti_steal_token=0, anti_steal_secret_key length=0, use_connection_pool=0, g_connection_pool_max_idle_time=3600s, use_storage_id=0, storage server id count: 0
+
+server_count=2, server_index=1
+
+tracker server is 10.0.0.57:22122
+
+group count: 1
+
+Group 1:
+group name = contact
+disk total space = 1380838 MB
+disk free space = 812573 MB
+trunk free space = 0 MB
+storage server count = 1
+active server count = 1
+storage server port = 23000
+storage HTTP port = 8888
+store path count = 1
+subdir count per path = 256
+current write server index = 0
+current trunk file id = 0
+
+    Storage 1:
+        id = 10.0.0.58
+        ip_addr = 10.0.0.58  ACTIVE
+        http domain = 
+        version = 4.07
+        join time = 2013-08-07 14:05:30
+        up time = 2013-12-31 09:48:17
+        total storage = 1380838 MB
+        free storage = 812573 MB
+        upload priority = 10
+        store_path_count = 1
+        subdir_count_per_path = 256
+        storage_port = 23000
+        storage_http_port = 8888
+        current_write_path = 0
+        source storage id= 
+        if_trunk_server= 0
+        total_upload_count = 145463987
+        success_upload_count = 145463987
+        total_append_count = 0
+        success_append_count = 0
+        total_modify_count = 0
+        success_modify_count = 0
+        total_truncate_count = 0
+        success_truncate_count = 0
+        total_set_meta_count = 0
+        success_set_meta_count = 0
+        total_delete_count = 55020068
+        success_delete_count = 55020068
+        total_download_count = 0
+        success_download_count = 0
+        total_get_meta_count = 0
+        success_get_meta_count = 0
+        total_create_link_count = 0
+        success_create_link_count = 0
+        total_delete_link_count = 0
+        success_delete_link_count = 0
+        total_upload_bytes = 55349210207
+        success_upload_bytes = 55349210207
+        total_append_bytes = 0
+        success_append_bytes = 0
+        total_modify_bytes = 0
+        success_modify_bytes = 0
+        total_download_bytes = 0
+        success_download_bytes = 0
+        total_sync_in_bytes = 17409030985
+        success_sync_in_bytes = 17371905709
+        total_sync_out_bytes = 19884312270
+        success_sync_out_bytes = 19867906118
+        total_file_open_count = 195455141
+        success_file_open_count = 190330634
+        total_file_read_count = 0
+        success_file_read_count = 0
+        total_file_write_count = 190330634
+        success_file_write_count = 190330634
+        last_heart_beat_time = 2014-05-27 15:14:31
+        last_source_update = 2014-05-27 15:14:25
+        last_sync_update = 2013-10-14 14:18:36
+        last_synced_timestamp = 2013-08-16 23:54:41
+
+
+# The key part is here: as long as the storage shows ACTIVE, everything is fine
+
+    Storage 1:
+        id = 10.0.0.58
+        ip_addr = 10.0.0.58  ACTIVE
+        http domain = 
+
+
+
+#!/bin/bash
+
+results=""
+statusValue=0
+storageAll=`/usr/local/fastdfs/bin/fdfs_monitor /usr/local/fastdfs/conf/storage.conf|awk '/ip_addr/||/last_synced_timestamp/{print $3"=="$4}'|sed '$!N;s/\n/==/'`
+currentTimeStamp=$(date +%s)
+for storage in $storageAll
+do
+    IP=$(echo $storage |awk -F '==' '{print $1}')
+    status=$(echo $storage |awk -F '==' '{print $2}')
+    Time=$(echo $storage |awk -F '==' '{print $3,$4}')
+    timeStamp=$(date -d "$Time" +%s)
+    timeout=$((currentTimeStamp-timeStamp))
+
+    if [ "$status" != "ACTIVE" -o $timeout -gt 120 ];then
+        statusValue=$((statusValue+1))
+    fi
+    results="$results[$IP $status timeout:$timeout]"
+done
+
+if [ $statusValue -eq 0 ];then
+    echo "fastdfs OK $results"
+    exit 0
+else
+    echo "fastdfs CRITICAL $results"
+    exit 2
+fi
+
+
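The script exits 0 for OK and 2 for CRITICAL, which matches Nagios plugin conventions, so it can be dropped straight into NRPE. A sketch, assuming the script is saved as /usr/local/fastdfs/bin/check_fastdfs.sh (a hypothetical path):

    sh /usr/local/fastdfs/bin/check_fastdfs.sh; echo "exit=$?"    # manual run: exit=0 means all storages ACTIVE
    # nrpe.cfg entry:
    # command[check_fastdfs]=/usr/local/fastdfs/bin/check_fastdfs.sh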
diff --git "a/Service/FastDFS/PHP\345\256\211\350\243\205fastDFS\346\211\251\345\261\225.sh" "b/Service/FastDFS/PHP\345\256\211\350\243\205fastDFS\346\211\251\345\261\225.sh"
new file mode 100644
index 0000000..97747ef
--- /dev/null
+++ "b/Service/FastDFS/PHP\345\256\211\350\243\205fastDFS\346\211\251\345\261\225.sh"
@@ -0,0 +1,40 @@
+Installing the fastDFS extension for PHP
+
+Original work; reposting is allowed, but you must credit the original source, author information and this statement with a hyperlink, otherwise legal responsibility will be pursued. http://369369.blog.51cto.com/319630/771169
+
+1. Download the fastDFS source, preferably matching the FastDFS server version; here I downloaded FastDFS_v3.06.tar.gz and put it under /opt/soft.
+
+2. LAMP or LNMP is already installed, with PHP under /usr/local/php
+
+3. Steps
+[root@snstest ~]#tar zxvf FastDFS_v3.06.tar.gz
+[root@snstest ~]#cd FastDFS
+[root@ FastDFS ~]#./make.sh
+[root@ FastDFS ~]#./make.sh install
+[root@ FastDFS ~]#cd client
+[root@ client ~]#make; make install
+[root@ client ~]#cd ../php_client
+If you skip the steps above and go straight to configuring and compiling against php-config, you get this error:
+make: *** [fastdfs_client.lo] Error 1
+[root@ php_client ~]#/usr/local/php/bin/phpize   //run the phpize from the php install directory
+[root@ php_client ~]#./configure --with-php-config=/usr/local/php/bin/php-config
+[root@ php_client ~]#make
+[root@ php_client ~]#make install
+[root@ php_client ~]#cp ../conf/client.conf /etc/fdfs/   //in version 3.06, /etc/fdfs/ already contains client.conf
+[root@ php_client ~]#cd /etc/fdfs/
+[root@ fdfs ~]#vi client.conf, set and save:
+tracker_server=192.168.133.171:22122   //fill in the IP address and port for your environment
+Load fastdfs in the php.ini config file:
+[root@ fdfs ~]#cat fastdfs_client.ini >> /usr/local/php/etc/php.ini
+
+4. Restart the web server. php_client already contains descriptions of the extension functions and sample programs
+
+5. Verify the extension
+
+[root@ fdfs ~]#cd /opt/soft/FastDFS/php_client
+[root@ fdfs ~]#cp fastdfs_test.php /var/www   ///var/www is my web server root
+Open IE or another browser at http://192.168.133.87/fastdfs_test.php; it works if something like this appears:
+3.06 fastdfs_tracker_make_all_connections result: 1 array(1) { ["group1"]=> array(12) { ["free_space"]=> int(10542) ["trunk_free_space"]=> int(0) ["server_count"]=> int(2) ["active_count"]=> int(2) ["storage_port"]=> int(23000) ["storage_http_port"]=> int(8888)
+
+If you get the error below, the web service software (apache, nginx, php-fpm) was not restarted:
+Fatal error: Call to undefined function fastdfs_client_version() in ... on line 6
\ No newline at end of file
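The extension can also be verified from the shell, with no browser involved; a sketch, assuming the php binary from /usr/local/php is on PATH:

    php -m | grep -i fastdfs                        # the module should be listed
    php -r 'echo fastdfs_client_version(), "\n";'   # prints the version, e.g. 3.06, when loaded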
diff --git a/Service/FastDFS/check_fastdfs_status.sh b/Service/FastDFS/check_fastdfs_status.sh
new file mode 100644
index 0000000..a8506a2
--- /dev/null
+++ b/Service/FastDFS/check_fastdfs_status.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+source ~/.bash_profile
+Basedir=`dirname $0`
+Active=$Basedir/active.txt
+IP=$Basedir/ip.txt
+Syn_time=$Basedir/syn_time.txt
+/sbin/ip add |grep em2 |grep inet|cut -d/ -f1|awk '{print $2}' >$IP
+/usr/local/fastdfs/bin/fdfs_monitor /usr/local/fastdfs/conf/storage.conf >status.txt
+AIP=`cat $IP`
+/bin/cat status.txt | grep $AIP |awk '/ip_addr/{print $NF}' > active.txt
+Now_time=`date +%s`
+sed -n '/Storage/,/Storage/p' status.txt >Storage1
+sed -n '/Storage 2/,//p' status.txt >Storage2
+Hostip=`cat Storage1 |grep "$AIP"`
+if [ "$Hostip" != "" ];then
+    num=`cat Storage1| grep last_synced_timestamp | awk -F"(" '{print $2}' |awk '{print $1}' |awk -F"s" '{print $1}'`
+    cat Storage1| grep last_synced_timestamp | awk '{ print $3,$4 }' >$Syn_time
+    NUM=$num
+else
+    num=`cat Storage2| grep last_synced_timestamp | awk -F"(" '{print $2}' |awk '{print $1}' |awk -F"s" '{print $1}'`
+    cat Storage2| grep last_synced_timestamp | awk '{ print $3,$4 }' >$Syn_time
+    NUM=$num
+fi
+paste $Syn_time $IP $Active > main.log
+cat main.log | while read day time ip active
+do
+    if [ "$num" == "" ];then
+        sys_time=`date -d "$day $time" +%s`
+        num1=`expr ${Now_time} - ${sys_time}`
+        if [ "${active}" == "ACTIVE" ]&&[ "$num1" -lt 120 ];
+        then
+            echo "OK - FASTDFS_STORAGE status: $active and delay is $num1"
+            exit 0
+        else
+            echo "Critical - FASTDFS_STORAGE status: $ip State is $active, Update time delay $num1 (s),please check."
+            exit 2
+        fi
+    else
+        if [ "${active}" == "ACTIVE" ]&&[ "$NUM" -lt 120 ];
+        then
+            echo "OK - FASTDFS_STORAGE status: $active and delay is $NUM"
+            exit 0
+        else
+            echo "Critical - FASTDFS_STORAGE status: $ip State is $active, Update time delay $NUM (s),please check."
+            exit 2
+        fi
+    fi
+done
+rm -rf $Active $IP $Syn_time main.log status.txt
diff --git a/Service/FastDFS/fastdfs_php_demo.rar b/Service/FastDFS/fastdfs_php_demo.rar
new file mode 100644
index 0000000..fbc0518
Binary files /dev/null and b/Service/FastDFS/fastdfs_php_demo.rar differ
diff --git "a/Service/Memcache/Linux\344\270\213\347\232\204Memcache\345\256\211\350\243\205.sh" "b/Service/Memcache/Linux\344\270\213\347\232\204Memcache\345\256\211\350\243\205.sh"
new file mode 100644
index 0000000..3b0f7e2
--- /dev/null
+++ "b/Service/Memcache/Linux\344\270\213\347\232\204Memcache\345\256\211\350\243\205.sh"
@@ -0,0 +1,219 @@
+
+Installing Memcache on Linux
+
+
+
+While looking into how to make Discuz! use Memcache for a few things, I noted down the Memcache installation procedure.
+
+Installing the Memcache server side on Linux
+The server side means installing memcached; the latest version at the time is memcached-1.3.0.
+Download: http://www.danga.com/memcached/dist/memcached-1.2.2.tar.gz
+Memcache also uses the libevent library for socket handling, so libevent must be installed too; its latest version is libevent-1.3. (Skip this if your system already has libevent.)
+Homepage: http://www.monkey.org/~provos/libevent/
+Download: http://www.monkey.org/~provos/libevent-1.3.tar.gz
+
+Fetch both directly with wget. Once the sources are downloaded:
+1. Install libevent first. Specify an install prefix at configure time, i.e. ./configure --prefix=/usr; then make; then make install;
+# yum install libevent-devel
+
+2. Then install memcached, pointing configure at the libevent prefix, i.e. ./configure --with-libevent=/usr; then make; then make install;
+That completes the server-side Memcache installation on Linux. In detail:
+
+ 1. Download memcached and libevent into /tmp:
+ # cd /tmp
+ # wget http://www.danga.com/memcached/dist/memcached-1.2.0.tar.gz
+ # wget http://www.monkey.org/~provos/libevent-1.2.tar.gz
+
+ 2. Install libevent first:
+ # tar zxvf libevent-1.2.tar.gz
+ # cd libevent-1.2
+ # ./configure --prefix=/usr
+ # make
+ # make install
+
+ 3. Verify libevent installed successfully:
+ # ls -al /usr/lib | grep libevent
+ lrwxrwxrwx 1 root root 21 Nov 12 17:38 libevent-1.2.so.1 -> libevent-1.2.so.1.0.3
+ -rwxr-xr-x 1 root root 263546 Nov 12 17:38 libevent-1.2.so.1.0.3
+ -rw-r--r-- 1 root root 454156 Nov 12 17:38 libevent.a
+ -rwxr-xr-x 1 root root 811 Nov 12 17:38 libevent.la
+ lrwxrwxrwx 1 root root 21 Nov 12 17:38 libevent.so -> libevent-1.2.so.1.0.3
+ Looks good; everything is in place.
+
+ 4. Install memcached, pointing at where libevent was installed:
+ # cd /tmp
+ # tar zxvf memcached-1.2.0.tar.gz
+ # cd memcached-1.2.0
+ # ./configure --with-libevent=/usr
+ # make
+ # make install
+ If errors appear along the way, read the messages carefully and configure or add the libraries or paths they ask for.
+ The install places memcached at /usr/local/bin/memcached.
+
+ 5. Verify memcached installed successfully:
+ # ls -al /usr/local/bin/mem*
+ -rwxr-xr-x 1 root root 137986 Nov 12 17:39 /usr/local/bin/memcached
+ -rwxr-xr-x 1 root root 140179 Nov 12 17:39 /usr/local/bin/memcached-debug
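Before moving on to the PHP side, the freshly built daemon can be smoke-tested from the shell; a sketch, assuming nc is available:

    /usr/local/bin/memcached -d -m 64 -u root -p 11211 -P /tmp/mc-test.pid
    printf 'version\r\nquit\r\n' | nc 127.0.0.1 11211    # expect a reply such as: VERSION 1.2.0
    kill `cat /tmp/mc-test.pid`                          # stop the test instance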
+
+Installing the Memcache PHP extension
+1. Pick the memcache version you want to download at http://pecl.php.net/package/memcache.
+2. Install PHP's memcache extension
+
+    tar vxzf memcache-2.2.1.tgz
+    cd memcache-2.2.1
+
+    # yum install php-devel
+
+    /usr/local/php/bin/phpize
+    ./configure --enable-memcache --with-php-config=/usr/bin/php-config --with-zlib-dir
+    make
+    make install
+
+
+3. The install above ends with a message like:
+
+    Installing shared extensions: /usr/local/php/lib/php/extensions/no-debug-non-zts-2007xxxx/
+
+
+4. Change extension_dir = "./" in php.ini to
+
+    extension_dir = "/usr/local/php/lib/php/extensions/no-debug-non-zts-2007xxxx/"
+
+
+5. Add one line to load the memcache extension: extension=memcache.so
+
+Basic memcached setup:
+1. Start the Memcache server side:
+# /usr/local/bin/memcached -d -m 10 -u root -l 192.168.0.200 -p 12000 -c 256 -P /tmp/memcached.pid
+
+    -d starts a daemon,
+    -m is the amount of memory given to Memcache, in MB; 10MB here,
+    -u is the user Memcache runs as; root here,
+    -l is the server IP address to listen on; I specified the server address 192.168.0.200 here,
+    -p is the port Memcache listens on; 12000 here, ideally above 1024,
+    -c is the maximum number of concurrent connections, default 1024; 256 here, size it to your server's load,
+    -P is where the Memcache pid file is saved; /tmp/memcached.pid here,
+
+2. To stop the Memcache process, run:
+
+    # kill `cat /tmp/memcached.pid`
+
+
+You can also start several daemons, as long as the ports do not clash.
+
+3. Restart apache: service httpd restart
+
+Testing the Memcache environment:
+Run the php file below; if it prints This is a test!, the environment works. Start enjoying Memcache!
+<?php
+$mem = new Memcache;
+$mem->connect("127.0.0.1", 11211);
+$mem->set('key', 'This is a test!', 0, 60);
+$val = $mem->get('key');
+echo $val;
+?>
+
+
+
+/usr/local/bin/memcached -d -m 100 -u root -l 192.168.1.107 -p 12000 -c 256 -P /tmp/memcache.pid
+
+telnet localhost 11211
+#hitting Enter right away produces an error
+ERROR
+
+#command to view the current status
+stats
+
+STAT pid 22459                   process ID
+STAT uptime 1027046              seconds the server has been running
+STAT time 1273043062             current unix timestamp on the server
+STAT version 1.4.4               server version
+STAT pointer_size 64             OS word size (this server is 64-bit)
+STAT rusage_user 0.040000        cumulative user CPU time of the process
+STAT rusage_system 0.260000      cumulative system CPU time of the process
+STAT curr_connections 10         currently open connections
+STAT total_connections 82        total connections ever opened
+STAT connection_structures 13    connection structures allocated by the server
+STAT cmd_get 54                  total get commands executed
+STAT cmd_set 34                  total set commands executed
+STAT cmd_flush 3                 total flush_all commands executed
+STAT get_hits 9                  get hits
+STAT get_misses 45               get misses
+STAT delete_misses 5             delete misses
+STAT delete_hits 1               delete hits
+STAT incr_misses 0               incr misses
+STAT incr_hits 0                 incr hits
+STAT decr_misses 0               decr misses
+STAT decr_hits 0                 decr hits
+STAT cas_misses 0                cas misses
+STAT cas_hits 0                  cas hits
+STAT cas_badval 0                cas attempts with a stale token
+STAT auth_cmds 0
+STAT auth_errors 0
+STAT bytes_read 15785            total bytes read
+STAT bytes_written 15222         total bytes written
+STAT limit_maxbytes 1048576      memory allocated (bytes)
+STAT accepting_conns 1           whether connections are currently being accepted
+STAT listen_disabled_num 0
+STAT threads 4                   thread count
+STAT conn_yields 0
+STAT bytes 0                     bytes used by stored items
+STAT curr_items 0                current item count
+STAT total_items 34              total items ever stored
+STAT evictions 0                 items evicted to free space
+
+
+
+memcached has only 4 basic commands for storing and retrieving data. Below we interact with memcached over telnet and walk through these 4 basic commands, assuming the memcached server is on the local machine, listening on the default port 11211.
+
+Connect to memcached with telnet: telnet 127.0.0.1 11211
+
+SET: add a new entry to memcached, or replace an existing entry with new data
+
+set test1 0 0 10
+testing001
+STORED
+
+ADD: store data only when the key does not exist yet. If the key already exists, the response is NOT_STORED
+
+add test1 0 0 10
+testing002
+NOT_STORED
+add test2 0 0 10
+testing002
+STORED
+
+REPLACE: store data only when the key already exists. If the key does not exist, the response is NOT_STORED
+
+replace test1 0 0 10
+testing003
+STORED
+replace test3 0 0 10
+testing003
+NOT_STORED
+
+GET: return data from memcached. When data comes back from the cache, the first line carries the key name, the flag value and the length of the returned value; the actual data is on the second line, and END closes the response. If the key does not exist, END is returned immediately on the first line.
+
+get test1
+VALUE test1 0 10
+testing003
+END
+get test4
+END
+get test1 test2
+VALUE test1 0 10
+testing003
+END
+
+Note: as above, one request may contain several keys separated by spaces. When requesting several keys, only the keys that hold data get a response; memcached does not respond for keys with no stored data.
+
+
+
+References:
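The same four commands can be scripted with nc instead of typed into telnet, which is handy in cron jobs and health checks; this assumes a local memcached on 11211. Note that the trailing 10 on the set line is the byte length of the payload; if the two disagree, memcached answers CLIENT_ERROR bad data chunk:

    printf 'set test1 0 0 10\r\ntesting001\r\nget test1\r\nquit\r\n' | nc 127.0.0.1 11211
    # expect: STORED, then VALUE test1 0 10 / testing001 / END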
+If you have questions about Memcached, the following articles may help:
+Installing Memcache on Linux: http://www.ccvita.com/257.html
+Installing Memcache on Windows: http://www.ccvita.com/258.html
+Memcache basics tutorial: http://www.ccvita.com/259.html
+Discuz!'s Memcache caching implementation: http://www.ccvita.com/261.html
+Memcache protocol, Chinese translation: http://www.ccvita.com/306.html
+Memcache distributed deployment: http://www.ccvita.com/395.html
\ No newline at end of file
diff --git "a/Service/Memcache/flush_all\346\270\205\351\231\244\346\211\200\346\234\211\346\225\260\346\215\256.sh" "b/Service/Memcache/flush_all\346\270\205\351\231\244\346\211\200\346\234\211\346\225\260\346\215\256.sh"
new file mode 100644
index 0000000..7b0329e
--- /dev/null
+++ "b/Service/Memcache/flush_all\346\270\205\351\231\244\346\211\200\346\234\211\346\225\260\346\215\256.sh"
@@ -0,0 +1,14 @@
+flush_all, the simplest of commands, just clears every name/value pair in the cache. When you need to reset the cache to a clean state, flush_all is very useful.
+
+set userId 0 0 5
+55555
+STORED
+get userId
+VALUE userId 0 5
+55555
+END
+flush_all
+OK
+get userId
+END
\ No newline at end of file
diff --git a/Service/Memcache/memcache-PHPclient.txt b/Service/Memcache/memcache-PHPclient.txt
new file mode 100644
index 0000000..5dd0694
--- /dev/null
+++ b/Service/Memcache/memcache-PHPclient.txt
@@ -0,0 +1,18 @@
+#!/bin/bash
+wget http://pecl.php.net/get/memcache-2.2.5.tgz
+tar vxzf memcache-2.2.5.tgz
+cd memcache-2.2.5/
+# yum install php-devel
+/usr/local/php/bin/phpize
+./configure --with-php-config=/usr/local/php/bin/php-config
+make
+make install
+###edit php.ini to add memcache module
+sed -i '/extension_dir = "ext"/ a extension=memcache.so' /usr/local/php/lib/php.ini
+sed -i '/extension_dir = "ext"/ a extension_dir = "/usr/local/php/lib/php/extensions/no-debug-non-zts-20100525/"' /usr/local/php/lib/php.ini
+### session save
+sed -i '/session.save_handler/ s/^/;/' /usr/local/php/lib/php.ini
+sed -i '/Session/ a session.save_path = "tcp://192.168.146.146:11211"' /usr/local/php/lib/php.ini
+sed -i '/Session/ a session.save_handler = memcache' /usr/local/php/lib/php.ini
+/usr/local/apache/bin/apachectl restart
+### /etc/init.d/iptables stop
\ No newline at end of file
diff --git a/Service/Memcache/memcache.php b/Service/Memcache/memcache.php
new file mode 100644
index 0000000..6330c59
--- /dev/null
+++ b/Service/Memcache/memcache.php
@@ -0,0 +1,881 @@
+<?php
+/*
+  |                                                                      |
+  +----------------------------------------------------------------------+
+*/
+
+$VERSION='$Id: memcache.php,v 1.1.2.3 2008/08/28 18:07:54 mikl Exp $';
+
+define('ADMIN_USERNAME','memcache');   // Admin Username
+define('ADMIN_PASSWORD','password');   // Admin Password
+define('DATE_FORMAT','Y/m/d H:i:s');
+define('GRAPH_SIZE',200);
+define('MAX_ITEM_DUMP',50);
+
+$MEMCACHE_SERVERS[] = 'mymemcache-server1:11211'; // add more as an array
+$MEMCACHE_SERVERS[] = 'mymemcache-server2:11211'; // add more as an array
+
+
+////////// END OF DEFAULT CONFIG AREA /////////////////////////////////////////////////////////////
+
+///////////////// Password protect ////////////////////////////////////////////////////////////////
+if (!isset($_SERVER['PHP_AUTH_USER']) || !isset($_SERVER['PHP_AUTH_PW']) ||
+    $_SERVER['PHP_AUTH_USER'] != ADMIN_USERNAME ||$_SERVER['PHP_AUTH_PW'] != ADMIN_PASSWORD) {
+    Header("WWW-Authenticate: Basic realm=\"Memcache Login\"");
+    Header("HTTP/1.0 401 Unauthorized");
+
+    echo <<<EOB
+        <html><body>
+        <h1>Rejected!</h1>
+ Wrong Username or Password! + +EOB; + exit; +} + +///////////MEMCACHE FUNCTIONS ///////////////////////////////////////////////////////////////////// + +function sendMemcacheCommands($command){ + global $MEMCACHE_SERVERS; + $result = array(); + + foreach($MEMCACHE_SERVERS as $server){ + $strs = explode(':',$server); + $host = $strs[0]; + $port = $strs[1]; + $result[$server] = sendMemcacheCommand($host,$port,$command); + } + return $result; +} +function sendMemcacheCommand($server,$port,$command){ + + $s = @fsockopen($server,$port); + if (!$s){ + die("Cant connect to:".$server.':'.$port); + } + + fwrite($s, $command."\r\n"); + + $buf=''; + while ((!feof($s))) { + $buf .= fgets($s, 256); + if (strpos($buf,"END\r\n")!==false){ // stat says end + break; + } + if (strpos($buf,"DELETED\r\n")!==false || strpos($buf,"NOT_FOUND\r\n")!==false){ // delete says these + break; + } + if (strpos($buf,"OK\r\n")!==false){ // flush_all says ok + break; + } + } + fclose($s); + return parseMemcacheResults($buf); +} +function parseMemcacheResults($str){ + + $res = array(); + $lines = explode("\r\n",$str); + $cnt = count($lines); + for($i=0; $i< $cnt; $i++){ + $line = $lines[$i]; + $l = explode(' ',$line,3); + if (count($l)==3){ + $res[$l[0]][$l[1]]=$l[2]; + if ($l[0]=='VALUE'){ // next line is the value + $res[$l[0]][$l[1]] = array(); + list ($flag,$size)=explode(' ',$l[2]); + $res[$l[0]][$l[1]]['stat']=array('flag'=>$flag,'size'=>$size); + $res[$l[0]][$l[1]]['value']=$lines[++$i]; + } + }elseif($line=='DELETED' || $line=='NOT_FOUND' || $line=='OK'){ + return $line; + } + } + return $res; + +} + +function dumpCacheSlab($server,$slabId,$limit){ + list($host,$port) = explode(':',$server); + $resp = sendMemcacheCommand($host,$port,'stats cachedump '.$slabId.' '.$limit); + + return $resp; + +} + +function flushServer($server){ + list($host,$port) = explode(':',$server); + $resp = sendMemcacheCommand($host,$port,'flush_all'); + return $resp; +} +function getCacheItems(){ + $items = sendMemcacheCommands('stats items'); + $serverItems = array(); + $totalItems = array(); + foreach ($items as $server=>$itemlist){ + $serverItems[$server] = array(); + $totalItems[$server]=0; + if (!isset($itemlist['STAT'])){ + continue; + } + + $iteminfo = $itemlist['STAT']; + + foreach($iteminfo as $keyinfo=>$value){ + if (preg_match('/items\:(\d+?)\:(.+?)$/',$keyinfo,$matches)){ + $serverItems[$server][$matches[1]][$matches[2]] = $value; + if ($matches[2]=='number'){ + $totalItems[$server] +=$value; + } + } + } + } + return array('items'=>$serverItems,'counts'=>$totalItems); +} +function getMemcacheStats($total=true){ + $resp = sendMemcacheCommands('stats'); + if ($total){ + $res = array(); + foreach($resp as $server=>$r){ + foreach($r['STAT'] as $key=>$row){ + if (!isset($res[$key])){ + $res[$key]=null; + } + switch ($key){ + case 'pid': + $res['pid'][$server]=$row; + break; + case 'uptime': + $res['uptime'][$server]=$row; + break; + case 'time': + $res['time'][$server]=$row; + break; + case 'version': + $res['version'][$server]=$row; + break; + case 'pointer_size': + $res['pointer_size'][$server]=$row; + break; + case 'rusage_user': + $res['rusage_user'][$server]=$row; + break; + case 'rusage_system': + $res['rusage_system'][$server]=$row; + break; + case 'curr_items': + $res['curr_items']+=$row; + break; + case 'total_items': + $res['total_items']+=$row; + break; + case 'bytes': + $res['bytes']+=$row; + break; + case 'curr_connections': + $res['curr_connections']+=$row; + break; + case 'total_connections': + 
$res['total_connections']+=$row; + break; + case 'connection_structures': + $res['connection_structures']+=$row; + break; + case 'cmd_get': + $res['cmd_get']+=$row; + break; + case 'cmd_set': + $res['cmd_set']+=$row; + break; + case 'get_hits': + $res['get_hits']+=$row; + break; + case 'get_misses': + $res['get_misses']+=$row; + break; + case 'evictions': + $res['evictions']+=$row; + break; + case 'bytes_read': + $res['bytes_read']+=$row; + break; + case 'bytes_written': + $res['bytes_written']+=$row; + break; + case 'limit_maxbytes': + $res['limit_maxbytes']+=$row; + break; + case 'threads': + $res['rusage_system'][$server]=$row; + break; + } + } + } + return $res; + } + return $resp; +} + +////////////////////////////////////////////////////// + +// +// don't cache this page +// +header("Cache-Control: no-store, no-cache, must-revalidate"); // HTTP/1.1 +header("Cache-Control: post-check=0, pre-check=0", false); +header("Pragma: no-cache"); // HTTP/1.0 + +function duration($ts) { + global $time; + $years = (int)((($time - $ts)/(7*86400))/52.177457); + $rem = (int)(($time-$ts)-($years * 52.177457 * 7 * 86400)); + $weeks = (int)(($rem)/(7*86400)); + $days = (int)(($rem)/86400) - $weeks*7; + $hours = (int)(($rem)/3600) - $days*24 - $weeks*7*24; + $mins = (int)(($rem)/60) - $hours*60 - $days*24*60 - $weeks*7*24*60; + $str = ''; + if($years==1) $str .= "$years year, "; + if($years>1) $str .= "$years years, "; + if($weeks==1) $str .= "$weeks week, "; + if($weeks>1) $str .= "$weeks weeks, "; + if($days==1) $str .= "$days day,"; + if($days>1) $str .= "$days days,"; + if($hours == 1) $str .= " $hours hour and"; + if($hours>1) $str .= " $hours hours and"; + if($mins == 1) $str .= " 1 minute"; + else $str .= " $mins minutes"; + return $str; +} + +// create graphics +// +function graphics_avail() { + return extension_loaded('gd'); +} + +function bsize($s) { + foreach (array('','K','M','G') as $i => $k) { + if ($s < 1024) break; + $s/=1024; + } + return sprintf("%5.1f %sBytes",$s,$k); +} + +// create menu entry +function menu_entry($ob,$title) { + global $PHP_SELF; + if ($ob==$_GET['op']){ + return "
  • $title
  • "; + } + return "
  • $title
  • "; +} + +function getHeader(){ + $header = << + +MEMCACHE INFO + + + +
    +

    + + memcache.php by Harun Yayli +

    +
    +
    +
    +EOB; + + return $header; +} +function getFooter(){ + global $VERSION; + $footer = '
    + +'; + + return $footer; + +} +function getMenu(){ + global $PHP_SELF; +echo "