}, {
"IpProtocol": "tcp",
"FromPort": "7373",
"ToPort": "7373",
"CidrIp": "0.0.0.0/0"
}]
}
},
"InstanceSecurityGroupec2": {
  "Type": "AWS::EC2::SecurityGroup",
  "Properties": {
    "GroupDescription": "ec2 jump security grp",
    "VpcId": {
      "Ref": "VPC"
    },
    "SecurityGroupIngress": [{
      "IpProtocol": "icmp",
      "FromPort": "-1",
      "ToPort": "-1",
      "CidrIp": "0.0.0.0/0"
    }, {
      "IpProtocol": "tcp",
      "FromPort": "22",
      "ToPort": "22",
      "CidrIp": "0.0.0.0/0"
    }],
    "SecurityGroupEgress": [{
      "IpProtocol": "-1",
      "CidrIp": "0.0.0.0/0"
    }]
  }
},
"ec2Server": {
  "Type": "AWS::EC2::Instance",
  "Properties": {
    "ImageId": "ami-123456",
    "InstanceType": {
      "Ref": "InstanceType"
    },
    "SecurityGroupIds": [{
      "Ref": "InstanceSecurityGroupec2"
    }],
    "SubnetId": {
      "Ref": "PublicSubnet"
    }
  }
},
"InstanceSecurityGroupSelfRule": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "InstanceSecurityGroup"
},
"IpProtocol": "-1",
"FromPort": "0",
"ToPort": "65535",
"SourceSecurityGroupId": {
"Ref": "InstanceSecurityGroup"
}
}
}
}
}
lbuserdata.sh
#!/bin/sh
# lbuserdata.sh -- EC2 user-data for the load-balancer node.
# Installs HAProxy, writes its config, then installs a Serf agent whose
# member-join handler appends web nodes to the HAProxy backend.
# NOTE(review): in this copy of the file the script is cut off below --
# the member-leave handler heredoc that should follow is truncated.
set -e
# Install HAProxy
sudo apt-get update
sudo apt-get install -y haproxy
# Configure it in a jank way
cat <<EOF >/tmp/haproxy.cfg
global
daemon
maxconn 256
defaults
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen stats
bind *:9999
mode http
stats enable
stats uri /
stats refresh 2s
#listen http-in
# bind *:80
# balance roundrobin
# option http-server-close
frontend redis
bind 127.0.0.1:5000 name redis
default_backend redis_servers
maxconn 1024
backend redis_servers
balance roundrobin
#option tcp-check
#tcp-check connect
#tcp-check send PING\r\n
#tcp-check expect string +PONG
#tcp-check send QUIT\r\n
#tcp-check expect string +OK
#server redis_7000 localhost:7000 check inter 1s weight 77
#server redis_7001 localhost:7001 check inter 1s weight 33
EOF
sudo mv /tmp/haproxy.cfg /etc/haproxy/haproxy.cfg
# Enable HAProxy
cat <<EOF >/tmp/haproxy
ENABLED=1
EOF
sudo mv /tmp/haproxy /etc/default/haproxy
# Start it
sudo /etc/init.d/haproxy start
# Role tag advertised to the Serf cluster; consumed by the agent config.
export SERF_ROLE="lb"
set -e
# Fetch the Serf binary, retrying until the download succeeds.
sudo apt-get install -y unzip
cd /tmp
until wget -O serf.zip https://dl.bintray.com/mitchellh/serf/0.6.4_linux_amd64.zip; do
sleep 1
done
unzip serf.zip
sudo mv serf /usr/local/bin/serf
# The member join script is invoked when a member joins the Serf cluster.
# Our join script simply adds the node to the load balancer.
# (The heredoc delimiter is unquoted, so the backslash escapes below keep
# \${...}, \$n and backticks literal until join.sh actually runs.)
cat <<EOF >/tmp/join.sh
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member join."
exit 0
fi
while read line; do
ROLE=\`echo \$line | awk '{print \\\$3 }'\`
if [ "x\${ROLE}" != "xweb" ]; then
continue
fi
echo \$line | \\
awk '{ printf " server %s %s check\\n", \$1, \$2 }' >>/etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
sudo mv /tmp/join.sh /usr/local/bin/serf_member_join.sh
chmod +x /usr/local/bin/serf_member_join.sh
# The member leave script is invoked when a member leaves or fails out
# of the serf cluster. Our script removes the node from the load balancer.
cat <<EOF >/tmp/leave.sh
"IpProtocol": "tcp",
"FromPort": "7373",
"ToPort": "7373",
"CidrIp": "0.0.0.0/0"
}]
}
},
"InstanceSecurityGroupec2": {
  "Type": "AWS::EC2::SecurityGroup",
  "Properties": {
    "GroupDescription": "ec2 jump security grp",
    "VpcId": {
      "Ref": "VPC"
    },
    "SecurityGroupIngress": [{
      "IpProtocol": "icmp",
      "FromPort": "-1",
      "ToPort": "-1",
      "CidrIp": "0.0.0.0/0"
    }, {
      "IpProtocol": "tcp",
      "FromPort": "22",
      "ToPort": "22",
      "CidrIp": "0.0.0.0/0"
    }],
    "SecurityGroupEgress": [{
      "IpProtocol": "-1",
      "CidrIp": "0.0.0.0/0"
    }]
  }
},
"ec2Server": {
  "Type": "AWS::EC2::Instance",
  "Properties": {
    "ImageId": "ami-123456",
    "InstanceType": {
      "Ref": "InstanceType"
    },
    "SecurityGroupIds": [{
      "Ref": "InstanceSecurityGroupec2"
    }],
    "SubnetId": {
      "Ref": "PublicSubnet"
    }
  }
},
"InstanceSecurityGroupSelfRule": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "InstanceSecurityGroup"
},
"IpProtocol": "-1",
"FromPort": "0",
"ToPort": "65535",
"SourceSecurityGroupId": {
"Ref": "InstanceSecurityGroup"
}
}
}
}
}
lbuserdata.sh
#!/bin/sh
# lbuserdata.sh -- EC2 user-data for the load-balancer node.
# Installs HAProxy, writes its config, then installs a Serf agent whose
# event handlers add (member-join) and remove (member-leave/failed)
# web nodes from the HAProxy backend.
set -e
# Install HAProxy
sudo apt-get update
sudo apt-get install -y haproxy
# Configure it in a jank way
cat <<EOF >/tmp/haproxy.cfg
global
daemon
maxconn 256
defaults
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen stats
bind *:9999
mode http
stats enable
stats uri /
stats refresh 2s
#listen http-in
# bind *:80
# balance roundrobin
# option http-server-close
frontend redis
bind 127.0.0.1:5000 name redis
default_backend redis_servers
maxconn 1024
backend redis_servers
balance roundrobin
#option tcp-check
#tcp-check connect
#tcp-check send PING\r\n
#tcp-check expect string +PONG
#tcp-check send QUIT\r\n
#tcp-check expect string +OK
#server redis_7000 localhost:7000 check inter 1s weight 77
#server redis_7001 localhost:7001 check inter 1s weight 33
EOF
sudo mv /tmp/haproxy.cfg /etc/haproxy/haproxy.cfg
# Enable HAProxy
cat <<EOF >/tmp/haproxy
ENABLED=1
EOF
sudo mv /tmp/haproxy /etc/default/haproxy
# Start it
sudo /etc/init.d/haproxy start
# Role tag advertised to the Serf cluster; consumed by the agent config below.
export SERF_ROLE="lb"
set -e
sudo apt-get install -y unzip
cd /tmp
until wget -O serf.zip https://dl.bintray.com/mitchellh/serf/0.6.4_linux_amd64.zip; do
sleep 1
done
unzip serf.zip
sudo mv serf /usr/local/bin/serf
# The member join script is invoked when a member joins the Serf cluster.
# Our join script simply adds the node to the load balancer.
cat <<EOF >/tmp/join.sh
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member join."
exit 0
fi
while read line; do
ROLE=\`echo \$line | awk '{print \\\$3 }'\`
if [ "x\${ROLE}" != "xweb" ]; then
continue
fi
echo \$line | \\
awk '{ printf " server %s %s check\\n", \$1, \$2 }' >>/etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
sudo mv /tmp/join.sh /usr/local/bin/serf_member_join.sh
chmod +x /usr/local/bin/serf_member_join.sh
# The member leave script is invoked when a member leaves or fails out
# of the serf cluster. Our script removes the node from the load balancer.
# FIX: the redirection target was split across two lines ("/tmp/leav" +
# "e.sh"), which wrote the heredoc to /tmp/leav and made the later
# "sudo mv /tmp/leave.sh" fail under set -e.
cat <<EOF >/tmp/leave.sh
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member leave"
exit 0
fi
while read line; do
NAME=\`echo \$line | awk '{print \\\$1 }'\`
sed -i'' "/\${NAME} /d" /etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
sudo mv /tmp/leave.sh /usr/local/bin/serf_member_left.sh
chmod +x /usr/local/bin/serf_member_left.sh
# Configure the agent
cat <<EOF >/tmp/agent.conf
description "Serf agent"
start on runlevel [2345]
stop on runlevel [!2345]
exec /usr/local/bin/serf agent \\
-event-handler "member-join=/usr/local/bin/serf_member_join.sh" \\
-event-handler "member-leave,member-failed=/usr/local/bin/serf_member_left.sh" \\
-event-handler "query:load=uptime" \\
-tag role=${SERF_ROLE} >>/var/log/serf.log 2>&1
EOF
sudo mv /tmp/agent.conf /etc/init/serf.conf
# Start the agent!
sudo start serf
# If we're the web node, then we need to configure the join retry
# NOTE: SERF_ROLE is "lb" on this node, so the script exits here; the
# serf-join/serf-query jobs below are only installed on web nodes.
if [ "x${SERF_ROLE}" != "xweb" ]; then
exit 0
fi
cat <<EOF >/tmp/join.conf
description "Join the serf cluster"
start on runlevel [2345]
stop on runlevel [!2345]
task
respawn
script
sleep 5
exec /usr/local/bin/serf join 10.0.0.5
end script
EOF
sudo mv /tmp/join.conf /etc/init/serf-join.conf
sudo start serf-join
# NOTE(review): `date` and ${HOSTNAME} below sit in an UNQUOTED heredoc,
# so they expand when this user-data runs, not each time the upstart job
# runs -- presumably they were meant to be escaped; confirm intent.
cat <<EOF >/tmp/query.conf
description "Query the serf cluster load"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
script
echo `date` I am "${HOSTNAME}<br>" > /var/www/index.html.1
serf query -no-ack load | sed 's|$|<br>|' >> /var/www/index.html.1
mv /var/www/index.html.1 /var/www/index.html
sleep 10
end script
EOF
sudo mv /tmp/query.conf /etc/init/serf-query.conf
sudo start serf-query
redisboxdata.sh
#!/bin/sh
set -e
# Install HAProxy
sudo apt-get update
sudo apt-get install -y haproxy
# Configure it in a jank way
cat <<EOF >/tmp/haproxy.cfg
global
daemon
maxconn 256
defaults
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen stats
bind *:9999
mode http
stats enable
stats uri /
stats refresh 2s
frontend redis
bind 127.0.0.1:5000 name redis
default_backend redis_servers
maxconn 1024
backend redis_servers
balance roundrobin
#option tcp-check
#tcp-check connect
#tcp-check send PING\r\n
#tcp-check expect string +PONG
#tcp-check send QUIT\r\n
#tcp-check expect string +OK
#server redis_7000 localhost:7000 check inter 1s weight 77
#server redis_7001 localhost:7001 check inter 1s weight 33
EOF
sudo mv /tmp/haproxy.cfg /etc/haproxy/haproxy.cfg
# Enable HAProxy
cat <<EOF >/tmp/haproxy
ENABLED=1
EOF
sudo mv /tmp/haproxy /etc/default/haproxy
# Start it
sudo /etc/init.d/haproxy start
export SERF_ROLE="redis"
cat <<EOF >/tmp/redis.conf
bind 127.0.0.1
protected-mode no
timeout 0
tcp-keepalive 300
loglevel notice
pidfile /var/run/redis_6379.pid
EOF
sudo mv /tmp/redis.conf /etc/redis/redis.conf
set -e
sudo apt-get install -y unzip
cd /tmp
until wget -O serf.zip https://dl.bintray.com/mitchellh/serf/0.6.4_linux_amd64.zip; do
sleep 1
done
unzip serf.zip
sudo mv serf /usr/local/bin/serf
# The member join script is invoked when a member joins the Serf cluster.
# Our join script simply adds the node to the load balancer.
cat <<EOF >/tmp/join.sh
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member join."
exit 0
fi
while read line; do
ROLE=\`echo \$line | awk '{print \\\$3 }'\`
if [ "x\${ROLE}" != "xweb" ]; then
con
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member leave"
exit 0
fi
while read line; do
NAME=\`echo \$line | awk '{print \\\$1 }'\`
sed -i'' "/\${NAME} /d" /etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
sudo mv /tmp/leave.sh /usr/local/bin/serf_member_left.sh
chmod +x /usr/local/bin/serf_member_left.sh
# Configure the agent
cat <<EOF >/tmp/agent.conf
description "Serf agent"
start on runlevel [2345]
stop on runlevel [!2345]
exec /usr/local/bin/serf agent \\
-event-handler "member-join=/usr/local/bin/serf_member_join.sh" \\
-event-handler "member-leave,member-failed=/usr/local/bin/serf_member_left.sh" \\
-event-handler "query:load=uptime" \\
-tag role=${SERF_ROLE} >>/var/log/serf.log 2>&1
EOF
sudo mv /tmp/agent.conf /etc/init/serf.conf
# Start the agent!
sudo start serf
# If we're the web node, then we need to configure the join retry
if [ "x${SERF_ROLE}" != "xweb" ]; then
exit 0
fi
cat <<EOF >/tmp/join.conf
description "Join the serf cluster"
start on runlevel [2345]
stop on runlevel [!2345]
task
respawn
script
sleep 5
exec /usr/local/bin/serf join 10.0.0.5
end script
EOF
sudo mv /tmp/join.conf /etc/init/serf-join.conf
sudo start serf-join
cat <<EOF >/tmp/query.conf
description "Query the serf cluster load"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
script
echo `date` I am "${HOSTNAME}<br>" > /var/www/index.html.1
serf query -no-ack load | sed 's|$|<br>|' >> /var/www/index.html.1
mv /var/www/index.html.1 /var/www/index.html
sleep 10
end script
EOF
sudo mv /tmp/query.conf /etc/init/serf-query.conf
sudo start serf-query
redisboxdata.sh
#!/bin/sh
set -e
# Install HAProxy
sudo apt-get update
sudo apt-get install -y haproxy
# Configure it in a jank way
cat <<EOF >/tmp/haproxy.cfg
global
daemon
maxconn 256
defaults
mode http
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
listen stats
bind *:9999
mode http
stats enable
stats uri /
stats refresh 2s
frontend redis
bind 127.0.0.1:5000 name redis
default_backend redis_servers
maxconn 1024
backend redis_servers
balance roundrobin
#option tcp-check
#tcp-check connect
#tcp-check send PING\r\n
#tcp-check expect string +PONG
#tcp-check send QUIT\r\n
#tcp-check expect string +OK
#server redis_7000 localhost:7000 check inter 1s weight 77
#server redis_7001 localhost:7001 check inter 1s weight 33
EOF
sudo mv /tmp/haproxy.cfg /etc/haproxy/haproxy.cfg
# Enable HAProxy
cat <<EOF >/tmp/haproxy
ENABLED=1
EOF
sudo mv /tmp/haproxy /etc/default/haproxy
# Start it
sudo /etc/init.d/haproxy start
export SERF_ROLE="redis"
cat <<EOF >/tmp/redis.conf
bind 127.0.0.1
protected-mode no
timeout 0
tcp-keepalive 300
loglevel notice
pidfile /var/run/redis_6379.pid
EOF
sudo mv /tmp/redis.conf /etc/redis/redis.conf
set -e
sudo apt-get install -y unzip
cd /tmp
until wget -O serf.zip https://dl.bintray.com/mitchellh/serf/0.6.4_linux_amd64.zip; do
sleep 1
done
unzip serf.zip
sudo mv serf /usr/local/bin/serf
# The member join script is invoked when a member joins the Serf cluster.
# Our join script simply adds the node to the load balancer.
cat <<EOF >/tmp/join.sh
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member join."
exit 0
fi
while read line; do
ROLE=\`echo \$line | awk '{print \\\$3 }'\`
if [ "x\${ROLE}" != "xweb" ]; then
con
Single-pane-of-glass observability MCP server (a Jarvis-style AI assistant)
I’m excited to share a project I’ve been diligently working on for the past month during my free time to help out #devops #sre folks who are always on call and “firefighting” incidents: it’s an observability MCP server.
This MCP server — named Eagle-Eye — acts like a Jarvis-style AI assistant.
Eagle-Eye aims to streamline workflows for on-call #devops, #sre engineers by providing quick insights using the power of AI.
You can ask Eagle-Eye things like:
🔍 “Why is this Kubernetes pod crashing?”
📊 “What’s this Datadog alert about?”
🧑💻 “Who’s on call in PagerDuty?”
📈 “Can you explain this PromQL query?”
Eagle-Eye connects to systems using the MCP server, retrieves data, and uses AI to provide recommendations back to the user.
Currently integrated systems include:
Kubernetes (k8s)
PagerDuty
Prometheus
Datadog
…and more integrations are on the way!
It currently uses the Cursor IDE to interact with the MCP server, making it feel like you’re chatting directly with your infrastructure.
Feel free to download the repo and add more integrations or update the code — it’s completely open source. The idea, as I mentioned, is to have a single-pane-of-glass tool that helps DevOps, SREs, or on-call folks.
I’ve attached some snapshots inside the repo for quick reference.
Here’s the link to the repo: https://github.com/neeltom92/eagle-eye-mcp/blob/main/README.md
In my next post, I plan to share how I leveraged Facebook’s Prophet forecasting library and time-series metrics from Datadog to build an MCP server that does infrastructure capacity planning at scale.
Imagine a tool that could help predict traffic patterns on CPU, memory, HPA, and more — perfect for handling spikes during Black Friday sales or marketing campaigns.
Excited to keep building and sharing!
#mcp #server #ai #observability #devops #sre
https://redd.it/1lqkgxm
@r_devops
I’m excited to share a project I’ve been diligently working on for the past month during my free time to help out #devops #sre folks who are always on call and “firefighting” incidents: it’s an observability MCP server.
This MCP server — named Eagle-Eye — acts like a Jarvis-style AI assistant.
Eagle-Eye aims to streamline workflows for on-call #devops, #sre engineers by providing quick insights using the power of AI.
You can ask Eagle-Eye things like:
🔍 “Why is this Kubernetes pod crashing?”
📊 “What’s this Datadog alert about?”
🧑💻 “Who’s on call in PagerDuty?”
📈 “Can you explain this PromQL query?”
Eagle-Eye connects to systems using the MCP server, retrieves data, and uses AI to provide recommendations back to the user.
Currently integrated systems include:
Kubernetes (k8s)
PagerDuty
Prometheus
Datadog
…and more integrations are on the way!
It currently uses the Cursor IDE to interact with the MCP server, making it feel like you’re chatting directly with your infrastructure.
Feel free to download the repo and add more integrations or update the code — it’s completely open source. The idea, as I mentioned, is to have a single-pane-of-glass tool that helps DevOps, SREs, or on-call folks.
I’ve attached some snapshots inside the repo for quick reference.
Here’s the link to the repo: https://github.com/neeltom92/eagle-eye-mcp/blob/main/README.md
In my next post, I plan to share how I leveraged Facebook’s Prophet forecasting library and time-series metrics from Datadog to build an MCP server that does infrastructure capacity planning at scale.
Imagine a tool that could help predict traffic patterns on CPU, memory, HPA, and more — perfect for handling spikes during Black Friday sales or marketing campaigns.
Excited to keep building and sharing!
#mcp #server #ai #observability #devops #sre
https://redd.it/1lqkgxm
@r_devops
GitHub
eagle-eye-mcp/README.md at main · neeltom92/eagle-eye-mcp
Contribute to neeltom92/eagle-eye-mcp development by creating an account on GitHub.