tinue
fi
echo \$line | \\
awk '{ printf " server %s %s check\\n", \$1, \$2 }' >>/etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
sudo mv /tmp/join.sh /usr/local/bin/serf_member_join.sh
chmod +x /usr/local/bin/serf_member_join.sh
# The member leave script is invoked when a member leaves or fails out
# of the serf cluster. Our script removes the node from the load balancer.
# NOTE(review): this chunk appears to be embedded in an outer unquoted
# heredoc (hence the backslash-dollar / double-backslash escaping); the
# escapes are deliberate so the inner script is written out with literal
# dollar signs and backquotes -- do not "fix" them.
cat <<EOF >/tmp/leave.sh
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member leave"
exit 0
fi
while read line; do
NAME=\`echo \$line | awk '{print \\\$1 }'\`
sed -i'' "/\${NAME} /d" /etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
# Install the handler where the serf agent's -event-handler flag expects it.
sudo mv /tmp/leave.sh /usr/local/bin/serf_member_left.sh
chmod +x /usr/local/bin/serf_member_left.sh
# Configure the agent
# Upstart job for the long-running serf agent. The event handlers wire
# cluster membership changes into the haproxy join/leave scripts installed
# above, and "query:load=uptime" lets peers ask this node for its load.
# NOTE(review): SERF_ROLE is unescaped, so it expands when this chunk is
# written by the outer provisioning heredoc -- presumably intentional,
# baking the role in at provision time; confirm against the caller.
cat <<EOF >/tmp/agent.conf
description "Serf agent"
start on runlevel [2345]
stop on runlevel [!2345]
exec /usr/local/bin/serf agent \\
-event-handler "member-join=/usr/local/bin/serf_member_join.sh" \\
-event-handler "member-leave,member-failed=/usr/local/bin/serf_member_left.sh" \\
-event-handler "query:load=uptime" \\
-tag role=${SERF_ROLE} >>/var/log/serf.log 2>&1
EOF
sudo mv /tmp/agent.conf /etc/init/serf.conf
# Start the agent!
sudo start serf
# If we're the web node, then we need to configure the join retry
# (non-web roles stop here; only web instances install the join job).
if [ "x${SERF_ROLE}" != "xweb" ]; then
exit 0
fi
# Upstart "task" that joins the cluster after a short delay so the agent
# started above has time to come up; "respawn" retries on failure.
# NOTE(review): 10.0.0.5 is presumably the lb's static private address --
# confirm against the network layout before reusing.
cat <<EOF >/tmp/join.conf
description "Join the serf cluster"
start on runlevel [2345]
stop on runlevel [!2345]
task
respawn
script
sleep 5
exec /usr/local/bin/serf join 10.0.0.5
end script
EOF
sudo mv /tmp/join.conf /etc/init/serf-join.conf
sudo start serf-join
# Upstart job that rebuilds the web page with cluster load via a serf
# query, writing to a temp file then mv-ing for an atomic swap; "respawn"
# plus the trailing sleep makes it re-run roughly every 10 seconds.
# NOTE(review): unlike the sibling heredocs, the date command substitution
# and HOSTNAME on the echo line are NOT escaped, so they expand once at
# provision time and the date is frozen into the job file -- this looks
# unintended (they were probably meant to be escaped like the others);
# verify before reuse.
cat <<EOF >/tmp/query.conf
description "Query the serf cluster load"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
script
echo `date` I am "${HOSTNAME}<br>" > /var/www/index.html.1
serf query -no-ack load | sed 's|$|<br>|' >> /var/www/index.html.1
mv /var/www/index.html.1 /var/www/index.html
sleep 10
end script
EOF
sudo mv /tmp/query.conf /etc/init/serf-query.conf
sudo start serf-query
Hello, I'm new to this. I have written this, which generates
\--vps
\--private/public subnet
\--asg (with startup script)
\--elb
\--separate ec2 instance as a jumpbox to access the redis instances in a round-robin manner
​
getting the following error:
error: Parse error on line 321:
...icSubnet" }, }, } "InstanceS
---------------------^
Expecting 'STRING', got '}'
Also it would be a great help if someone told me if i m on the right track.
https://redd.it/fce9pm
@r_devops
fi
echo \$line | \\
awk '{ printf " server %s %s check\\n", \$1, \$2 }' >>/etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
sudo mv /tmp/join.sh /usr/local/bin/serf_member_join.sh
chmod +x /usr/local/bin/serf_member_join.sh
# The member leave script is invoked when a member leaves or fails out
# of the serf cluster. Our script removes the node from the load balancer.
# NOTE(review): this chunk appears to be embedded in an outer unquoted
# heredoc (hence the backslash-dollar / double-backslash escaping); the
# escapes are deliberate so the inner script is written out with literal
# dollar signs and backquotes -- do not "fix" them.
cat <<EOF >/tmp/leave.sh
if [ "x\${SERF_TAG_ROLE}" != "xlb" ]; then
echo "Not an lb. Ignoring member leave"
exit 0
fi
while read line; do
NAME=\`echo \$line | awk '{print \\\$1 }'\`
sed -i'' "/\${NAME} /d" /etc/haproxy/haproxy.cfg
done
/etc/init.d/haproxy reload
EOF
# Install the handler where the serf agent's -event-handler flag expects it.
sudo mv /tmp/leave.sh /usr/local/bin/serf_member_left.sh
chmod +x /usr/local/bin/serf_member_left.sh
# Configure the agent
# Upstart job for the long-running serf agent. The event handlers wire
# cluster membership changes into the haproxy join/leave scripts installed
# above, and "query:load=uptime" lets peers ask this node for its load.
# NOTE(review): SERF_ROLE is unescaped, so it expands when this chunk is
# written by the outer provisioning heredoc -- presumably intentional,
# baking the role in at provision time; confirm against the caller.
cat <<EOF >/tmp/agent.conf
description "Serf agent"
start on runlevel [2345]
stop on runlevel [!2345]
exec /usr/local/bin/serf agent \\
-event-handler "member-join=/usr/local/bin/serf_member_join.sh" \\
-event-handler "member-leave,member-failed=/usr/local/bin/serf_member_left.sh" \\
-event-handler "query:load=uptime" \\
-tag role=${SERF_ROLE} >>/var/log/serf.log 2>&1
EOF
sudo mv /tmp/agent.conf /etc/init/serf.conf
# Start the agent!
sudo start serf
# If we're the web node, then we need to configure the join retry
# (non-web roles stop here; only web instances install the join job).
if [ "x${SERF_ROLE}" != "xweb" ]; then
exit 0
fi
# Upstart "task" that joins the cluster after a short delay so the agent
# started above has time to come up; "respawn" retries on failure.
# NOTE(review): 10.0.0.5 is presumably the lb's static private address --
# confirm against the network layout before reusing.
cat <<EOF >/tmp/join.conf
description "Join the serf cluster"
start on runlevel [2345]
stop on runlevel [!2345]
task
respawn
script
sleep 5
exec /usr/local/bin/serf join 10.0.0.5
end script
EOF
sudo mv /tmp/join.conf /etc/init/serf-join.conf
sudo start serf-join
# Upstart job that rebuilds the web page with cluster load via a serf
# query, writing to a temp file then mv-ing for an atomic swap; "respawn"
# plus the trailing sleep makes it re-run roughly every 10 seconds.
# NOTE(review): unlike the sibling heredocs, the date command substitution
# and HOSTNAME on the echo line are NOT escaped, so they expand once at
# provision time and the date is frozen into the job file -- this looks
# unintended (they were probably meant to be escaped like the others);
# verify before reuse.
cat <<EOF >/tmp/query.conf
description "Query the serf cluster load"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
script
echo `date` I am "${HOSTNAME}<br>" > /var/www/index.html.1
serf query -no-ack load | sed 's|$|<br>|' >> /var/www/index.html.1
mv /var/www/index.html.1 /var/www/index.html
sleep 10
end script
EOF
sudo mv /tmp/query.conf /etc/init/serf-query.conf
sudo start serf-query
Hello, I'm new to this. I have written this, which generates
\--vps
\--private/public subnet
\--asg (with startup script)
\--elb
\--separate ec2 instance as a jumpbox to access the redis instances in a round-robin manner
​
getting the following error:
error: Parse error on line 321:
...icSubnet" }, }, } "InstanceS
---------------------^
Expecting 'STRING', got '}'
Also it would be a great help if someone told me if i m on the right track.
https://redd.it/fce9pm
@r_devops
reddit
Need help with cloudformation
{ "Parameters": { "redisboxes": { "Type": "String", "Default": "2", "Description": "launch 2 boxes" } ...
Security Engineer for Small vs Large Company?
Hello all im currently a security engineer for a small company, im responsible for many things like, SIEM tool, Monitoring tools, Identity management, CI/CD pipeline, cloud infrastructure, kubernetes clusters, etc etc. Basically a Cloud Security Engineer + DevOps, Because we are small company (250) and im the only one on this role i get a a bit of respect and people look up to me, i feel important many times. I get perks like training (SANS included) once a year, they will pay for any certification exam fee, as many as i take during the year, including some cheap training. I been with this company for about 1.5 years, and i just got my first raise few months ago, i was hoping it to be way larger than it was but it was only 4% increase. So i updated LinkedIn and recruiters had a new target :)
That being said i currently got offers to two large companies, one with 700k employees and the other with over 50k employees. Both offers are about 30% more than what im currently making, with potential higher bonus, better benefits, but i will go from being a key member that has a say and do on the entire organization to a member of a large team. Which could be a bad thing, or a really good thing since i will have other members to learn from, and to share ideas with.
Note: I did talk to my CIO about not being happy about the salary raise, he told me to eat dirt (on a nice way).
https://redd.it/fcnecx
@r_devops
Hello all im currently a security engineer for a small company, im responsible for many things like, SIEM tool, Monitoring tools, Identity management, CI/CD pipeline, cloud infrastructure, kubernetes clusters, etc etc. Basically a Cloud Security Engineer + DevOps, Because we are small company (250) and im the only one on this role i get a a bit of respect and people look up to me, i feel important many times. I get perks like training (SANS included) once a year, they will pay for any certification exam fee, as many as i take during the year, including some cheap training. I been with this company for about 1.5 years, and i just got my first raise few months ago, i was hoping it to be way larger than it was but it was only 4% increase. So i updated LinkedIn and recruiters had a new target :)
That being said i currently got offers to two large companies, one with 700k employees and the other with over 50k employees. Both offers are about 30% more than what im currently making, with potential higher bonus, better benefits, but i will go from being a key member that has a say and do on the entire organization to a member of a large team. Which could be a bad thing, or a really good thing since i will have other members to learn from, and to share ideas with.
Note: I did talk to my CIO about not being happy about the salary raise, he told me to eat dirt (on a nice way).
https://redd.it/fcnecx
@r_devops
reddit
Security Engineer for Small vs Large Company?
Hello all im currently a security engineer for a small company, im responsible for many things like, SIEM tool, Monitoring tools, Identity...
Does anyone use Bamboo? Where are you now?
I'm stuck using this. This product is barely in a supported mode and so misaligned to the industry of building or deploying software. There's no new features, and the existing ones are half baked, along with price increases and Atlassian's push on Cloud it doesn't make any sense to continue using this.
Where have you gone from Bamboo?
I'm thinking just move to Github Enterprise + Actions/Runners. Security is a big part. Any insights folks?
https://redd.it/fcj9l1
@r_devops
I'm stuck using this. This product is barely in a supported mode and so misaligned to the industry of building or deploying software. There's no new features, and the existing ones are half baked, along with price increases and Atlassian's push on Cloud it doesn't make any sense to continue using this.
Where have you gone from Bamboo?
I'm thinking just move to Github Enterprise + Actions/Runners. Security is a big part. Any insights folks?
https://redd.it/fcj9l1
@r_devops
reddit
Does anyone use Bamboo? Where are you now?
I'm stuck using this. This product is barely in a supported mode and so misaligned to the industry of building or deploying software. There's no...
Jenkins multi branch pipeline build PR and branches
So we have a multibranch pipeline. The problem with that is this.
Let\`s say we have a branch **develop** which contains a Jenkinsfile. There is also a webhook which triggers builds if there is a new commit to this branch. If i checkout a new branch (A) from **develop**, Jenkins tracks and builds that branch. So far so good..
But if i create a PR from branch **A** to **develop**, Jenkins again tracks this PR and builds it.
So now the problem:
If i commit something to branch A, two builds are started.
1. One for branch **A**
2. One for the **PR**
But generally this builds the same code twice! How can I build only the PR if it is created and if not the branch? I\`m using groovy pipeline.
Thanks in advance :)
https://redd.it/fckakm
@r_devops
So we have a multibranch pipeline. The problem with that is this.
Let\`s say we have a branch **develop** which contains a Jenkinsfile. There is also a webhook which triggers builds if there is a new commit to this branch. If i checkout a new branch (A) from **develop**, Jenkins tracks and builds that branch. So far so good..
But if i create a PR from branch **A** to **develop**, Jenkins again tracks this PR and builds it.
So now the problem:
If i commit something to branch A, two builds are started.
1. One for branch **A**
2. One for the **PR**
But generally this builds the same code twice! How can I build only the PR if it is created and if not the branch? I\`m using groovy pipeline.
Thanks in advance :)
https://redd.it/fckakm
@r_devops
reddit
Jenkins multi branch pipeline build PR and branches
So we have a multibranch pipeline. The problem with that is this. Let\`s say we have a branch **develop** which contains a Jenkinsfile. There is...
Splitting up a monolithic teamcity server. Should i stick to TC or adopt other tools for mostly boilerplate windows builds?
everything we're doing is pretty basic, boilerplate. I've only ever used TC for building windows apps (we also have a jenkins instance that's equally as monolithic, but that's a whole other thing), but i've poked around with gitlab and azure devops in the past for research.
wondering if there's an alternative to TC that's low effort to entry and that won't break the bank?
https://redd.it/fcv4v2
@r_devops
everything we're doing is pretty basic, boilerplate. I've only ever used TC for building windows apps (we also have a jenkins instance that's equally as monolithic, but that's a whole other thing), but i've poked around with gitlab and azure devops in the past for research.
wondering if there's an alternative to TC that's low effort to entry and that won't break the bank?
https://redd.it/fcv4v2
@r_devops
reddit
Splitting up a monolithic teamcity server. Should i stick to TC or...
everything we're doing is pretty basic, boilerplate. I've only ever used TC for building windows apps (we also have a jenkins instance that's...
How to add date to custom index via fluentd ?
First, just to give some insight about the architecture:
- There are several microservices running in Kubernetes, and two teams manage those services.
- There is a single elasticsearch cluster on which all the logs from Kubernetes are pushed through `fluentd` Daemon.
- Both teams have different index names (for example, logstash-team1, logstash-team2)
Our logback configuration:
```
<appender name="STDOUT_JSON" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
<providers>
<contextName>
<fieldName>app</fieldName>
</contextName>
<timestamp>
<fieldName>timestamp</fieldName>
<timeZone>UTC</timeZone>
</timestamp>
<loggerName>
<fieldName>logger</fieldName>
</loggerName>
<logLevel>
<fieldName>level</fieldName>
</logLevel>
<callerData>
<classFieldName>class</classFieldName>
<methodFieldName>method</methodFieldName>
<lineFieldName>line</lineFieldName>
<fileFieldName>file</fileFieldName>
</callerData>
<threadName>
<fieldName>thread</fieldName>
</threadName>
<mdc />
<arguments>
<includeNonStructuredArguments>false</includeNonStructuredArguments>
</arguments>
<stackTrace>
<fieldName>stack</fieldName>
</stackTrace>
<message>
<fieldName>message</fieldName>
</message>
</providers>
<customFields>{"esindex": "logstash-team1"}</customFields>
</encoder>
</appender>
```
- Everything works fine with this, however the logs on Kibana are not deleted after 30 days. We figured that if we add date in the `esindex` field, it might solve the problem (there maybe other solutions too, that I'm not aware of).
- So the `esindex` should look something like this: `logstash-team1-%d{yyyy-mm-dd}`.
Is there a way to do this?
Or is there a way to delete logs, after 30 days, without appending date in index name?
https://redd.it/fcwk3k
@r_devops
First, just to give some insight about the architecture:
- There are several microservices running in Kubernetes, and two teams manage those services.
- There is a single elasticsearch cluster on which all the logs from Kubernetes are pushed through `fluentd` Daemon.
- Both teams have different index names (for example, logstash-team1, logstash-team2)
Our logback configuration:
```
<appender name="STDOUT_JSON" class="ch.qos.logback.core.ConsoleAppender">
<encoder class="net.logstash.logback.encoder.LogstashEncoder">
<providers>
<contextName>
<fieldName>app</fieldName>
</contextName>
<timestamp>
<fieldName>timestamp</fieldName>
<timeZone>UTC</timeZone>
</timestamp>
<loggerName>
<fieldName>logger</fieldName>
</loggerName>
<logLevel>
<fieldName>level</fieldName>
</logLevel>
<callerData>
<classFieldName>class</classFieldName>
<methodFieldName>method</methodFieldName>
<lineFieldName>line</lineFieldName>
<fileFieldName>file</fileFieldName>
</callerData>
<threadName>
<fieldName>thread</fieldName>
</threadName>
<mdc />
<arguments>
<includeNonStructuredArguments>false</includeNonStructuredArguments>
</arguments>
<stackTrace>
<fieldName>stack</fieldName>
</stackTrace>
<message>
<fieldName>message</fieldName>
</message>
</providers>
<customFields>{"esindex": "logstash-team1"}</customFields>
</encoder>
</appender>
```
- Everything works fine with this, however the logs on Kibana are not deleted after 30 days. We figured that if we add date in the `esindex` field, it might solve the problem (there maybe other solutions too, that I'm not aware of).
- So the `esindex` should look something like this: `logstash-team1-%d{yyyy-mm-dd}`.
Is there a way to do this?
Or is there a way to delete logs, after 30 days, without appending date in index name?
https://redd.it/fcwk3k
@r_devops
reddit
How to add date to custom index via fluentd ?
First, just to give some insight about the architecture: - There are several microservices running in Kubernetes, and two teams manage those...
This Week in DevOps
This weeks newsletter is out and as always I'd love to get some feedback on the style, format and content. Did you find it useful? Is there anything else you'd like to see covered? Any other suggestions?
Ideally I'd like this to be a community shaped newsletter offering maximum value for the least time investment possible. We all have too much to keep up with these days anyway, and I don't want this to be one more thing. Instead I'd like to replace the other sources you currently use to keep up to date on the cloud and give you one clear concise source that covers everything.
Have a look at this weeks edition and let me know what I can do to improve.
[https://thisweekindevops.com/2020/03/03/weekly-roundup-march-2nd-2020/](https://thisweekindevops.com/2020/03/03/weekly-roundup-march-2nd-2020/)
https://redd.it/fcxb6t
@r_devops
This weeks newsletter is out and as always I'd love to get some feedback on the style, format and content. Did you find it useful? Is there anything else you'd like to see covered? Any other suggestions?
Ideally I'd like this to be a community shaped newsletter offering maximum value for the least time investment possible. We all have too much to keep up with these days anyway, and I don't want this to be one more thing. Instead I'd like to replace the other sources you currently use to keep up to date on the cloud and give you one clear concise source that covers everything.
Have a look at this weeks edition and let me know what I can do to improve.
[https://thisweekindevops.com/2020/03/03/weekly-roundup-march-2nd-2020/](https://thisweekindevops.com/2020/03/03/weekly-roundup-march-2nd-2020/)
https://redd.it/fcxb6t
@r_devops
This Week In DevOps
Weekly Roundup: March 2nd, 2020 - This Week In DevOps
This week in DevOps we have announcements for AWS Transcribe and Activate. Azure rolled out some updates to Sphere and AttackDetection while GoogleCloud released version 1.0 of KubeFlow and added FlexSlots to Bigquery. Hashicorp addressed some technical debt…
Basic way to setup Node/Express/MySQL from one liner CLI on EC2
So I have a backend written as per the title. One main concern is usually it's like "alright please install MySQL and create a user with full privileges" then run this seed file that will generate the database/all the tables.
So I'm looking for a way to get all that running(install Node, run npm install, install MySQL, run seeder)
I've been doing these manually but trying to setup something so I can leave this thing for other developers who may not know how to setup MySQL.
Side note: I currently use systemd to run the node app and it handles certs within my index.js regarding the https part.
I'm also aware some IAMs have specific stacks. But I am just looking at a little Ubuntu server.
Any info is appreciated
https://redd.it/fcw7bi
@r_devops
So I have a backend written as per the title. One main concern is usually it's like "alright please install MySQL and create a user with full privileges" then run this seed file that will generate the database/all the tables.
So I'm looking for a way to get all that running(install Node, run npm install, install MySQL, run seeder)
I've been doing these manually but trying to setup something so I can leave this thing for other developers who may not know how to setup MySQL.
Side note: I currently use systemd to run the node app and it handles certs within my index.js regarding the https part.
I'm also aware some IAMs have specific stacks. But I am just looking at a little Ubuntu server.
Any info is appreciated
https://redd.it/fcw7bi
@r_devops
reddit
Basic way to setup Node/Express/MySQL from one liner CLI on EC2
So I have a backend written in as per the tittle. One main concern is usually it's like "alright please install MySQL and create a user with full...
How to create an on-call schedule that doesn’t suck.
Here's a post on how to create an effective on-call schedule for small startup teams, to the ones that span across multiple teams in multiple timezones and flexible rotations at an enterprise.
[https://blog.fyipe.com/how-to-create-an-on-call-schedule-that-doesnt-suck/](https://blog.fyipe.com/how-to-create-an-on-call-schedule-that-doesnt-suck/)
https://redd.it/fcx4z7
@r_devops
Here's a post on how to create an effective on-call schedule for small startup teams, to the ones that span across multiple teams in multiple timezones and flexible rotations at an enterprise.
[https://blog.fyipe.com/how-to-create-an-on-call-schedule-that-doesnt-suck/](https://blog.fyipe.com/how-to-create-an-on-call-schedule-that-doesnt-suck/)
https://redd.it/fcx4z7
@r_devops
reddit
How to create an on-call schedule that doesn’t suck.
Here's a post on how to create an effective on-call schedule for small startup teams, to the ones that span across multiple teams in multiple...
Static Code Analysis in R
Hello Fellow members, I am using RStudio-1.2.5033 and R version 3.3.2 (2016-10-31). I am looking for a tool or a package that does static analysis of ".R" files . I was wondering whether there is a package like there is pylint, pyflakes etc. for python.
I did my research on this and found a Package called ' CodeDepends' but that doesn't support version-3.3.2 and found another one called 'codetools'. Right now I am looking at the 'codetools' package and seeing how that works with a ".r" file.
I am referring to this link:
[https://stat.ethz.ch/R-manual/R-patched/library/codetools/html/00Index.html](https://stat.ethz.ch/R-manual/R-patched/library/codetools/html/00Index.html)
https://redd.it/fcrsap
@r_devops
Hello Fellow members, I am using RStudio-1.2.5033 and R version 3.3.2 (2016-10-31). I am looking for a tool or a package that does static analysis of ".R" files . I was wondering whether there is a package like there is pylint, pyflakes etc. for python.
I did my research on this and found a Package called ' CodeDepends' but that doesn't support version-3.3.2 and found another one called 'codetools'. Right now I am looking at the 'codetools' package and seeing how that works with a ".r" file.
I am referring to this link:
[https://stat.ethz.ch/R-manual/R-patched/library/codetools/html/00Index.html](https://stat.ethz.ch/R-manual/R-patched/library/codetools/html/00Index.html)
https://redd.it/fcrsap
@r_devops
PoP: Concurrency vs Parallelism in Systems Design
Episode 85 of the Practical Operations Podcast is out! In this episode we focus on Concurrency versus Parallelism and how this affects systems and infrastructure design. Of course, its hard not to make comparisons to our favorite programming languages as well. Listen to our latest episode and tell us what you think!
[https://operations.fm/episodes/85/](https://operations.fm/episodes/85/)
Also available in [iTunes](https://podcasts.apple.com/us/podcast/episode-85-concurrency-vs-parallelism-in-systems-design/id1071645001?i=1000467050735) and [Google Play](https://podcasts.google.com/?feed=aHR0cDovL29wZXJhdGlvbnMuZm0vZXBpc29kZXMvaW5kZXgueG1s&episode=aHR0cDovL2F1ZGlvLm9wZXJhdGlvbnMuZm0vZXBpc29kZS04NS1wYXJhbGxlbC12cy1jb25jdXJyZW50Lm1wMw&ved=0CAYQkfYCahcKEwiQ94vSuf_nAhUAAAAAHQAAAAAQBQ)!
https://redd.it/fd3lq4
@r_devops
Episode 85 of the Practical Operations Podcast is out! In this episode we focus on Concurrency versus Parallelism and how this affects systems and infrastructure design. Of course, its hard not to make comparisons to our favorite programming languages as well. Listen to our latest episode and tell us what you think!
[https://operations.fm/episodes/85/](https://operations.fm/episodes/85/)
Also available in [iTunes](https://podcasts.apple.com/us/podcast/episode-85-concurrency-vs-parallelism-in-systems-design/id1071645001?i=1000467050735) and [Google Play](https://podcasts.google.com/?feed=aHR0cDovL29wZXJhdGlvbnMuZm0vZXBpc29kZXMvaW5kZXgueG1s&episode=aHR0cDovL2F1ZGlvLm9wZXJhdGlvbnMuZm0vZXBpc29kZS04NS1wYXJhbGxlbC12cy1jb25jdXJyZW50Lm1wMw&ved=0CAYQkfYCahcKEwiQ94vSuf_nAhUAAAAAHQAAAAAQBQ)!
https://redd.it/fd3lq4
@r_devops
Troubleshooting JFrog Artifactory...
Hi there,
First time posting and to be completely honest, a little out of my comfort zone. I am currently looking into a potential issue surrounding Artifactory that was recently set up for our new Docker instance. The team responsible for it had an issue today that they brought to my attention where they are receiving 400 Bad Requests trying to connect to our local Artifactory server from the Docker command line. However, if we open a browser to go to the web portal, they can log in no problem!
One issue I helped solve was there we needed to set up a \*.server.domain.com DNS A record which set up the sub folder in the parent DNS zone ([domain.com](https://domain.com)). This resolved one issue which was surrounding NGinx/Artifactory for mapping through NGinx to the Artifactory api. But now there maybe seems to be another issue where from Docker cmd they still cannot push to Artifactory?
Would this need a cert setup/installed somewhere? On the server running Docker? On the Artifactory server? Or not at all? Thanks!
https://redd.it/fd3awn
@r_devops
Hi there,
First time posting and to be completely honest, a little out of my comfort zone. I am currently looking into a potential issue surrounding Artifactory that was recently set up for our new Docker instance. The team responsible for it had an issue today that they brought to my attention where they are receiving 400 Bad Requests trying to connect to our local Artifactory server from the Docker command line. However, if we open a browser to go to the web portal, they can log in no problem!
One issue I helped solve was there we needed to set up a \*.server.domain.com DNS A record which set up the sub folder in the parent DNS zone ([domain.com](https://domain.com)). This resolved one issue which was surrounding NGinx/Artifactory for mapping through NGinx to the Artifactory api. But now there maybe seems to be another issue where from Docker cmd they still cannot push to Artifactory?
Would this need a cert setup/installed somewhere? On the server running Docker? On the Artifactory server? Or not at all? Thanks!
https://redd.it/fd3awn
@r_devops
Domain
Domain Names, Site Builder, Hosting, and More | Domain.com
Finding and buying the perfect domain is as easy as 1-2-3 with Domain.com. We'll even help get you online with our DIY and Pro site builder and marketing tools.
How do you handle development data?
I've been thinking about how to set up test data for development, but haven't found an easy way to get data that has the following properties:
* It's sufficiently similar to production that it's useful for testing. I.e. covers a variety of cases and is up to date with the latest schema.
* It's sufficiently different from production that it:
* isn't a security risk (it's anonymized and whatever)
* isn't gigantic (it's a subset of the production data, or it's mock data)
Is this difficult for you all? How do you deal with development data?
https://redd.it/fd4pl1
@r_devops
I've been thinking about how to set up test data for development, but haven't found an easy way to get data that has the following properties:
* It's sufficiently similar to production that it's useful for testing. I.e. covers a variety of cases and is up to date with the latest schema.
* It's sufficiently different from production that it:
* isn't a security risk (it's anonymized and whatever)
* isn't gigantic (it's a subset of the production data, or it's mock data)
Is this difficult for you all? How do you deal with development data?
https://redd.it/fd4pl1
@r_devops
reddit
How do you handle development data?
I've been thinking about how to set up test data for development, but haven't found an easy way to get data that has the following properties: ...
Building an onboarding series for new technical hires
I’m starting a new project unlike one most discuss in /r/devops and one I haven’t seen done before: a technical onboarding series for new software developer and engineering talent!
Though the details will differ company to company, but it feels like the overall flow shouldn’t be. At my current company, we have applications that live on-premise within corporate data centers with others in the cloud. Both have some underlying technology prerequisites (Kubernetes, AWS) and should probably have various exercises to go along with them followed by building a simple, first-class application.
The problem is that these are purely my ideas without any concrete examples that others do. I hope someone could point me to good examples to model such a process from.
Regards!
https://redd.it/fd4en0
@r_devops
I’m starting a new project unlike one most discuss in /r/devops and one I haven’t seen done before: a technical onboarding series for new software developer and engineering talent!
Though the details will differ company to company, but it feels like the overall flow shouldn’t be. At my current company, we have applications that live on-premise within corporate data centers with others in the cloud. Both have some underlying technology prerequisites (Kubernetes, AWS) and should probably have various exercises to go along with them followed by building a simple, first-class application.
The problem is that these are purely my ideas without any concrete examples that others do. I hope someone could point me to good examples to model such a process from.
Regards!
https://redd.it/fd4en0
@r_devops
reddit
Building an onboarding series for new technical hires
I’m starting a new project unlike one most discuss in /r/devops and one I haven’t seen done before: a technical onboarding series for new software...
Cloud Engineer vs Devops Engineer
I was curious on the communities take on which role I should focus my attention on when looking in the job market.
​
I'm interested in building out cloud resources using IaC, automating processes with python/golang/powershell, deploying CI/CD pipelines and using more Docker/Kubernetes.
​
(I understand devops engineer is a catch-all job title but nonetheless)
From what I've been seeing, Devops Engineer roles tend to be focused around building CI/CD pipelines for other teams (primarily devs) using IaC, etc built/maintained by a cloud engineering role.
It seems that Devops is "one layer up the stack" not really needing to focus on how infrastructure is deployed, but mainly on getting it deployed for their pipelines.
​
I just want to make sure I choose a role that is furthering my career in the right direction.
https://redd.it/fd639k
@r_devops
I was curious on the communities take on which role I should focus my attention on when looking in the job market.
​
I'm interested in building out cloud resources using IaC, automating processes with python/golang/powershell, deploying CI/CD pipelines and using more Docker/Kubernetes.
​
(I understand devops engineer is a catch-all job title but nonetheless)
From what I've been seeing, Devops Engineer roles tend to be focused around building CI/CD pipelines for other teams (primarily devs) using IaC, etc built/maintained by a cloud engineering role.
It seems that Devops is "one layer up the stack" not really needing to focus on how infrastructure is deployed, but mainly on getting it deployed for their pipelines.
​
I just want to make sure I choose a role that is furthering my career in the right direction.
https://redd.it/fd639k
@r_devops
reddit
Cloud Engineer vs Devops Engineer
I was curious about the community's take on which role I should focus my attention on when looking at the job market. I'm interested in...
Using Packer to share encrypted AMI to multiple accounts
Hello all,
Would anyone know how I would be able to share an encrypted AMI that I create in our Dev AWS account to our Staging and Prod AWS accounts using Packer? Looks like there are multiple ways to do it, but ideally I would like to use just one json file to do the entire share as opposed to my other way, which would have been:
\- 3 Packer files
\- 1 creates in Dev unencrypted and share to Stg/Prod
\-1 uses that shared ami in Stg to then encrypt it
\-1 uses that shared ami in Prod to then encrypt it
​
Would be best if I could just create one encrypted AMI in Dev and share it accordingly
https://redd.it/fd52c2
@r_devops
Hello all,
Would anyone know how I would be able to share an encrypted AMI that I create in our Dev AWS account to our Staging and Prod AWS accounts using Packer? Looks like there are multiple ways to do it, but ideally I would like to use just one json file to do the entire share as opposed to my other way, which would have been:
\- 3 Packer files
\- 1 creates in Dev unencrypted and share to Stg/Prod
\-1 uses that shared ami in Stg to then encrypt it
\-1 uses that shared ami in Prod to then encrypt it
​
Would be best if I could just create one encrypted AMI in Dev and share it accordingly
https://redd.it/fd52c2
@r_devops
reddit
Using Packer to share encrypted AMI to multiple accounts
Hello all, Would anyone know how I would be able to share an encrypted ami that I create in our Dev AWS account to our Staging and Prod AWS...
Mulesoft vs Tray.io, Zapier, Workato
Does anyone here have any experience with Tray.io, Zapier or Workato? How do they compare with Mulesoft?
https://redd.it/fd6nae
@r_devops
Does anyone here have any experience with Tray.io, Zapier or Workato? How do they compare with Mulesoft?
https://redd.it/fd6nae
@r_devops
reddit
Mulesoft vs Tray.io, Zapier, Workato
Does anyone here have any experience with Tray.io, Zapier or Workato? How do they compare with Mulesoft?
Vault
We're a really small team. Does anyone have experience running HashiCorp Vault as a small team? I think we'd get utility out of having a central store for our secrets, but I'm concerned about the overhead and also how mission-critical it would be. If it went down, everything else would go down.
https://redd.it/fd2nhu
@r_devops
We're a really small team. Does anyone have experience running HashiCorp Vault as a small team? I think we'd get utility out of having a central store for our secrets, but I'm concerned about the overhead and also how mission-critical it would be. If it went down, everything else would go down.
https://redd.it/fd2nhu
@r_devops
reddit
Vault
We're a really small team. Does anyone have experience running Hashicorp Vault as a small team? I think we'd get utility out of having a central...
How to securely handle SSL certificate deployment?
Hi all. I was wondering what the best way is to manage the deployment of SSL certificates to multiple servers.
My experience is pretty limited to only doing single server hosting and using Lets Encrypt. However, I'm now working on a rather large project and I want to be able to create a single SSL cert and distribute it to all of my servers. The reason for doing this instead of using, lets say, App Engine's managed SSL service, is that I want to be able to read the file and use it to sign tokens among other things.
The issue is, I'm unsure what the best way is to implement this and how to do it securely. My thought was to have a standalone server that just renews certificates once they near expiration and uploads them to object storage; then, on each server, run a cron job that pulls the cert every month or something.
The second thought was to use scp to pull the file directly from the SSL server with a cron job, but then do I need to setup SSH keys for each server to talk to each other?
I'm lost at what to do thus far and any help would be appreciated. Thanks!
https://redd.it/fd8tw1
@r_devops
Hi all. I was wondering what the best way is to manage the deployment of SSL certificates to multiple servers.
My experience is pretty limited to only doing single server hosting and using Lets Encrypt. However, I'm now working on a rather large project and I want to be able to create a single SSL cert and distribute it to all of my servers. The reason for doing this instead of using, lets say, App Engine's managed SSL service, is that I want to be able to read the file and use it to sign tokens among other things.
The issue is, I'm unsure what the best way is to implement this and how to do it securely. My thought was to have a standalone server that just renews certificates once they near expiration and uploads them to object storage; then, on each server, run a cron job that pulls the cert every month or something.
The second thought was to use scp to pull the file directly from the SSL server with a cron job, but then do I need to setup SSH keys for each server to talk to each other?
I'm lost at what to do thus far and any help would be appreciated. Thanks!
https://redd.it/fd8tw1
@r_devops
reddit
How to securely handle SSL certificate deployment?
Hi all. I was wondering what is the best way at managing the deployment of SSL certificates to multiple servers. My experience is pretty limited...
KPIs
Interested as to what different KPIs different DevOps teams use, and how they measure them.
https://redd.it/fd6qkh
@r_devops
Interested as to what different KPIs different DevOps teams use, and how they measure them.
https://redd.it/fd6qkh
@r_devops
reddit
KPIs
Interested as to what different KPIs different DevOps teams use, and how they measure them.
Naming Standards for Infrastructure
I wrote an article [here](https://medium.com/@nandovillalba/devops-naming-standards-5e9e73996dfb) about naming standards with some ideas on how to implement them. I was googling around and I couldn't find many resources on what good practices people follow to name their resources so I drew mostly from my experience. I was wondering if I could get more ideas from you so I can expand the article.
What sort of logic do you follow when you name your resources in general in your company? How do you enforce it?
https://redd.it/fcy3vq
@r_devops
I wrote an article [here](https://medium.com/@nandovillalba/devops-naming-standards-5e9e73996dfb) about naming standards with some ideas on how to implement them. I was googling around and I couldn't find many resources on what good practices people follow to name their resources so I drew mostly from my experience. I was wondering if I could get more ideas from you so I can expand the article.
What sort of logic do you follow when you name your resources in general in your company? How do you enforce it?
https://redd.it/fcy3vq
@r_devops
Medium
DevOps: Naming Standards
Get this right early on and make your life easier.